Diffstat (limited to 'drivers')
138 files changed, 19741 insertions(+), 860 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 3f8a231fe754..d74d9fbb9fd2 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -52,6 +52,8 @@ source "drivers/i2c/Kconfig" | |||
52 | 52 | ||
53 | source "drivers/spi/Kconfig" | 53 | source "drivers/spi/Kconfig" |
54 | 54 | ||
55 | source "drivers/gpio/Kconfig" | ||
56 | |||
55 | source "drivers/w1/Kconfig" | 57 | source "drivers/w1/Kconfig" |
56 | 58 | ||
57 | source "drivers/power/Kconfig" | 59 | source "drivers/power/Kconfig" |
diff --git a/drivers/Makefile b/drivers/Makefile
index 0ee9a8a4095e..f1c11db52a57 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -5,6 +5,7 @@ | |||
5 | # Rewritten to use lists instead of if-statements. | 5 | # Rewritten to use lists instead of if-statements. |
6 | # | 6 | # |
7 | 7 | ||
8 | obj-$(CONFIG_HAVE_GPIO_LIB) += gpio/ | ||
8 | obj-$(CONFIG_PCI) += pci/ | 9 | obj-$(CONFIG_PCI) += pci/ |
9 | obj-$(CONFIG_PARISC) += parisc/ | 10 | obj-$(CONFIG_PARISC) += parisc/ |
10 | obj-$(CONFIG_RAPIDIO) += rapidio/ | 11 | obj-$(CONFIG_RAPIDIO) += rapidio/ |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index eb1f82f79153..199ea2146153 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -38,7 +38,7 @@ | |||
38 | #include <linux/dmi.h> | 38 | #include <linux/dmi.h> |
39 | #include <linux/moduleparam.h> | 39 | #include <linux/moduleparam.h> |
40 | #include <linux/sched.h> /* need_resched() */ | 40 | #include <linux/sched.h> /* need_resched() */ |
41 | #include <linux/latency.h> | 41 | #include <linux/pm_qos_params.h> |
42 | #include <linux/clockchips.h> | 42 | #include <linux/clockchips.h> |
43 | #include <linux/cpuidle.h> | 43 | #include <linux/cpuidle.h> |
44 | 44 | ||
@@ -648,7 +648,8 @@ static void acpi_processor_idle(void) | |||
648 | if (cx->promotion.state && | 648 | if (cx->promotion.state && |
649 | ((cx->promotion.state - pr->power.states) <= max_cstate)) { | 649 | ((cx->promotion.state - pr->power.states) <= max_cstate)) { |
650 | if (sleep_ticks > cx->promotion.threshold.ticks && | 650 | if (sleep_ticks > cx->promotion.threshold.ticks && |
651 | cx->promotion.state->latency <= system_latency_constraint()) { | 651 | cx->promotion.state->latency <= |
652 | pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) { | ||
652 | cx->promotion.count++; | 653 | cx->promotion.count++; |
653 | cx->demotion.count = 0; | 654 | cx->demotion.count = 0; |
654 | if (cx->promotion.count >= | 655 | if (cx->promotion.count >= |
@@ -692,7 +693,8 @@ static void acpi_processor_idle(void) | |||
692 | * or if the latency of the current state is unacceptable | 693 | * or if the latency of the current state is unacceptable |
693 | */ | 694 | */ |
694 | if ((pr->power.state - pr->power.states) > max_cstate || | 695 | if ((pr->power.state - pr->power.states) > max_cstate || |
695 | pr->power.state->latency > system_latency_constraint()) { | 696 | pr->power.state->latency > |
697 | pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) { | ||
696 | if (cx->demotion.state) | 698 | if (cx->demotion.state) |
697 | next_state = cx->demotion.state; | 699 | next_state = cx->demotion.state; |
698 | } | 700 | } |
@@ -1200,7 +1202,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset) | |||
1200 | "maximum allowed latency: %d usec\n", | 1202 | "maximum allowed latency: %d usec\n", |
1201 | pr->power.state ? pr->power.state - pr->power.states : 0, | 1203 | pr->power.state ? pr->power.state - pr->power.states : 0, |
1202 | max_cstate, (unsigned)pr->power.bm_activity, | 1204 | max_cstate, (unsigned)pr->power.bm_activity, |
1203 | system_latency_constraint()); | 1205 | pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)); |
1204 | 1206 | ||
1205 | seq_puts(seq, "states:\n"); | 1207 | seq_puts(seq, "states:\n"); |
1206 | 1208 | ||
@@ -1718,8 +1720,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, | |||
1718 | "ACPI: processor limited to max C-state %d\n", | 1720 | "ACPI: processor limited to max C-state %d\n", |
1719 | max_cstate); | 1721 | max_cstate); |
1720 | first_run++; | 1722 | first_run++; |
1721 | #if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP) | 1723 | #if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP) |
1722 | register_latency_notifier(&acpi_processor_latency_notifier); | 1724 | pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, |
1725 | &acpi_processor_latency_notifier); | ||
1723 | #endif | 1726 | #endif |
1724 | } | 1727 | } |
1725 | 1728 | ||
@@ -1806,7 +1809,8 @@ int acpi_processor_power_exit(struct acpi_processor *pr, | |||
1806 | */ | 1809 | */ |
1807 | cpu_idle_wait(); | 1810 | cpu_idle_wait(); |
1808 | #ifdef CONFIG_SMP | 1811 | #ifdef CONFIG_SMP |
1809 | unregister_latency_notifier(&acpi_processor_latency_notifier); | 1812 | pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY, |
1813 | &acpi_processor_latency_notifier); | ||
1810 | #endif | 1814 | #endif |
1811 | } | 1815 | } |
1812 | #endif | 1816 | #endif |
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 96e614a1c169..59e65edc5820 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -108,17 +108,6 @@ struct inic_port_priv { | |||
108 | u8 cached_pirq_mask; | 108 | u8 cached_pirq_mask; |
109 | }; | 109 | }; |
110 | 110 | ||
111 | static int inic_slave_config(struct scsi_device *sdev) | ||
112 | { | ||
113 | /* This controller is braindamaged. dma_boundary is 0xffff | ||
114 | * like others but it will lock up the whole machine HARD if | ||
115 | * 65536 byte PRD entry is fed. Reduce maximum segment size. | ||
116 | */ | ||
117 | blk_queue_max_segment_size(sdev->request_queue, 65536 - 512); | ||
118 | |||
119 | return ata_scsi_slave_config(sdev); | ||
120 | } | ||
121 | |||
122 | static struct scsi_host_template inic_sht = { | 111 | static struct scsi_host_template inic_sht = { |
123 | .module = THIS_MODULE, | 112 | .module = THIS_MODULE, |
124 | .name = DRV_NAME, | 113 | .name = DRV_NAME, |
@@ -132,7 +121,7 @@ static struct scsi_host_template inic_sht = { | |||
132 | .use_clustering = ATA_SHT_USE_CLUSTERING, | 121 | .use_clustering = ATA_SHT_USE_CLUSTERING, |
133 | .proc_name = DRV_NAME, | 122 | .proc_name = DRV_NAME, |
134 | .dma_boundary = ATA_DMA_BOUNDARY, | 123 | .dma_boundary = ATA_DMA_BOUNDARY, |
135 | .slave_configure = inic_slave_config, | 124 | .slave_configure = ata_scsi_slave_config, |
136 | .slave_destroy = ata_scsi_slave_destroy, | 125 | .slave_destroy = ata_scsi_slave_destroy, |
137 | .bios_param = ata_std_bios_param, | 126 | .bios_param = ata_std_bios_param, |
138 | }; | 127 | }; |
@@ -730,6 +719,18 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
730 | return rc; | 719 | return rc; |
731 | } | 720 | } |
732 | 721 | ||
722 | /* | ||
723 | * This controller is braindamaged. dma_boundary is 0xffff | ||
724 | * like others but it will lock up the whole machine HARD if | ||
725 | * 65536 byte PRD entry is fed. Reduce maximum segment size. | ||
726 | */ | ||
727 | rc = pci_set_dma_max_seg_size(pdev, 65536 - 512); | ||
728 | if (rc) { | ||
729 | dev_printk(KERN_ERR, &pdev->dev, | ||
730 | "failed to set the maximum segment size.\n"); | ||
731 | return rc; | ||
732 | } | ||
733 | |||
733 | rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl); | 734 | rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl); |
734 | if (rc) { | 735 | if (rc) { |
735 | dev_printk(KERN_ERR, &pdev->dev, | 736 | dev_printk(KERN_ERR, &pdev->dev, |
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 1375b5345a0a..3b28658f5a1f 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -423,6 +423,7 @@ static int bpa10x_send_frame(struct sk_buff *skb) | |||
423 | break; | 423 | break; |
424 | 424 | ||
425 | default: | 425 | default: |
426 | usb_free_urb(urb); | ||
426 | return -EILSEQ; | 427 | return -EILSEQ; |
427 | } | 428 | } |
428 | 429 | ||
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index a18f9b8c9e12..7703d6e06fd9 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -704,7 +704,7 @@ static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t * | |||
704 | 704 | ||
705 | static int bt3c_config(struct pcmcia_device *link) | 705 | static int bt3c_config(struct pcmcia_device *link) |
706 | { | 706 | { |
707 | static kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; | 707 | static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; |
708 | bt3c_info_t *info = link->priv; | 708 | bt3c_info_t *info = link->priv; |
709 | tuple_t tuple; | 709 | tuple_t tuple; |
710 | u_short buf[256]; | 710 | u_short buf[256]; |
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index b786f6187902..58630cc1eff2 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -162,10 +162,8 @@ static int btsdio_rx_packet(struct btsdio_data *data) | |||
162 | bt_cb(skb)->pkt_type = hdr[3]; | 162 | bt_cb(skb)->pkt_type = hdr[3]; |
163 | 163 | ||
164 | err = hci_recv_frame(skb); | 164 | err = hci_recv_frame(skb); |
165 | if (err < 0) { | 165 | if (err < 0) |
166 | kfree(skb); | ||
167 | return err; | 166 | return err; |
168 | } | ||
169 | 167 | ||
170 | sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL); | 168 | sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL); |
171 | 169 | ||
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index dade1626865b..68d1d258e6a4 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -634,7 +634,7 @@ static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t * | |||
634 | 634 | ||
635 | static int btuart_config(struct pcmcia_device *link) | 635 | static int btuart_config(struct pcmcia_device *link) |
636 | { | 636 | { |
637 | static kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; | 637 | static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; |
638 | btuart_info_t *info = link->priv; | 638 | btuart_info_t *info = link->priv; |
639 | tuple_t tuple; | 639 | tuple_t tuple; |
640 | u_short buf[256]; | 640 | u_short buf[256]; |
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 98a9cdeaffb6..372c7ef633da 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -111,6 +111,7 @@ static struct usb_device_id blacklist_ids[] = { | |||
111 | { USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE }, | 111 | { USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE }, |
112 | 112 | ||
113 | /* Broadcom BCM2035 */ | 113 | /* Broadcom BCM2035 */ |
114 | { USB_DEVICE(0x0a5c, 0x2035), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, | ||
114 | { USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, | 115 | { USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, |
115 | { USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 }, | 116 | { USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 }, |
116 | 117 | ||
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index b83824c41329..c69f79598e47 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -117,7 +117,8 @@ struct agp_bridge_driver { | |||
117 | void (*free_by_type)(struct agp_memory *); | 117 | void (*free_by_type)(struct agp_memory *); |
118 | void *(*agp_alloc_page)(struct agp_bridge_data *); | 118 | void *(*agp_alloc_page)(struct agp_bridge_data *); |
119 | void (*agp_destroy_page)(void *, int flags); | 119 | void (*agp_destroy_page)(void *, int flags); |
120 | int (*agp_type_to_mask_type) (struct agp_bridge_data *, int); | 120 | int (*agp_type_to_mask_type) (struct agp_bridge_data *, int); |
121 | void (*chipset_flush)(struct agp_bridge_data *); | ||
121 | }; | 122 | }; |
122 | 123 | ||
123 | struct agp_bridge_data { | 124 | struct agp_bridge_data { |
@@ -235,6 +236,9 @@ struct agp_bridge_data { | |||
235 | #define I965_PGETBL_SIZE_512KB (0 << 1) | 236 | #define I965_PGETBL_SIZE_512KB (0 << 1) |
236 | #define I965_PGETBL_SIZE_256KB (1 << 1) | 237 | #define I965_PGETBL_SIZE_256KB (1 << 1) |
237 | #define I965_PGETBL_SIZE_128KB (2 << 1) | 238 | #define I965_PGETBL_SIZE_128KB (2 << 1) |
239 | #define I965_PGETBL_SIZE_1MB (3 << 1) | ||
240 | #define I965_PGETBL_SIZE_2MB (4 << 1) | ||
241 | #define I965_PGETBL_SIZE_1_5MB (5 << 1) | ||
238 | #define G33_PGETBL_SIZE_MASK (3 << 8) | 242 | #define G33_PGETBL_SIZE_MASK (3 << 8) |
239 | #define G33_PGETBL_SIZE_1M (1 << 8) | 243 | #define G33_PGETBL_SIZE_1M (1 << 8) |
240 | #define G33_PGETBL_SIZE_2M (2 << 8) | 244 | #define G33_PGETBL_SIZE_2M (2 << 8) |
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index aa8f3a39a704..e77c17838c8a 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -11,29 +11,28 @@ | |||
11 | 11 | ||
12 | #include "agp.h" | 12 | #include "agp.h" |
13 | 13 | ||
14 | static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma, | 14 | static int alpha_core_agp_vm_fault(struct vm_area_struct *vma, |
15 | unsigned long address, | 15 | struct vm_fault *vmf) |
16 | int *type) | ||
17 | { | 16 | { |
18 | alpha_agp_info *agp = agp_bridge->dev_private_data; | 17 | alpha_agp_info *agp = agp_bridge->dev_private_data; |
19 | dma_addr_t dma_addr; | 18 | dma_addr_t dma_addr; |
20 | unsigned long pa; | 19 | unsigned long pa; |
21 | struct page *page; | 20 | struct page *page; |
22 | 21 | ||
23 | dma_addr = address - vma->vm_start + agp->aperture.bus_base; | 22 | dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start |
23 | + agp->aperture.bus_base; | ||
24 | pa = agp->ops->translate(agp, dma_addr); | 24 | pa = agp->ops->translate(agp, dma_addr); |
25 | 25 | ||
26 | if (pa == (unsigned long)-EINVAL) | 26 | if (pa == (unsigned long)-EINVAL) |
27 | return NULL; /* no translation */ | 27 | return VM_FAULT_SIGBUS; /* no translation */ |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * Get the page, inc the use count, and return it | 30 | * Get the page, inc the use count, and return it |
31 | */ | 31 | */ |
32 | page = virt_to_page(__va(pa)); | 32 | page = virt_to_page(__va(pa)); |
33 | get_page(page); | 33 | get_page(page); |
34 | if (type) | 34 | vmf->page = page; |
35 | *type = VM_FAULT_MINOR; | 35 | return 0; |
36 | return page; | ||
37 | } | 36 | } |
38 | 37 | ||
39 | static struct aper_size_info_fixed alpha_core_agp_sizes[] = | 38 | static struct aper_size_info_fixed alpha_core_agp_sizes[] = |
@@ -42,7 +41,7 @@ static struct aper_size_info_fixed alpha_core_agp_sizes[] = | |||
42 | }; | 41 | }; |
43 | 42 | ||
44 | struct vm_operations_struct alpha_core_agp_vm_ops = { | 43 | struct vm_operations_struct alpha_core_agp_vm_ops = { |
45 | .nopage = alpha_core_agp_vm_nopage, | 44 | .fault = alpha_core_agp_vm_fault, |
46 | }; | 45 | }; |
47 | 46 | ||
48 | 47 | ||
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 1405a42585e1..87be46406daf 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -436,10 +436,6 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev, | |||
436 | return -ENODEV; | 436 | return -ENODEV; |
437 | } | 437 | } |
438 | cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP); | 438 | cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP); |
439 | if (!cap_ptr) { | ||
440 | pci_dev_put(gfxcard); | ||
441 | continue; | ||
442 | } | ||
443 | } | 439 | } |
444 | 440 | ||
445 | /* With so many variants of NVidia cards, it's simpler just | 441 | /* With so many variants of NVidia cards, it's simpler just |
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 2720882e66fe..b1bdd015165c 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -43,7 +43,7 @@ | |||
43 | * fix some real stupidity. It's only by chance we can bump | 43 | * fix some real stupidity. It's only by chance we can bump |
44 | * past 0.99 at all due to some boolean logic error. */ | 44 | * past 0.99 at all due to some boolean logic error. */ |
45 | #define AGPGART_VERSION_MAJOR 0 | 45 | #define AGPGART_VERSION_MAJOR 0 |
46 | #define AGPGART_VERSION_MINOR 102 | 46 | #define AGPGART_VERSION_MINOR 103 |
47 | static const struct agp_version agp_current_version = | 47 | static const struct agp_version agp_current_version = |
48 | { | 48 | { |
49 | .major = AGPGART_VERSION_MAJOR, | 49 | .major = AGPGART_VERSION_MAJOR, |
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
index ecd4248861b9..39275794fe63 100644
--- a/drivers/char/agp/compat_ioctl.c
+++ b/drivers/char/agp/compat_ioctl.c
@@ -273,6 +273,10 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
273 | case AGPIOC_UNBIND32: | 273 | case AGPIOC_UNBIND32: |
274 | ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg); | 274 | ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg); |
275 | break; | 275 | break; |
276 | |||
277 | case AGPIOC_CHIPSET_FLUSH32: | ||
278 | ret_val = agpioc_chipset_flush_wrap(curr_priv); | ||
279 | break; | ||
276 | } | 280 | } |
277 | 281 | ||
278 | ioctl_out: | 282 | ioctl_out: |
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
index 71939d637236..0c9678ac0371 100644
--- a/drivers/char/agp/compat_ioctl.h
+++ b/drivers/char/agp/compat_ioctl.h
@@ -39,6 +39,7 @@ | |||
39 | #define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t) | 39 | #define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t) |
40 | #define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t) | 40 | #define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t) |
41 | #define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t) | 41 | #define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t) |
42 | #define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10) | ||
42 | 43 | ||
43 | struct agp_info32 { | 44 | struct agp_info32 { |
44 | struct agp_version version; /* version of the driver */ | 45 | struct agp_version version; /* version of the driver */ |
@@ -101,5 +102,6 @@ void agp_free_memory_wrap(struct agp_memory *memory); | |||
101 | struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type); | 102 | struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type); |
102 | struct agp_memory *agp_find_mem_by_key(int key); | 103 | struct agp_memory *agp_find_mem_by_key(int key); |
103 | struct agp_client *agp_find_client_by_pid(pid_t id); | 104 | struct agp_client *agp_find_client_by_pid(pid_t id); |
105 | int agpioc_chipset_flush_wrap(struct agp_file_private *priv); | ||
104 | 106 | ||
105 | #endif /* _AGP_COMPAT_H */ | 107 | #endif /* _AGP_COMPAT_H */ |
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 7791e98de51c..55d7a82bd071 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -689,7 +689,7 @@ static int agp_open(struct inode *inode, struct file *file) | |||
689 | set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags); | 689 | set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags); |
690 | priv->my_pid = current->pid; | 690 | priv->my_pid = current->pid; |
691 | 691 | ||
692 | if ((current->uid == 0) || (current->suid == 0)) { | 692 | if (capable(CAP_SYS_RAWIO)) { |
693 | /* Root priv, can be controller */ | 693 | /* Root priv, can be controller */ |
694 | set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags); | 694 | set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags); |
695 | } | 695 | } |
@@ -960,6 +960,13 @@ static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg) | |||
960 | return agp_unbind_memory(memory); | 960 | return agp_unbind_memory(memory); |
961 | } | 961 | } |
962 | 962 | ||
963 | int agpioc_chipset_flush_wrap(struct agp_file_private *priv) | ||
964 | { | ||
965 | DBG(""); | ||
966 | agp_flush_chipset(agp_bridge); | ||
967 | return 0; | ||
968 | } | ||
969 | |||
963 | static int agp_ioctl(struct inode *inode, struct file *file, | 970 | static int agp_ioctl(struct inode *inode, struct file *file, |
964 | unsigned int cmd, unsigned long arg) | 971 | unsigned int cmd, unsigned long arg) |
965 | { | 972 | { |
@@ -1033,6 +1040,10 @@ static int agp_ioctl(struct inode *inode, struct file *file, | |||
1033 | case AGPIOC_UNBIND: | 1040 | case AGPIOC_UNBIND: |
1034 | ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg); | 1041 | ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg); |
1035 | break; | 1042 | break; |
1043 | |||
1044 | case AGPIOC_CHIPSET_FLUSH: | ||
1045 | ret_val = agpioc_chipset_flush_wrap(curr_priv); | ||
1046 | break; | ||
1036 | } | 1047 | } |
1037 | 1048 | ||
1038 | ioctl_out: | 1049 | ioctl_out: |
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 1a4674ce0c71..7484bc759c4c 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -80,6 +80,13 @@ static int agp_get_key(void) | |||
80 | return -1; | 80 | return -1; |
81 | } | 81 | } |
82 | 82 | ||
83 | void agp_flush_chipset(struct agp_bridge_data *bridge) | ||
84 | { | ||
85 | if (bridge->driver->chipset_flush) | ||
86 | bridge->driver->chipset_flush(bridge); | ||
87 | } | ||
88 | EXPORT_SYMBOL(agp_flush_chipset); | ||
89 | |||
83 | /* | 90 | /* |
84 | * Use kmalloc if possible for the page list. Otherwise fall back to | 91 | * Use kmalloc if possible for the page list. Otherwise fall back to |
85 | * vmalloc. This speeds things up and also saves memory for small AGP | 92 | * vmalloc. This speeds things up and also saves memory for small AGP |
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 189efb6ef970..eeea50a1d22a 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -14,8 +14,8 @@ | |||
14 | #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a | 14 | #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a |
15 | #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 | 15 | #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 |
16 | #define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 | 16 | #define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 |
17 | #define PCI_DEVICE_ID_INTEL_82965G_1_HB 0x2980 | 17 | #define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980 |
18 | #define PCI_DEVICE_ID_INTEL_82965G_1_IG 0x2982 | 18 | #define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982 |
19 | #define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 | 19 | #define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 |
20 | #define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 | 20 | #define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 |
21 | #define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 | 21 | #define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 |
@@ -32,13 +32,24 @@ | |||
32 | #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 | 32 | #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 |
33 | #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 | 33 | #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 |
34 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 | 34 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 |
35 | #define PCI_DEVICE_ID_INTEL_IGD_HB 0x2A40 | ||
36 | #define PCI_DEVICE_ID_INTEL_IGD_IG 0x2A42 | ||
37 | |||
38 | /* cover 915 and 945 variants */ | ||
39 | #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ | ||
40 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \ | ||
41 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \ | ||
42 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \ | ||
43 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \ | ||
44 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB) | ||
35 | 45 | ||
36 | #define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ | 46 | #define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ |
37 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_1_HB || \ | 47 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \ |
38 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ | 48 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ |
39 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ | 49 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ |
40 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ | 50 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ |
41 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB) | 51 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB || \ |
52 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_HB) | ||
42 | 53 | ||
43 | #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ | 54 | #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ |
44 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ | 55 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ |
@@ -71,9 +82,11 @@ extern int agp_memory_reserved; | |||
71 | #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) | 82 | #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) |
72 | #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) | 83 | #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) |
73 | #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) | 84 | #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) |
85 | #define I915_IFPADDR 0x60 | ||
74 | 86 | ||
75 | /* Intel 965G registers */ | 87 | /* Intel 965G registers */ |
76 | #define I965_MSAC 0x62 | 88 | #define I965_MSAC 0x62 |
89 | #define I965_IFPADDR 0x70 | ||
77 | 90 | ||
78 | /* Intel 7505 registers */ | 91 | /* Intel 7505 registers */ |
79 | #define INTEL_I7505_APSIZE 0x74 | 92 | #define INTEL_I7505_APSIZE 0x74 |
@@ -115,6 +128,13 @@ static struct _intel_private { | |||
115 | * popup and for the GTT. | 128 | * popup and for the GTT. |
116 | */ | 129 | */ |
117 | int gtt_entries; /* i830+ */ | 130 | int gtt_entries; /* i830+ */ |
131 | union { | ||
132 | void __iomem *i9xx_flush_page; | ||
133 | void *i8xx_flush_page; | ||
134 | }; | ||
135 | struct page *i8xx_page; | ||
136 | struct resource ifp_resource; | ||
137 | int resource_valid; | ||
118 | } intel_private; | 138 | } intel_private; |
119 | 139 | ||
120 | static int intel_i810_fetch_size(void) | 140 | static int intel_i810_fetch_size(void) |
@@ -204,7 +224,7 @@ static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) | |||
204 | /* Exists to support ARGB cursors */ | 224 | /* Exists to support ARGB cursors */ |
205 | static void *i8xx_alloc_pages(void) | 225 | static void *i8xx_alloc_pages(void) |
206 | { | 226 | { |
207 | struct page * page; | 227 | struct page *page; |
208 | 228 | ||
209 | page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); | 229 | page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); |
210 | if (page == NULL) | 230 | if (page == NULL) |
@@ -433,7 +453,7 @@ static void intel_i830_init_gtt_entries(void) | |||
433 | static const int ddt[4] = { 0, 16, 32, 64 }; | 453 | static const int ddt[4] = { 0, 16, 32, 64 }; |
434 | int size; /* reserved space (in kb) at the top of stolen memory */ | 454 | int size; /* reserved space (in kb) at the top of stolen memory */ |
435 | 455 | ||
436 | pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl); | 456 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); |
437 | 457 | ||
438 | if (IS_I965) { | 458 | if (IS_I965) { |
439 | u32 pgetbl_ctl; | 459 | u32 pgetbl_ctl; |
@@ -453,6 +473,15 @@ static void intel_i830_init_gtt_entries(void) | |||
453 | case I965_PGETBL_SIZE_512KB: | 473 | case I965_PGETBL_SIZE_512KB: |
454 | size = 512; | 474 | size = 512; |
455 | break; | 475 | break; |
476 | case I965_PGETBL_SIZE_1MB: | ||
477 | size = 1024; | ||
478 | break; | ||
479 | case I965_PGETBL_SIZE_2MB: | ||
480 | size = 2048; | ||
481 | break; | ||
482 | case I965_PGETBL_SIZE_1_5MB: | ||
483 | size = 1024 + 512; | ||
484 | break; | ||
456 | default: | 485 | default: |
457 | printk(KERN_INFO PFX "Unknown page table size, " | 486 | printk(KERN_INFO PFX "Unknown page table size, " |
458 | "assuming 512KB\n"); | 487 | "assuming 512KB\n"); |
@@ -523,26 +552,14 @@ static void intel_i830_init_gtt_entries(void) | |||
523 | break; | 552 | break; |
524 | case I915_GMCH_GMS_STOLEN_48M: | 553 | case I915_GMCH_GMS_STOLEN_48M: |
525 | /* Check it's really I915G */ | 554 | /* Check it's really I915G */ |
526 | if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || | 555 | if (IS_I915 || IS_I965 || IS_G33) |
527 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || | ||
528 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || | ||
529 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || | ||
530 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || | ||
531 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB || | ||
532 | IS_I965 || IS_G33) | ||
533 | gtt_entries = MB(48) - KB(size); | 556 | gtt_entries = MB(48) - KB(size); |
534 | else | 557 | else |
535 | gtt_entries = 0; | 558 | gtt_entries = 0; |
536 | break; | 559 | break; |
537 | case I915_GMCH_GMS_STOLEN_64M: | 560 | case I915_GMCH_GMS_STOLEN_64M: |
538 | /* Check it's really I915G */ | 561 | /* Check it's really I915G */ |
539 | if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || | 562 | if (IS_I915 || IS_I965 || IS_G33) |
540 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || | ||
541 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || | ||
542 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || | ||
543 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || | ||
544 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB || | ||
545 | IS_I965 || IS_G33) | ||
546 | gtt_entries = MB(64) - KB(size); | 563 | gtt_entries = MB(64) - KB(size); |
547 | else | 564 | else |
548 | gtt_entries = 0; | 565 | gtt_entries = 0; |
@@ -575,6 +592,45 @@ static void intel_i830_init_gtt_entries(void) | |||
575 | intel_private.gtt_entries = gtt_entries; | 592 | intel_private.gtt_entries = gtt_entries; |
576 | } | 593 | } |
577 | 594 | ||
595 | static void intel_i830_fini_flush(void) | ||
596 | { | ||
597 | kunmap(intel_private.i8xx_page); | ||
598 | intel_private.i8xx_flush_page = NULL; | ||
599 | unmap_page_from_agp(intel_private.i8xx_page); | ||
600 | |||
601 | __free_page(intel_private.i8xx_page); | ||
602 | intel_private.i8xx_page = NULL; | ||
603 | } | ||
604 | |||
605 | static void intel_i830_setup_flush(void) | ||
606 | { | ||
607 | /* return if we've already set the flush mechanism up */ | ||
608 | if (intel_private.i8xx_page) | ||
609 | return; | ||
610 | |||
611 | intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); | ||
612 | if (!intel_private.i8xx_page) | ||
613 | return; | ||
614 | |||
615 | /* make page uncached */ | ||
616 | map_page_into_agp(intel_private.i8xx_page); | ||
617 | |||
618 | intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); | ||
619 | if (!intel_private.i8xx_flush_page) | ||
620 | intel_i830_fini_flush(); | ||
621 | } | ||
622 | |||
623 | static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) | ||
624 | { | ||
625 | unsigned int *pg = intel_private.i8xx_flush_page; | ||
626 | int i; | ||
627 | |||
628 | for (i = 0; i < 256; i += 2) | ||
629 | *(pg + i) = i; | ||
630 | |||
631 | wmb(); | ||
632 | } | ||
633 | |||
578 | /* The intel i830 automatically initializes the agp aperture during POST. | 634 | /* The intel i830 automatically initializes the agp aperture during POST. |
579 | * Use the memory already set aside for in the GTT. | 635 | * Use the memory already set aside for in the GTT. |
580 | */ | 636 | */ |
@@ -590,10 +646,10 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) | |||
590 | num_entries = size->num_entries; | 646 | num_entries = size->num_entries; |
591 | agp_bridge->gatt_table_real = NULL; | 647 | agp_bridge->gatt_table_real = NULL; |
592 | 648 | ||
593 | pci_read_config_dword(intel_private.pcidev,I810_MMADDR,&temp); | 649 | pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); |
594 | temp &= 0xfff80000; | 650 | temp &= 0xfff80000; |
595 | 651 | ||
596 | intel_private.registers = ioremap(temp,128 * 4096); | 652 | intel_private.registers = ioremap(temp, 128 * 4096); |
597 | if (!intel_private.registers) | 653 | if (!intel_private.registers) |
598 | return -ENOMEM; | 654 | return -ENOMEM; |
599 | 655 | ||
@@ -633,7 +689,7 @@ static int intel_i830_fetch_size(void) | |||
633 | return values[0].size; | 689 | return values[0].size; |
634 | } | 690 | } |
635 | 691 | ||
636 | pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl); | 692 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); |
637 | 693 | ||
638 | if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { | 694 | if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { |
639 | agp_bridge->previous_size = agp_bridge->current_size = (void *) values; | 695 | agp_bridge->previous_size = agp_bridge->current_size = (void *) values; |
@@ -657,12 +713,12 @@ static int intel_i830_configure(void) | |||
657 | 713 | ||
658 | current_size = A_SIZE_FIX(agp_bridge->current_size); | 714 | current_size = A_SIZE_FIX(agp_bridge->current_size); |
659 | 715 | ||
660 | pci_read_config_dword(intel_private.pcidev,I810_GMADDR,&temp); | 716 | pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); |
661 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 717 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); |
662 | 718 | ||
663 | pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl); | 719 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); |
664 | gmch_ctrl |= I830_GMCH_ENABLED; | 720 | gmch_ctrl |= I830_GMCH_ENABLED; |
665 | pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl); | 721 | pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); |
666 | 722 | ||
667 | writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); | 723 | writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); |
668 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | 724 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ |
@@ -675,6 +731,8 @@ static int intel_i830_configure(void) | |||
675 | } | 731 | } |
676 | 732 | ||
677 | global_cache_flush(); | 733 | global_cache_flush(); |
734 | |||
735 | intel_i830_setup_flush(); | ||
678 | return 0; | 736 | return 0; |
679 | } | 737 | } |
680 | 738 | ||
@@ -683,9 +741,10 @@ static void intel_i830_cleanup(void) | |||
683 | iounmap(intel_private.registers); | 741 | iounmap(intel_private.registers); |
684 | } | 742 | } |
685 | 743 | ||
686 | static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int type) | 744 | static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, |
745 | int type) | ||
687 | { | 746 | { |
688 | int i,j,num_entries; | 747 | int i, j, num_entries; |
689 | void *temp; | 748 | void *temp; |
690 | int ret = -EINVAL; | 749 | int ret = -EINVAL; |
691 | int mask_type; | 750 | int mask_type; |
@@ -697,10 +756,10 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int | |||
697 | num_entries = A_SIZE_FIX(temp)->num_entries; | 756 | num_entries = A_SIZE_FIX(temp)->num_entries; |
698 | 757 | ||
699 | if (pg_start < intel_private.gtt_entries) { | 758 | if (pg_start < intel_private.gtt_entries) { |
700 | printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n", | 759 | printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n", |
701 | pg_start,intel_private.gtt_entries); | 760 | pg_start, intel_private.gtt_entries); |
702 | 761 | ||
703 | printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n"); | 762 | printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n"); |
704 | goto out_err; | 763 | goto out_err; |
705 | } | 764 | } |
706 | 765 | ||
@@ -738,8 +797,8 @@ out_err: | |||
738 | return ret; | 797 | return ret; |
739 | } | 798 | } |
740 | 799 | ||
741 | static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start, | 800 | static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, |
742 | int type) | 801 | int type) |
743 | { | 802 | { |
744 | int i; | 803 | int i; |
745 | 804 | ||
@@ -747,7 +806,7 @@ static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start, | |||
747 | return 0; | 806 | return 0; |
748 | 807 | ||
749 | if (pg_start < intel_private.gtt_entries) { | 808 | if (pg_start < intel_private.gtt_entries) { |
750 | printk (KERN_INFO PFX "Trying to disable local/stolen memory\n"); | 809 | printk(KERN_INFO PFX "Trying to disable local/stolen memory\n"); |
751 | return -EINVAL; | 810 | return -EINVAL; |
752 | } | 811 | } |
753 | 812 | ||
@@ -760,7 +819,7 @@ static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start, | |||
760 | return 0; | 819 | return 0; |
761 | } | 820 | } |
762 | 821 | ||
763 | static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type) | 822 | static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) |
764 | { | 823 | { |
765 | if (type == AGP_PHYS_MEMORY) | 824 | if (type == AGP_PHYS_MEMORY) |
766 | return alloc_agpphysmem_i8xx(pg_count, type); | 825 | return alloc_agpphysmem_i8xx(pg_count, type); |
@@ -768,6 +827,95 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type) | |||
768 | return NULL; | 827 | return NULL; |
769 | } | 828 | } |
770 | 829 | ||
830 | static int intel_alloc_chipset_flush_resource(void) | ||
831 | { | ||
832 | int ret; | ||
833 | ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, | ||
834 | PAGE_SIZE, PCIBIOS_MIN_MEM, 0, | ||
835 | pcibios_align_resource, agp_bridge->dev); | ||
836 | |||
837 | return ret; | ||
838 | } | ||
839 | |||
840 | static void intel_i915_setup_chipset_flush(void) | ||
841 | { | ||
842 | int ret; | ||
843 | u32 temp; | ||
844 | |||
845 | pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp); | ||
846 | if (!(temp & 0x1)) { | ||
847 | intel_alloc_chipset_flush_resource(); | ||
848 | intel_private.resource_valid = 1; | ||
849 | pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); | ||
850 | } else { | ||
851 | temp &= ~1; | ||
852 | |||
853 | intel_private.resource_valid = 1; | ||
854 | intel_private.ifp_resource.start = temp; | ||
855 | intel_private.ifp_resource.end = temp + PAGE_SIZE; | ||
856 | ret = request_resource(&iomem_resource, &intel_private.ifp_resource); | ||
857 | /* some BIOSes reserve this area in a pnp some don't */ | ||
858 | if (ret) | ||
859 | intel_private.resource_valid = 0; | ||
860 | } | ||
861 | } | ||
862 | |||
863 | static void intel_i965_g33_setup_chipset_flush(void) | ||
864 | { | ||
865 | u32 temp_hi, temp_lo; | ||
866 | int ret; | ||
867 | |||
868 | pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi); | ||
869 | pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo); | ||
870 | |||
871 | if (!(temp_lo & 0x1)) { | ||
872 | |||
873 | intel_alloc_chipset_flush_resource(); | ||
874 | |||
875 | intel_private.resource_valid = 1; | ||
876 | pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4, | ||
877 | upper_32_bits(intel_private.ifp_resource.start)); | ||
878 | pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); | ||
879 | } else { | ||
880 | u64 l64; | ||
881 | |||
882 | temp_lo &= ~0x1; | ||
883 | l64 = ((u64)temp_hi << 32) | temp_lo; | ||
884 | |||
885 | intel_private.resource_valid = 1; | ||
886 | intel_private.ifp_resource.start = l64; | ||
887 | intel_private.ifp_resource.end = l64 + PAGE_SIZE; | ||
888 | ret = request_resource(&iomem_resource, &intel_private.ifp_resource); | ||
889 | /* some BIOSes reserve this area in a pnp some don't */ | ||
890 | if (ret) | ||
891 | intel_private.resource_valid = 0; | ||
892 | } | ||
893 | } | ||
894 | |||
895 | static void intel_i9xx_setup_flush(void) | ||
896 | { | ||
897 | /* return if already configured */ | ||
898 | if (intel_private.ifp_resource.start) | ||
899 | return; | ||
900 | |||
901 | /* setup a resource for this object */ | ||
902 | intel_private.ifp_resource.name = "Intel Flush Page"; | ||
903 | intel_private.ifp_resource.flags = IORESOURCE_MEM; | ||
904 | |||
905 | /* Setup chipset flush for 915 */ | ||
906 | if (IS_I965 || IS_G33) { | ||
907 | intel_i965_g33_setup_chipset_flush(); | ||
908 | } else { | ||
909 | intel_i915_setup_chipset_flush(); | ||
910 | } | ||
911 | |||
912 | if (intel_private.ifp_resource.start) { | ||
913 | intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); | ||
914 | if (!intel_private.i9xx_flush_page) | ||
915 | printk(KERN_INFO "unable to ioremap flush page - no chipset flushing"); | ||
916 | } | ||
917 | } | ||
918 | |||
771 | static int intel_i915_configure(void) | 919 | static int intel_i915_configure(void) |
772 | { | 920 | { |
773 | struct aper_size_info_fixed *current_size; | 921 | struct aper_size_info_fixed *current_size; |
@@ -781,9 +929,9 @@ static int intel_i915_configure(void) | |||
781 | 929 | ||
782 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 930 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); |
783 | 931 | ||
784 | pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl); | 932 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); |
785 | gmch_ctrl |= I830_GMCH_ENABLED; | 933 | gmch_ctrl |= I830_GMCH_ENABLED; |
786 | pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl); | 934 | pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); |
787 | 935 | ||
788 | writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); | 936 | writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); |
789 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | 937 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ |
@@ -796,19 +944,34 @@ static int intel_i915_configure(void) | |||
796 | } | 944 | } |
797 | 945 | ||
798 | global_cache_flush(); | 946 | global_cache_flush(); |
947 | |||
948 | intel_i9xx_setup_flush(); | ||
949 | |||
799 | return 0; | 950 | return 0; |
800 | } | 951 | } |
801 | 952 | ||
802 | static void intel_i915_cleanup(void) | 953 | static void intel_i915_cleanup(void) |
803 | { | 954 | { |
955 | if (intel_private.i9xx_flush_page) | ||
956 | iounmap(intel_private.i9xx_flush_page); | ||
957 | if (intel_private.resource_valid) | ||
958 | release_resource(&intel_private.ifp_resource); | ||
959 | intel_private.ifp_resource.start = 0; | ||
960 | intel_private.resource_valid = 0; | ||
804 | iounmap(intel_private.gtt); | 961 | iounmap(intel_private.gtt); |
805 | iounmap(intel_private.registers); | 962 | iounmap(intel_private.registers); |
806 | } | 963 | } |
807 | 964 | ||
808 | static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start, | 965 | static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) |
809 | int type) | ||
810 | { | 966 | { |
811 | int i,j,num_entries; | 967 | if (intel_private.i9xx_flush_page) |
968 | writel(1, intel_private.i9xx_flush_page); | ||
969 | } | ||
970 | |||
971 | static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, | ||
972 | int type) | ||
973 | { | ||
974 | int i, j, num_entries; | ||
812 | void *temp; | 975 | void *temp; |
813 | int ret = -EINVAL; | 976 | int ret = -EINVAL; |
814 | int mask_type; | 977 | int mask_type; |
@@ -820,10 +983,10 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start, | |||
820 | num_entries = A_SIZE_FIX(temp)->num_entries; | 983 | num_entries = A_SIZE_FIX(temp)->num_entries; |
821 | 984 | ||
822 | if (pg_start < intel_private.gtt_entries) { | 985 | if (pg_start < intel_private.gtt_entries) { |
823 | printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n", | 986 | printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n", |
824 | pg_start,intel_private.gtt_entries); | 987 | pg_start, intel_private.gtt_entries); |
825 | 988 | ||
826 | printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n"); | 989 | printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n"); |
827 | goto out_err; | 990 | goto out_err; |
828 | } | 991 | } |
829 | 992 | ||
@@ -861,8 +1024,8 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start, | |||
861 | return ret; | 1024 | return ret; |
862 | } | 1025 | } |
863 | 1026 | ||
864 | static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start, | 1027 | static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, |
865 | int type) | 1028 | int type) |
866 | { | 1029 | { |
867 | int i; | 1030 | int i; |
868 | 1031 | ||
@@ -870,13 +1033,13 @@ static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start, | |||
870 | return 0; | 1033 | return 0; |
871 | 1034 | ||
872 | if (pg_start < intel_private.gtt_entries) { | 1035 | if (pg_start < intel_private.gtt_entries) { |
873 | printk (KERN_INFO PFX "Trying to disable local/stolen memory\n"); | 1036 | printk(KERN_INFO PFX "Trying to disable local/stolen memory\n"); |
874 | return -EINVAL; | 1037 | return -EINVAL; |
875 | } | 1038 | } |
876 | 1039 | ||
877 | for (i = pg_start; i < (mem->page_count + pg_start); i++) { | 1040 | for (i = pg_start; i < (mem->page_count + pg_start); i++) |
878 | writel(agp_bridge->scratch_page, intel_private.gtt+i); | 1041 | writel(agp_bridge->scratch_page, intel_private.gtt+i); |
879 | } | 1042 | |
880 | readl(intel_private.gtt+i-1); | 1043 | readl(intel_private.gtt+i-1); |
881 | 1044 | ||
882 | agp_bridge->driver->tlb_flush(mem); | 1045 | agp_bridge->driver->tlb_flush(mem); |
@@ -923,7 +1086,7 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) | |||
923 | agp_bridge->gatt_table_real = NULL; | 1086 | agp_bridge->gatt_table_real = NULL; |
924 | 1087 | ||
925 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | 1088 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); |
926 | pci_read_config_dword(intel_private.pcidev, I915_PTEADDR,&temp2); | 1089 | pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); |
927 | 1090 | ||
928 | if (IS_G33) | 1091 | if (IS_G33) |
929 | gtt_map_size = 1024 * 1024; /* 1M on G33 */ | 1092 | gtt_map_size = 1024 * 1024; /* 1M on G33 */ |
@@ -933,7 +1096,7 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) | |||
933 | 1096 | ||
934 | temp &= 0xfff80000; | 1097 | temp &= 0xfff80000; |
935 | 1098 | ||
936 | intel_private.registers = ioremap(temp,128 * 4096); | 1099 | intel_private.registers = ioremap(temp, 128 * 4096); |
937 | if (!intel_private.registers) { | 1100 | if (!intel_private.registers) { |
938 | iounmap(intel_private.gtt); | 1101 | iounmap(intel_private.gtt); |
939 | return -ENOMEM; | 1102 | return -ENOMEM; |
@@ -980,6 +1143,7 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) | |||
980 | struct aper_size_info_fixed *size; | 1143 | struct aper_size_info_fixed *size; |
981 | int num_entries; | 1144 | int num_entries; |
982 | u32 temp; | 1145 | u32 temp; |
1146 | int gtt_offset, gtt_size; | ||
983 | 1147 | ||
984 | size = agp_bridge->current_size; | 1148 | size = agp_bridge->current_size; |
985 | page_order = size->page_order; | 1149 | page_order = size->page_order; |
@@ -989,13 +1153,18 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) | |||
989 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | 1153 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); |
990 | 1154 | ||
991 | temp &= 0xfff00000; | 1155 | temp &= 0xfff00000; |
992 | intel_private.gtt = ioremap((temp + (512 * 1024)) , 512 * 1024); | ||
993 | 1156 | ||
994 | if (!intel_private.gtt) | 1157 | if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_HB) |
995 | return -ENOMEM; | 1158 | gtt_offset = gtt_size = MB(2); |
1159 | else | ||
1160 | gtt_offset = gtt_size = KB(512); | ||
1161 | |||
1162 | intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size); | ||
996 | 1163 | ||
1164 | if (!intel_private.gtt) | ||
1165 | return -ENOMEM; | ||
997 | 1166 | ||
998 | intel_private.registers = ioremap(temp,128 * 4096); | 1167 | intel_private.registers = ioremap(temp, 128 * 4096); |
999 | if (!intel_private.registers) { | 1168 | if (!intel_private.registers) { |
1000 | iounmap(intel_private.gtt); | 1169 | iounmap(intel_private.gtt); |
1001 | return -ENOMEM; | 1170 | return -ENOMEM; |
@@ -1154,7 +1323,7 @@ static int intel_815_configure(void) | |||
1154 | /* the Intel 815 chipset spec. says that bits 29-31 in the | 1323 | /* the Intel 815 chipset spec. says that bits 29-31 in the |
1155 | * ATTBASE register are reserved -> try not to write them */ | 1324 | * ATTBASE register are reserved -> try not to write them */ |
1156 | if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) { | 1325 | if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) { |
1157 | printk (KERN_EMERG PFX "gatt bus addr too high"); | 1326 | printk(KERN_EMERG PFX "gatt bus addr too high"); |
1158 | return -EINVAL; | 1327 | return -EINVAL; |
1159 | } | 1328 | } |
1160 | 1329 | ||
@@ -1296,6 +1465,8 @@ static int intel_845_configure(void) | |||
1296 | pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1)); | 1465 | pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1)); |
1297 | /* clear any possible error conditions */ | 1466 | /* clear any possible error conditions */ |
1298 | pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c); | 1467 | pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c); |
1468 | |||
1469 | intel_i830_setup_flush(); | ||
1299 | return 0; | 1470 | return 0; |
1300 | } | 1471 | } |
1301 | 1472 | ||
@@ -1552,6 +1723,7 @@ static const struct agp_bridge_driver intel_830_driver = { | |||
1552 | .agp_alloc_page = agp_generic_alloc_page, | 1723 | .agp_alloc_page = agp_generic_alloc_page, |
1553 | .agp_destroy_page = agp_generic_destroy_page, | 1724 | .agp_destroy_page = agp_generic_destroy_page, |
1554 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | 1725 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, |
1726 | .chipset_flush = intel_i830_chipset_flush, | ||
1555 | }; | 1727 | }; |
1556 | 1728 | ||
1557 | static const struct agp_bridge_driver intel_820_driver = { | 1729 | static const struct agp_bridge_driver intel_820_driver = { |
@@ -1648,6 +1820,7 @@ static const struct agp_bridge_driver intel_845_driver = { | |||
1648 | .agp_alloc_page = agp_generic_alloc_page, | 1820 | .agp_alloc_page = agp_generic_alloc_page, |
1649 | .agp_destroy_page = agp_generic_destroy_page, | 1821 | .agp_destroy_page = agp_generic_destroy_page, |
1650 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 1822 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
1823 | .chipset_flush = intel_i830_chipset_flush, | ||
1651 | }; | 1824 | }; |
1652 | 1825 | ||
1653 | static const struct agp_bridge_driver intel_850_driver = { | 1826 | static const struct agp_bridge_driver intel_850_driver = { |
@@ -1721,6 +1894,7 @@ static const struct agp_bridge_driver intel_915_driver = { | |||
1721 | .agp_alloc_page = agp_generic_alloc_page, | 1894 | .agp_alloc_page = agp_generic_alloc_page, |
1722 | .agp_destroy_page = agp_generic_destroy_page, | 1895 | .agp_destroy_page = agp_generic_destroy_page, |
1723 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | 1896 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, |
1897 | .chipset_flush = intel_i915_chipset_flush, | ||
1724 | }; | 1898 | }; |
1725 | 1899 | ||
1726 | static const struct agp_bridge_driver intel_i965_driver = { | 1900 | static const struct agp_bridge_driver intel_i965_driver = { |
@@ -1746,6 +1920,7 @@ static const struct agp_bridge_driver intel_i965_driver = { | |||
1746 | .agp_alloc_page = agp_generic_alloc_page, | 1920 | .agp_alloc_page = agp_generic_alloc_page, |
1747 | .agp_destroy_page = agp_generic_destroy_page, | 1921 | .agp_destroy_page = agp_generic_destroy_page, |
1748 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | 1922 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, |
1923 | .chipset_flush = intel_i915_chipset_flush, | ||
1749 | }; | 1924 | }; |
1750 | 1925 | ||
1751 | static const struct agp_bridge_driver intel_7505_driver = { | 1926 | static const struct agp_bridge_driver intel_7505_driver = { |
@@ -1795,6 +1970,7 @@ static const struct agp_bridge_driver intel_g33_driver = { | |||
1795 | .agp_alloc_page = agp_generic_alloc_page, | 1970 | .agp_alloc_page = agp_generic_alloc_page, |
1796 | .agp_destroy_page = agp_generic_destroy_page, | 1971 | .agp_destroy_page = agp_generic_destroy_page, |
1797 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | 1972 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, |
1973 | .chipset_flush = intel_i915_chipset_flush, | ||
1798 | }; | 1974 | }; |
1799 | 1975 | ||
1800 | static int find_gmch(u16 device) | 1976 | static int find_gmch(u16 device) |
@@ -1804,7 +1980,7 @@ static int find_gmch(u16 device) | |||
1804 | gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); | 1980 | gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); |
1805 | if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) { | 1981 | if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) { |
1806 | gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, | 1982 | gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, |
1807 | device, gmch_device); | 1983 | device, gmch_device); |
1808 | } | 1984 | } |
1809 | 1985 | ||
1810 | if (!gmch_device) | 1986 | if (!gmch_device) |
@@ -1867,7 +2043,7 @@ static const struct intel_driver_description { | |||
1867 | NULL, &intel_915_driver }, | 2043 | NULL, &intel_915_driver }, |
1868 | { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ", | 2044 | { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ", |
1869 | NULL, &intel_i965_driver }, | 2045 | NULL, &intel_i965_driver }, |
1870 | { PCI_DEVICE_ID_INTEL_82965G_1_HB, PCI_DEVICE_ID_INTEL_82965G_1_IG, 0, "965G", | 2046 | { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35", |
1871 | NULL, &intel_i965_driver }, | 2047 | NULL, &intel_i965_driver }, |
1872 | { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q", | 2048 | { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q", |
1873 | NULL, &intel_i965_driver }, | 2049 | NULL, &intel_i965_driver }, |
@@ -1885,6 +2061,8 @@ static const struct intel_driver_description { | |||
1885 | NULL, &intel_g33_driver }, | 2061 | NULL, &intel_g33_driver }, |
1886 | { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", | 2062 | { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", |
1887 | NULL, &intel_g33_driver }, | 2063 | NULL, &intel_g33_driver }, |
2064 | { PCI_DEVICE_ID_INTEL_IGD_HB, PCI_DEVICE_ID_INTEL_IGD_IG, 0, | ||
2065 | "Intel Integrated Graphics Device", NULL, &intel_i965_driver }, | ||
1888 | { 0, 0, 0, NULL, NULL, NULL } | 2066 | { 0, 0, 0, NULL, NULL, NULL } |
1889 | }; | 2067 | }; |
1890 | 2068 | ||
@@ -1924,7 +2102,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
1924 | if (intel_agp_chipsets[i].name == NULL) { | 2102 | if (intel_agp_chipsets[i].name == NULL) { |
1925 | if (cap_ptr) | 2103 | if (cap_ptr) |
1926 | printk(KERN_WARNING PFX "Unsupported Intel chipset" | 2104 | printk(KERN_WARNING PFX "Unsupported Intel chipset" |
1927 | "(device id: %04x)\n", pdev->device); | 2105 | "(device id: %04x)\n", pdev->device); |
1928 | agp_put_bridge(bridge); | 2106 | agp_put_bridge(bridge); |
1929 | return -ENODEV; | 2107 | return -ENODEV; |
1930 | } | 2108 | } |
@@ -1937,7 +2115,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
1937 | intel_agp_chipsets[i].gmch_chip_id); | 2115 | intel_agp_chipsets[i].gmch_chip_id); |
1938 | agp_put_bridge(bridge); | 2116 | agp_put_bridge(bridge); |
1939 | return -ENODEV; | 2117 | return -ENODEV; |
1940 | } | 2118 | } |
1941 | 2119 | ||
1942 | bridge->dev = pdev; | 2120 | bridge->dev = pdev; |
1943 | bridge->capndx = cap_ptr; | 2121 | bridge->capndx = cap_ptr; |
@@ -2067,7 +2245,7 @@ static struct pci_device_id agp_intel_pci_table[] = { | |||
2067 | ID(PCI_DEVICE_ID_INTEL_82945GM_HB), | 2245 | ID(PCI_DEVICE_ID_INTEL_82945GM_HB), |
2068 | ID(PCI_DEVICE_ID_INTEL_82945GME_HB), | 2246 | ID(PCI_DEVICE_ID_INTEL_82945GME_HB), |
2069 | ID(PCI_DEVICE_ID_INTEL_82946GZ_HB), | 2247 | ID(PCI_DEVICE_ID_INTEL_82946GZ_HB), |
2070 | ID(PCI_DEVICE_ID_INTEL_82965G_1_HB), | 2248 | ID(PCI_DEVICE_ID_INTEL_82G35_HB), |
2071 | ID(PCI_DEVICE_ID_INTEL_82965Q_HB), | 2249 | ID(PCI_DEVICE_ID_INTEL_82965Q_HB), |
2072 | ID(PCI_DEVICE_ID_INTEL_82965G_HB), | 2250 | ID(PCI_DEVICE_ID_INTEL_82965G_HB), |
2073 | ID(PCI_DEVICE_ID_INTEL_82965GM_HB), | 2251 | ID(PCI_DEVICE_ID_INTEL_82965GM_HB), |
@@ -2075,6 +2253,7 @@ static struct pci_device_id agp_intel_pci_table[] = { | |||
2075 | ID(PCI_DEVICE_ID_INTEL_G33_HB), | 2253 | ID(PCI_DEVICE_ID_INTEL_G33_HB), |
2076 | ID(PCI_DEVICE_ID_INTEL_Q35_HB), | 2254 | ID(PCI_DEVICE_ID_INTEL_Q35_HB), |
2077 | ID(PCI_DEVICE_ID_INTEL_Q33_HB), | 2255 | ID(PCI_DEVICE_ID_INTEL_Q33_HB), |
2256 | ID(PCI_DEVICE_ID_INTEL_IGD_HB), | ||
2078 | { } | 2257 | { } |
2079 | }; | 2258 | }; |
2080 | 2259 | ||
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 0118b9817a95..84cdf9025737 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c | |||
@@ -234,11 +234,11 @@ static DEVICE_ATTR(rng_available, S_IRUGO, | |||
234 | NULL); | 234 | NULL); |
235 | 235 | ||
236 | 236 | ||
237 | static void unregister_miscdev(void) | 237 | static void unregister_miscdev(bool suspended) |
238 | { | 238 | { |
239 | device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available); | 239 | device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available); |
240 | device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current); | 240 | device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current); |
241 | misc_deregister(&rng_miscdev); | 241 | __misc_deregister(&rng_miscdev, suspended); |
242 | } | 242 | } |
243 | 243 | ||
244 | static int register_miscdev(void) | 244 | static int register_miscdev(void) |
@@ -313,7 +313,7 @@ out: | |||
313 | } | 313 | } |
314 | EXPORT_SYMBOL_GPL(hwrng_register); | 314 | EXPORT_SYMBOL_GPL(hwrng_register); |
315 | 315 | ||
316 | void hwrng_unregister(struct hwrng *rng) | 316 | void __hwrng_unregister(struct hwrng *rng, bool suspended) |
317 | { | 317 | { |
318 | int err; | 318 | int err; |
319 | 319 | ||
@@ -332,11 +332,11 @@ void hwrng_unregister(struct hwrng *rng) | |||
332 | } | 332 | } |
333 | } | 333 | } |
334 | if (list_empty(&rng_list)) | 334 | if (list_empty(&rng_list)) |
335 | unregister_miscdev(); | 335 | unregister_miscdev(suspended); |
336 | 336 | ||
337 | mutex_unlock(&rng_mutex); | 337 | mutex_unlock(&rng_mutex); |
338 | } | 338 | } |
339 | EXPORT_SYMBOL_GPL(hwrng_unregister); | 339 | EXPORT_SYMBOL_GPL(__hwrng_unregister); |
340 | 340 | ||
341 | 341 | ||
342 | MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); | 342 | MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); |
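Note on the hunk above: hwrng_unregister() becomes __hwrng_unregister(rng, suspended), so the final misc device teardown can be routed through the suspend-aware path added in misc.c below. Callers presumably keep the old name via thin wrappers in include/linux/hw_random.h, which is not part of this section; a minimal sketch of what those might look like (the wrapper names and signatures are assumptions, only __hwrng_unregister() appears in this diff):

/* Hypothetical header-side wrappers, not taken from this patch. */
static inline void hwrng_unregister(struct hwrng *rng)
{
        __hwrng_unregister(rng, false);         /* normal teardown */
}

static inline void hwrng_unregister_suspended(struct hwrng *rng)
{
        __hwrng_unregister(rng, true);          /* teardown during suspend/resume */
}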
diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 71c8cd7fa15f..a39101feb2ed 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c | |||
@@ -232,8 +232,9 @@ int misc_register(struct miscdevice * misc) | |||
232 | } | 232 | } |
233 | 233 | ||
234 | /** | 234 | /** |
235 | * misc_deregister - unregister a miscellaneous device | 235 | * __misc_deregister - unregister a miscellaneous device |
236 | * @misc: device to unregister | 236 | * @misc: device to unregister |
237 | * @suspended: to be set if the function is used during suspend/resume | ||
237 | * | 238 | * |
238 | * Unregister a miscellaneous device that was previously | 239 | * Unregister a miscellaneous device that was previously |
239 | * successfully registered with misc_register(). Success | 240 | * successfully registered with misc_register(). Success |
@@ -241,7 +242,7 @@ int misc_register(struct miscdevice * misc) | |||
241 | * indicates an error. | 242 | * indicates an error. |
242 | */ | 243 | */ |
243 | 244 | ||
244 | int misc_deregister(struct miscdevice * misc) | 245 | int __misc_deregister(struct miscdevice *misc, bool suspended) |
245 | { | 246 | { |
246 | int i = misc->minor; | 247 | int i = misc->minor; |
247 | 248 | ||
@@ -250,7 +251,11 @@ int misc_deregister(struct miscdevice * misc) | |||
250 | 251 | ||
251 | mutex_lock(&misc_mtx); | 252 | mutex_lock(&misc_mtx); |
252 | list_del(&misc->list); | 253 | list_del(&misc->list); |
253 | device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor)); | 254 | if (suspended) |
255 | destroy_suspended_device(misc_class, | ||
256 | MKDEV(MISC_MAJOR, misc->minor)); | ||
257 | else | ||
258 | device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor)); | ||
254 | if (i < DYNAMIC_MINORS && i>0) { | 259 | if (i < DYNAMIC_MINORS && i>0) { |
255 | misc_minors[i>>3] &= ~(1 << (misc->minor & 7)); | 260 | misc_minors[i>>3] &= ~(1 << (misc->minor & 7)); |
256 | } | 261 | } |
@@ -259,7 +264,7 @@ int misc_deregister(struct miscdevice * misc) | |||
259 | } | 264 | } |
260 | 265 | ||
261 | EXPORT_SYMBOL(misc_register); | 266 | EXPORT_SYMBOL(misc_register); |
262 | EXPORT_SYMBOL(misc_deregister); | 267 | EXPORT_SYMBOL(__misc_deregister); |
263 | 268 | ||
264 | static int __init misc_init(void) | 269 | static int __init misc_init(void) |
265 | { | 270 | { |
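As with __hwrng_unregister() above, misc_deregister() grows a bool that selects destroy_suspended_device() when a device disappears while the system is suspending. The single-argument form presumably survives as an inline wrapper in include/linux/miscdevice.h, which is not shown in this section; a sketch under that assumption:

/* Hypothetical header wrappers; only __misc_deregister() is defined above. */
static inline int misc_deregister(struct miscdevice *misc)
{
        return __misc_deregister(misc, false);
}

static inline int misc_deregister_suspended(struct miscdevice *misc)
{
        return __misc_deregister(misc, true);
}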
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index 02518da6a386..454d7324ba40 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c | |||
@@ -308,7 +308,8 @@ static unsigned int calc_baudv(unsigned char fidi) | |||
308 | return (wcrcf / wbrcf); | 308 | return (wcrcf / wbrcf); |
309 | } | 309 | } |
310 | 310 | ||
311 | static unsigned short io_read_num_rec_bytes(ioaddr_t iobase, unsigned short *s) | 311 | static unsigned short io_read_num_rec_bytes(unsigned int iobase, |
312 | unsigned short *s) | ||
312 | { | 313 | { |
313 | unsigned short tmp; | 314 | unsigned short tmp; |
314 | 315 | ||
@@ -426,7 +427,7 @@ static struct card_fixup card_fixups[] = { | |||
426 | static void set_cardparameter(struct cm4000_dev *dev) | 427 | static void set_cardparameter(struct cm4000_dev *dev) |
427 | { | 428 | { |
428 | int i; | 429 | int i; |
429 | ioaddr_t iobase = dev->p_dev->io.BasePort1; | 430 | unsigned int iobase = dev->p_dev->io.BasePort1; |
430 | u_int8_t stopbits = 0x02; /* ISO default */ | 431 | u_int8_t stopbits = 0x02; /* ISO default */ |
431 | 432 | ||
432 | DEBUGP(3, dev, "-> set_cardparameter\n"); | 433 | DEBUGP(3, dev, "-> set_cardparameter\n"); |
@@ -459,7 +460,7 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq) | |||
459 | unsigned short num_bytes_read; | 460 | unsigned short num_bytes_read; |
460 | unsigned char pts_reply[4]; | 461 | unsigned char pts_reply[4]; |
461 | ssize_t rc; | 462 | ssize_t rc; |
462 | ioaddr_t iobase = dev->p_dev->io.BasePort1; | 463 | unsigned int iobase = dev->p_dev->io.BasePort1; |
463 | 464 | ||
464 | rc = 0; | 465 | rc = 0; |
465 | 466 | ||
@@ -610,7 +611,7 @@ exit_setprotocol: | |||
610 | return rc; | 611 | return rc; |
611 | } | 612 | } |
612 | 613 | ||
613 | static int io_detect_cm4000(ioaddr_t iobase, struct cm4000_dev *dev) | 614 | static int io_detect_cm4000(unsigned int iobase, struct cm4000_dev *dev) |
614 | { | 615 | { |
615 | 616 | ||
616 | /* note: statemachine is assumed to be reset */ | 617 | /* note: statemachine is assumed to be reset */ |
@@ -671,7 +672,7 @@ static void terminate_monitor(struct cm4000_dev *dev) | |||
671 | static void monitor_card(unsigned long p) | 672 | static void monitor_card(unsigned long p) |
672 | { | 673 | { |
673 | struct cm4000_dev *dev = (struct cm4000_dev *) p; | 674 | struct cm4000_dev *dev = (struct cm4000_dev *) p; |
674 | ioaddr_t iobase = dev->p_dev->io.BasePort1; | 675 | unsigned int iobase = dev->p_dev->io.BasePort1; |
675 | unsigned short s; | 676 | unsigned short s; |
676 | struct ptsreq ptsreq; | 677 | struct ptsreq ptsreq; |
677 | int i, atrc; | 678 | int i, atrc; |
@@ -933,7 +934,7 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count, | |||
933 | loff_t *ppos) | 934 | loff_t *ppos) |
934 | { | 935 | { |
935 | struct cm4000_dev *dev = filp->private_data; | 936 | struct cm4000_dev *dev = filp->private_data; |
936 | ioaddr_t iobase = dev->p_dev->io.BasePort1; | 937 | unsigned int iobase = dev->p_dev->io.BasePort1; |
937 | ssize_t rc; | 938 | ssize_t rc; |
938 | int i, j, k; | 939 | int i, j, k; |
939 | 940 | ||
@@ -1054,7 +1055,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf, | |||
1054 | size_t count, loff_t *ppos) | 1055 | size_t count, loff_t *ppos) |
1055 | { | 1056 | { |
1056 | struct cm4000_dev *dev = (struct cm4000_dev *) filp->private_data; | 1057 | struct cm4000_dev *dev = (struct cm4000_dev *) filp->private_data; |
1057 | ioaddr_t iobase = dev->p_dev->io.BasePort1; | 1058 | unsigned int iobase = dev->p_dev->io.BasePort1; |
1058 | unsigned short s; | 1059 | unsigned short s; |
1059 | unsigned char tmp; | 1060 | unsigned char tmp; |
1060 | unsigned char infolen; | 1061 | unsigned char infolen; |
@@ -1408,7 +1409,7 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, | |||
1408 | unsigned long arg) | 1409 | unsigned long arg) |
1409 | { | 1410 | { |
1410 | struct cm4000_dev *dev = filp->private_data; | 1411 | struct cm4000_dev *dev = filp->private_data; |
1411 | ioaddr_t iobase = dev->p_dev->io.BasePort1; | 1412 | unsigned int iobase = dev->p_dev->io.BasePort1; |
1412 | struct pcmcia_device *link; | 1413 | struct pcmcia_device *link; |
1413 | int size; | 1414 | int size; |
1414 | int rc; | 1415 | int rc; |
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index d2fabe7863a9..2a98d99cbd46 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/mutex.h> | 12 | #include <linux/mutex.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/notifier.h> | 14 | #include <linux/notifier.h> |
15 | #include <linux/latency.h> | 15 | #include <linux/pm_qos_params.h> |
16 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
17 | #include <linux/cpuidle.h> | 17 | #include <linux/cpuidle.h> |
18 | 18 | ||
@@ -265,7 +265,10 @@ static struct notifier_block cpuidle_latency_notifier = { | |||
265 | .notifier_call = cpuidle_latency_notify, | 265 | .notifier_call = cpuidle_latency_notify, |
266 | }; | 266 | }; |
267 | 267 | ||
268 | #define latency_notifier_init(x) do { register_latency_notifier(x); } while (0) | 268 | static inline void latency_notifier_init(struct notifier_block *n) |
269 | { | ||
270 | pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n); | ||
271 | } | ||
269 | 272 | ||
270 | #else /* CONFIG_SMP */ | 273 | #else /* CONFIG_SMP */ |
271 | 274 | ||
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index eb666ecae7c9..ba7b9a6b17a1 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/cpuidle.h> | 16 | #include <linux/cpuidle.h> |
17 | #include <linux/latency.h> | 17 | #include <linux/pm_qos_params.h> |
18 | #include <linux/moduleparam.h> | 18 | #include <linux/moduleparam.h> |
19 | #include <linux/jiffies.h> | 19 | #include <linux/jiffies.h> |
20 | 20 | ||
@@ -81,7 +81,8 @@ static int ladder_select_state(struct cpuidle_device *dev) | |||
81 | /* consider promotion */ | 81 | /* consider promotion */ |
82 | if (last_idx < dev->state_count - 1 && | 82 | if (last_idx < dev->state_count - 1 && |
83 | last_residency > last_state->threshold.promotion_time && | 83 | last_residency > last_state->threshold.promotion_time && |
84 | dev->states[last_idx + 1].exit_latency <= system_latency_constraint()) { | 84 | dev->states[last_idx + 1].exit_latency <= |
85 | pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) { | ||
85 | last_state->stats.promotion_count++; | 86 | last_state->stats.promotion_count++; |
86 | last_state->stats.demotion_count = 0; | 87 | last_state->stats.demotion_count = 0; |
87 | if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { | 88 | if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 299d45c3bdd2..78d77c5dc35c 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -8,7 +8,7 @@ | |||
8 | 8 | ||
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/cpuidle.h> | 10 | #include <linux/cpuidle.h> |
11 | #include <linux/latency.h> | 11 | #include <linux/pm_qos_params.h> |
12 | #include <linux/time.h> | 12 | #include <linux/time.h> |
13 | #include <linux/ktime.h> | 13 | #include <linux/ktime.h> |
14 | #include <linux/hrtimer.h> | 14 | #include <linux/hrtimer.h> |
@@ -48,7 +48,7 @@ static int menu_select(struct cpuidle_device *dev) | |||
48 | break; | 48 | break; |
49 | if (s->target_residency > data->predicted_us) | 49 | if (s->target_residency > data->predicted_us) |
50 | break; | 50 | break; |
51 | if (s->exit_latency > system_latency_constraint()) | 51 | if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) |
52 | break; | 52 | break; |
53 | } | 53 | } |
54 | 54 | ||
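Both cpuidle governors and the ACPI idle path now bound C-state exit latency with pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY) rather than the old global system_latency_constraint(). The producer side of that interface is not part of this section; a hedged sketch of how a driver might publish a latency bound, assuming the pm_qos_params calls take a (class, name, value) triple:

/* Sketch only: the pm_qos_add/remove_requirement() prototypes are assumed,
 * not taken from this diff; 50 us is an arbitrary example bound. */
#include <linux/pm_qos_params.h>

static int example_start_streaming(void)
{
        /* ask that CPU/DMA wakeup latency stay within 50 us */
        return pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
                                      "example-driver", 50);
}

static void example_stop_streaming(void)
{
        pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "example-driver");
}

The governors then refuse to enter any idle state whose exit_latency exceeds the smallest published requirement.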
diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c index 17502d6efae7..07f274f853d9 100644 --- a/drivers/dio/dio.c +++ b/drivers/dio/dio.c | |||
@@ -88,8 +88,6 @@ static struct dioname names[] = | |||
88 | #undef DIONAME | 88 | #undef DIONAME |
89 | #undef DIOFBNAME | 89 | #undef DIOFBNAME |
90 | 90 | ||
91 | #define NUMNAMES (sizeof(names) / sizeof(struct dioname)) | ||
92 | |||
93 | static const char *unknowndioname | 91 | static const char *unknowndioname |
94 | = "unknown DIO board -- please email <linux-m68k@lists.linux-m68k.org>!"; | 92 | = "unknown DIO board -- please email <linux-m68k@lists.linux-m68k.org>!"; |
95 | 93 | ||
@@ -97,7 +95,7 @@ static const char *dio_getname(int id) | |||
97 | { | 95 | { |
98 | /* return pointer to a constant string describing the board with given ID */ | 96 | /* return pointer to a constant string describing the board with given ID */ |
99 | unsigned int i; | 97 | unsigned int i; |
100 | for (i = 0; i < NUMNAMES; i++) | 98 | for (i = 0; i < ARRAY_SIZE(names); i++) |
101 | if (names[i].id == id) | 99 | if (names[i].id == id) |
102 | return names[i].name; | 100 | return names[i].name; |
103 | 101 | ||
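The driver-local NUMNAMES macro is dropped in favor of the generic ARRAY_SIZE() helper from <linux/kernel.h>, which is essentially the following (simplified; the kernel's version also adds a compile-time type check):

#define ARRAY_SIZE(arr)  (sizeof(arr) / sizeof((arr)[0]))

Using the shared helper avoids each driver re-deriving the element count by hand.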
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig new file mode 100644 index 000000000000..74fac0f5c348 --- /dev/null +++ b/drivers/gpio/Kconfig | |||
@@ -0,0 +1,72 @@ | |||
1 | # | ||
2 | # GPIO infrastructure and expanders | ||
3 | # | ||
4 | |||
5 | config HAVE_GPIO_LIB | ||
6 | bool | ||
7 | help | ||
8 | Platforms select gpiolib if they use this infrastructure | ||
9 | for all their GPIOs, usually starting with ones integrated | ||
10 | into SOC processors. | ||
11 | |||
12 | menu "GPIO Support" | ||
13 | depends on HAVE_GPIO_LIB | ||
14 | |||
15 | config DEBUG_GPIO | ||
16 | bool "Debug GPIO calls" | ||
17 | depends on DEBUG_KERNEL | ||
18 | help | ||
19 | Say Y here to add some extra checks and diagnostics to GPIO calls. | ||
20 | The checks help ensure that GPIOs have been properly initialized | ||
21 | before they are used and that sleeping calls aren't made from | ||
22 | nonsleeping contexts. They can make bitbanged serial protocols | ||
23 | slower. The diagnostics help catch the type of setup errors | ||
24 | that are most common when setting up new platforms or boards. | ||
25 | |||
26 | # put expanders in the right section, in alphabetical order | ||
27 | |||
28 | comment "I2C GPIO expanders:" | ||
29 | |||
30 | config GPIO_PCA9539 | ||
31 | tristate "PCA9539 16-bit I/O port" | ||
32 | depends on I2C | ||
33 | help | ||
34 | Say yes here to support the PCA9539 16-bit I/O port. These | ||
35 | parts are made by NXP and TI. | ||
36 | |||
37 | This driver can also be built as a module. If so, the module | ||
38 | will be called pca9539. | ||
39 | |||
40 | config GPIO_PCF857X | ||
41 | tristate "PCF857x, PCA857x, and PCA967x I2C GPIO expanders" | ||
42 | depends on I2C | ||
43 | help | ||
44 | Say yes here to provide access to most "quasi-bidirectional" I2C | ||
45 | GPIO expanders used for additional digital outputs or inputs. | ||
46 | Most of these parts are from NXP, though TI is a second source for | ||
47 | some of them. Compatible models include: | ||
48 | |||
49 | 8 bits: pcf8574, pcf8574a, pca8574, pca8574a, | ||
50 | pca9670, pca9672, pca9674, pca9674a | ||
51 | |||
52 | 16 bits: pcf8575, pcf8575c, pca8575, | ||
53 | pca9671, pca9673, pca9675 | ||
54 | |||
55 | Your board setup code will need to declare the expanders in | ||
56 | use, and assign numbers to the GPIOs they expose. Those GPIOs | ||
57 | can then be used from drivers and other kernel code, just like | ||
58 | other GPIOs, but only accessible from task contexts. | ||
59 | |||
60 | This driver provides an in-kernel interface to those GPIOs using | ||
61 | platform-neutral GPIO calls. | ||
62 | |||
63 | comment "SPI GPIO expanders:" | ||
64 | |||
65 | config GPIO_MCP23S08 | ||
66 | tristate "Microchip MCP23S08 I/O expander" | ||
67 | depends on SPI_MASTER | ||
68 | help | ||
69 | SPI driver for Microchip MCP23S08 I/O expander. This provides | ||
70 | a GPIO interface supporting inputs and outputs. | ||
71 | |||
72 | endmenu | ||
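The PCF857X help text above notes that board setup code must declare each expander and assign its GPIO numbers. A minimal sketch of such a declaration, assuming an 8-bit pcf8574 at I2C address 0x20 and a pcf857x_platform_data with a gpio_base field (the address and field names are illustrative assumptions; the header is not part of this section):

/* Hypothetical board file fragment. */
#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>

static struct pcf857x_platform_data expander_info = {
        .gpio_base      = 128,          /* first GPIO number exposed */
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
        {
                I2C_BOARD_INFO("pcf8574", 0x20),
                .platform_data  = &expander_info,
        },
};

Such a table would typically be handed to i2c_register_board_info() during board init, after which the expander's pins behave like any other (sleeping) GPIOs.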
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile new file mode 100644 index 000000000000..470ecd6aa778 --- /dev/null +++ b/drivers/gpio/Makefile | |||
@@ -0,0 +1,9 @@ | |||
1 | # gpio support: dedicated expander chips, etc | ||
2 | |||
3 | ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG | ||
4 | |||
5 | obj-$(CONFIG_HAVE_GPIO_LIB) += gpiolib.o | ||
6 | |||
7 | obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o | ||
8 | obj-$(CONFIG_GPIO_PCA9539) += pca9539.o | ||
9 | obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o | ||
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c new file mode 100644 index 000000000000..d8db2f8ee411 --- /dev/null +++ b/drivers/gpio/gpiolib.c | |||
@@ -0,0 +1,567 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/irq.h> | ||
4 | #include <linux/spinlock.h> | ||
5 | |||
6 | #include <asm/gpio.h> | ||
7 | |||
8 | |||
9 | /* Optional implementation infrastructure for GPIO interfaces. | ||
10 | * | ||
11 | * Platforms may want to use this if they tend to use very many GPIOs | ||
12 | * that aren't part of a System-On-Chip core; or across I2C/SPI/etc. | ||
13 | * | ||
14 | * When kernel footprint or instruction count is an issue, simpler | ||
15 | * implementations may be preferred. The GPIO programming interface | ||
16 | * allows for inlining speed-critical get/set operations for common | ||
17 | * cases, so that access to SOC-integrated GPIOs can sometimes cost | ||
18 | * only an instruction or two per bit. | ||
19 | */ | ||
20 | |||
21 | |||
22 | /* When debugging, extend minimal trust to callers and platform code. | ||
23 | * Also emit diagnostic messages that may help initial bringup, when | ||
24 | * board setup or driver bugs are most common. | ||
25 | * | ||
26 | * Otherwise, minimize overhead in what may be bitbanging codepaths. | ||
27 | */ | ||
28 | #ifdef DEBUG | ||
29 | #define extra_checks 1 | ||
30 | #else | ||
31 | #define extra_checks 0 | ||
32 | #endif | ||
33 | |||
34 | /* gpio_lock prevents conflicts during gpio_desc[] table updates. | ||
35 | * While any GPIO is requested, its gpio_chip is not removable; | ||
36 | * each GPIO's "requested" flag serves as a lock and refcount. | ||
37 | */ | ||
38 | static DEFINE_SPINLOCK(gpio_lock); | ||
39 | |||
40 | struct gpio_desc { | ||
41 | struct gpio_chip *chip; | ||
42 | unsigned long flags; | ||
43 | /* flag symbols are bit numbers */ | ||
44 | #define FLAG_REQUESTED 0 | ||
45 | #define FLAG_IS_OUT 1 | ||
46 | |||
47 | #ifdef CONFIG_DEBUG_FS | ||
48 | const char *label; | ||
49 | #endif | ||
50 | }; | ||
51 | static struct gpio_desc gpio_desc[ARCH_NR_GPIOS]; | ||
52 | |||
53 | static inline void desc_set_label(struct gpio_desc *d, const char *label) | ||
54 | { | ||
55 | #ifdef CONFIG_DEBUG_FS | ||
56 | d->label = label; | ||
57 | #endif | ||
58 | } | ||
59 | |||
60 | /* Warn when drivers omit gpio_request() calls -- legal but ill-advised | ||
61 | * when setting direction, and otherwise illegal. Until board setup code | ||
62 | * and drivers use explicit requests everywhere (which won't happen when | ||
63 | * those calls have no teeth) we can't avoid autorequesting. This nag | ||
64 | * message should motivate switching to explicit requests... | ||
65 | */ | ||
66 | static void gpio_ensure_requested(struct gpio_desc *desc) | ||
67 | { | ||
68 | if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) { | ||
69 | pr_warning("GPIO-%d autorequested\n", (int)(desc - gpio_desc)); | ||
70 | desc_set_label(desc, "[auto]"); | ||
71 | } | ||
72 | } | ||
73 | |||
74 | /* caller holds gpio_lock *OR* gpio is marked as requested */ | ||
75 | static inline struct gpio_chip *gpio_to_chip(unsigned gpio) | ||
76 | { | ||
77 | return gpio_desc[gpio].chip; | ||
78 | } | ||
79 | |||
80 | /** | ||
81 | * gpiochip_add() - register a gpio_chip | ||
82 | * @chip: the chip to register, with chip->base initialized | ||
83 | * Context: potentially before irqs or kmalloc will work | ||
84 | * | ||
85 | * Returns a negative errno if the chip can't be registered, such as | ||
86 | * because the chip->base is invalid or already associated with a | ||
87 | * different chip. Otherwise it returns zero as a success code. | ||
88 | */ | ||
89 | int gpiochip_add(struct gpio_chip *chip) | ||
90 | { | ||
91 | unsigned long flags; | ||
92 | int status = 0; | ||
93 | unsigned id; | ||
94 | |||
95 | /* NOTE chip->base negative is reserved to mean a request for | ||
96 | * dynamic allocation. We don't currently support that. | ||
97 | */ | ||
98 | |||
99 | if (chip->base < 0 || (chip->base + chip->ngpio) >= ARCH_NR_GPIOS) { | ||
100 | status = -EINVAL; | ||
101 | goto fail; | ||
102 | } | ||
103 | |||
104 | spin_lock_irqsave(&gpio_lock, flags); | ||
105 | |||
106 | /* these GPIO numbers must not be managed by another gpio_chip */ | ||
107 | for (id = chip->base; id < chip->base + chip->ngpio; id++) { | ||
108 | if (gpio_desc[id].chip != NULL) { | ||
109 | status = -EBUSY; | ||
110 | break; | ||
111 | } | ||
112 | } | ||
113 | if (status == 0) { | ||
114 | for (id = chip->base; id < chip->base + chip->ngpio; id++) { | ||
115 | gpio_desc[id].chip = chip; | ||
116 | gpio_desc[id].flags = 0; | ||
117 | } | ||
118 | } | ||
119 | |||
120 | spin_unlock_irqrestore(&gpio_lock, flags); | ||
121 | fail: | ||
122 | /* failures here can mean systems won't boot... */ | ||
123 | if (status) | ||
124 | pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n", | ||
125 | chip->base, chip->base + chip->ngpio, | ||
126 | chip->label ? : "generic"); | ||
127 | return status; | ||
128 | } | ||
129 | EXPORT_SYMBOL_GPL(gpiochip_add); | ||
130 | |||
131 | /** | ||
132 | * gpiochip_remove() - unregister a gpio_chip | ||
133 | * @chip: the chip to unregister | ||
134 | * | ||
135 | * A gpio_chip with any GPIOs still requested may not be removed. | ||
136 | */ | ||
137 | int gpiochip_remove(struct gpio_chip *chip) | ||
138 | { | ||
139 | unsigned long flags; | ||
140 | int status = 0; | ||
141 | unsigned id; | ||
142 | |||
143 | spin_lock_irqsave(&gpio_lock, flags); | ||
144 | |||
145 | for (id = chip->base; id < chip->base + chip->ngpio; id++) { | ||
146 | if (test_bit(FLAG_REQUESTED, &gpio_desc[id].flags)) { | ||
147 | status = -EBUSY; | ||
148 | break; | ||
149 | } | ||
150 | } | ||
151 | if (status == 0) { | ||
152 | for (id = chip->base; id < chip->base + chip->ngpio; id++) | ||
153 | gpio_desc[id].chip = NULL; | ||
154 | } | ||
155 | |||
156 | spin_unlock_irqrestore(&gpio_lock, flags); | ||
157 | return status; | ||
158 | } | ||
159 | EXPORT_SYMBOL_GPL(gpiochip_remove); | ||
160 | |||
161 | |||
162 | /* These "optional" allocation calls help prevent drivers from stomping | ||
163 | * on each other, and help provide better diagnostics in debugfs. | ||
164 | * They're called even less than the "set direction" calls. | ||
165 | */ | ||
166 | int gpio_request(unsigned gpio, const char *label) | ||
167 | { | ||
168 | struct gpio_desc *desc; | ||
169 | int status = -EINVAL; | ||
170 | unsigned long flags; | ||
171 | |||
172 | spin_lock_irqsave(&gpio_lock, flags); | ||
173 | |||
174 | if (gpio >= ARCH_NR_GPIOS) | ||
175 | goto done; | ||
176 | desc = &gpio_desc[gpio]; | ||
177 | if (desc->chip == NULL) | ||
178 | goto done; | ||
179 | |||
180 | /* NOTE: gpio_request() can be called in early boot, | ||
181 | * before IRQs are enabled. | ||
182 | */ | ||
183 | |||
184 | if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) { | ||
185 | desc_set_label(desc, label ? : "?"); | ||
186 | status = 0; | ||
187 | } else | ||
188 | status = -EBUSY; | ||
189 | |||
190 | done: | ||
191 | if (status) | ||
192 | pr_debug("gpio_request: gpio-%d (%s) status %d\n", | ||
193 | gpio, label ? : "?", status); | ||
194 | spin_unlock_irqrestore(&gpio_lock, flags); | ||
195 | return status; | ||
196 | } | ||
197 | EXPORT_SYMBOL_GPL(gpio_request); | ||
198 | |||
199 | void gpio_free(unsigned gpio) | ||
200 | { | ||
201 | unsigned long flags; | ||
202 | struct gpio_desc *desc; | ||
203 | |||
204 | if (gpio >= ARCH_NR_GPIOS) { | ||
205 | WARN_ON(extra_checks); | ||
206 | return; | ||
207 | } | ||
208 | |||
209 | spin_lock_irqsave(&gpio_lock, flags); | ||
210 | |||
211 | desc = &gpio_desc[gpio]; | ||
212 | if (desc->chip && test_and_clear_bit(FLAG_REQUESTED, &desc->flags)) | ||
213 | desc_set_label(desc, NULL); | ||
214 | else | ||
215 | WARN_ON(extra_checks); | ||
216 | |||
217 | spin_unlock_irqrestore(&gpio_lock, flags); | ||
218 | } | ||
219 | EXPORT_SYMBOL_GPL(gpio_free); | ||
220 | |||
221 | |||
222 | /** | ||
223 | * gpiochip_is_requested - return string iff signal was requested | ||
224 | * @chip: controller managing the signal | ||
225 | * @offset: of signal within controller's 0..(ngpio - 1) range | ||
226 | * | ||
227 | * Returns NULL if the GPIO is not currently requested, else a string. | ||
228 | * If debugfs support is enabled, the string returned is the label passed | ||
229 | * to gpio_request(); otherwise it is a meaningless constant. | ||
230 | * | ||
231 | * This function is for use by GPIO controller drivers. The label can | ||
232 | * help with diagnostics, and knowing that the signal is used as a GPIO | ||
233 | * can help avoid accidentally multiplexing it to another controller. | ||
234 | */ | ||
235 | const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset) | ||
236 | { | ||
237 | unsigned gpio = chip->base + offset; | ||
238 | |||
239 | if (gpio >= ARCH_NR_GPIOS || gpio_desc[gpio].chip != chip) | ||
240 | return NULL; | ||
241 | if (test_bit(FLAG_REQUESTED, &gpio_desc[gpio].flags) == 0) | ||
242 | return NULL; | ||
243 | #ifdef CONFIG_DEBUG_FS | ||
244 | return gpio_desc[gpio].label; | ||
245 | #else | ||
246 | return "?"; | ||
247 | #endif | ||
248 | } | ||
249 | EXPORT_SYMBOL_GPL(gpiochip_is_requested); | ||
250 | |||
251 | |||
252 | /* Drivers MUST set GPIO direction before making get/set calls. In | ||
253 | * some cases this is done in early boot, before IRQs are enabled. | ||
254 | * | ||
255 | * As a rule these aren't called more than once (except for drivers | ||
256 | * using the open-drain emulation idiom) so these are natural places | ||
257 | * to accumulate extra debugging checks. Note that we can't (yet) | ||
258 | * rely on gpio_request() having been called beforehand. | ||
259 | */ | ||
260 | |||
261 | int gpio_direction_input(unsigned gpio) | ||
262 | { | ||
263 | unsigned long flags; | ||
264 | struct gpio_chip *chip; | ||
265 | struct gpio_desc *desc = &gpio_desc[gpio]; | ||
266 | int status = -EINVAL; | ||
267 | |||
268 | spin_lock_irqsave(&gpio_lock, flags); | ||
269 | |||
270 | if (gpio >= ARCH_NR_GPIOS) | ||
271 | goto fail; | ||
272 | chip = desc->chip; | ||
273 | if (!chip || !chip->get || !chip->direction_input) | ||
274 | goto fail; | ||
275 | gpio -= chip->base; | ||
276 | if (gpio >= chip->ngpio) | ||
277 | goto fail; | ||
278 | gpio_ensure_requested(desc); | ||
279 | |||
280 | /* now we know the gpio is valid and chip won't vanish */ | ||
281 | |||
282 | spin_unlock_irqrestore(&gpio_lock, flags); | ||
283 | |||
284 | might_sleep_if(extra_checks && chip->can_sleep); | ||
285 | |||
286 | status = chip->direction_input(chip, gpio); | ||
287 | if (status == 0) | ||
288 | clear_bit(FLAG_IS_OUT, &desc->flags); | ||
289 | return status; | ||
290 | fail: | ||
291 | spin_unlock_irqrestore(&gpio_lock, flags); | ||
292 | if (status) | ||
293 | pr_debug("%s: gpio-%d status %d\n", | ||
294 | __FUNCTION__, gpio, status); | ||
295 | return status; | ||
296 | } | ||
297 | EXPORT_SYMBOL_GPL(gpio_direction_input); | ||
298 | |||
299 | int gpio_direction_output(unsigned gpio, int value) | ||
300 | { | ||
301 | unsigned long flags; | ||
302 | struct gpio_chip *chip; | ||
303 | struct gpio_desc *desc = &gpio_desc[gpio]; | ||
304 | int status = -EINVAL; | ||
305 | |||
306 | spin_lock_irqsave(&gpio_lock, flags); | ||
307 | |||
308 | if (gpio >= ARCH_NR_GPIOS) | ||
309 | goto fail; | ||
310 | chip = desc->chip; | ||
311 | if (!chip || !chip->set || !chip->direction_output) | ||
312 | goto fail; | ||
313 | gpio -= chip->base; | ||
314 | if (gpio >= chip->ngpio) | ||
315 | goto fail; | ||
316 | gpio_ensure_requested(desc); | ||
317 | |||
318 | /* now we know the gpio is valid and chip won't vanish */ | ||
319 | |||
320 | spin_unlock_irqrestore(&gpio_lock, flags); | ||
321 | |||
322 | might_sleep_if(extra_checks && chip->can_sleep); | ||
323 | |||
324 | status = chip->direction_output(chip, gpio, value); | ||
325 | if (status == 0) | ||
326 | set_bit(FLAG_IS_OUT, &desc->flags); | ||
327 | return status; | ||
328 | fail: | ||
329 | spin_unlock_irqrestore(&gpio_lock, flags); | ||
330 | if (status) | ||
331 | pr_debug("%s: gpio-%d status %d\n", | ||
332 | __FUNCTION__, gpio, status); | ||
333 | return status; | ||
334 | } | ||
335 | EXPORT_SYMBOL_GPL(gpio_direction_output); | ||
336 | |||
337 | |||
338 | /* I/O calls are only valid after configuration completed; the relevant | ||
339 | * "is this a valid GPIO" error checks should already have been done. | ||
340 | * | ||
341 | * "Get" operations are often inlinable as reading a pin value register, | ||
342 | * and masking the relevant bit in that register. | ||
343 | * | ||
344 | * When "set" operations are inlinable, they involve writing that mask to | ||
345 | * one register to set a low value, or a different register to set it high. | ||
346 | * Otherwise locking is needed, so there may be little value to inlining. | ||
347 | * | ||
348 | *------------------------------------------------------------------------ | ||
349 | * | ||
350 | * IMPORTANT!!! The hot paths -- get/set value -- assume that callers | ||
351 | * have requested the GPIO. That can include implicit requesting by | ||
352 | * a direction setting call. Marking a gpio as requested locks its chip | ||
353 | * in memory, guaranteeing that these table lookups need no more locking | ||
354 | * and that gpiochip_remove() will fail. | ||
355 | * | ||
356 | * REVISIT when debugging, consider adding some instrumentation to ensure | ||
357 | * that the GPIO was actually requested. | ||
358 | */ | ||
359 | |||
360 | /** | ||
361 | * __gpio_get_value() - return a gpio's value | ||
362 | * @gpio: gpio whose value will be returned | ||
363 | * Context: any | ||
364 | * | ||
365 | * This is used directly or indirectly to implement gpio_get_value(). | ||
366 | * It returns the zero or nonzero value provided by the associated | ||
367 | * gpio_chip.get() method; or zero if no such method is provided. | ||
368 | */ | ||
369 | int __gpio_get_value(unsigned gpio) | ||
370 | { | ||
371 | struct gpio_chip *chip; | ||
372 | |||
373 | chip = gpio_to_chip(gpio); | ||
374 | WARN_ON(extra_checks && chip->can_sleep); | ||
375 | return chip->get ? chip->get(chip, gpio - chip->base) : 0; | ||
376 | } | ||
377 | EXPORT_SYMBOL_GPL(__gpio_get_value); | ||
378 | |||
379 | /** | ||
380 | * __gpio_set_value() - assign a gpio's value | ||
381 | * @gpio: gpio whose value will be assigned | ||
382 | * @value: value to assign | ||
383 | * Context: any | ||
384 | * | ||
385 | * This is used directly or indirectly to implement gpio_set_value(). | ||
386 | * It invokes the associated gpio_chip.set() method. | ||
387 | */ | ||
388 | void __gpio_set_value(unsigned gpio, int value) | ||
389 | { | ||
390 | struct gpio_chip *chip; | ||
391 | |||
392 | chip = gpio_to_chip(gpio); | ||
393 | WARN_ON(extra_checks && chip->can_sleep); | ||
394 | chip->set(chip, gpio - chip->base, value); | ||
395 | } | ||
396 | EXPORT_SYMBOL_GPL(__gpio_set_value); | ||
397 | |||
398 | /** | ||
399 | * __gpio_cansleep() - report whether gpio value access will sleep | ||
400 | * @gpio: gpio in question | ||
401 | * Context: any | ||
402 | * | ||
403 | * This is used directly or indirectly to implement gpio_cansleep(). It | ||
404 | * returns nonzero if access reading or writing the GPIO value can sleep. | ||
405 | */ | ||
406 | int __gpio_cansleep(unsigned gpio) | ||
407 | { | ||
408 | struct gpio_chip *chip; | ||
409 | |||
410 | /* only call this on GPIOs that are valid! */ | ||
411 | chip = gpio_to_chip(gpio); | ||
412 | |||
413 | return chip->can_sleep; | ||
414 | } | ||
415 | EXPORT_SYMBOL_GPL(__gpio_cansleep); | ||
416 | |||
417 | |||
418 | |||
419 | /* There's no value in making it easy to inline GPIO calls that may sleep. | ||
420 | * Common examples include ones connected to I2C or SPI chips. | ||
421 | */ | ||
422 | |||
423 | int gpio_get_value_cansleep(unsigned gpio) | ||
424 | { | ||
425 | struct gpio_chip *chip; | ||
426 | |||
427 | might_sleep_if(extra_checks); | ||
428 | chip = gpio_to_chip(gpio); | ||
429 | return chip->get(chip, gpio - chip->base); | ||
430 | } | ||
431 | EXPORT_SYMBOL_GPL(gpio_get_value_cansleep); | ||
432 | |||
433 | void gpio_set_value_cansleep(unsigned gpio, int value) | ||
434 | { | ||
435 | struct gpio_chip *chip; | ||
436 | |||
437 | might_sleep_if(extra_checks); | ||
438 | chip = gpio_to_chip(gpio); | ||
439 | chip->set(chip, gpio - chip->base, value); | ||
440 | } | ||
441 | EXPORT_SYMBOL_GPL(gpio_set_value_cansleep); | ||
442 | |||
443 | |||
444 | #ifdef CONFIG_DEBUG_FS | ||
445 | |||
446 | #include <linux/debugfs.h> | ||
447 | #include <linux/seq_file.h> | ||
448 | |||
449 | |||
450 | static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip) | ||
451 | { | ||
452 | unsigned i; | ||
453 | unsigned gpio = chip->base; | ||
454 | struct gpio_desc *gdesc = &gpio_desc[gpio]; | ||
455 | int is_out; | ||
456 | |||
457 | for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) { | ||
458 | if (!test_bit(FLAG_REQUESTED, &gdesc->flags)) | ||
459 | continue; | ||
460 | |||
461 | is_out = test_bit(FLAG_IS_OUT, &gdesc->flags); | ||
462 | seq_printf(s, " gpio-%-3d (%-12s) %s %s", | ||
463 | gpio, gdesc->label, | ||
464 | is_out ? "out" : "in ", | ||
465 | chip->get | ||
466 | ? (chip->get(chip, i) ? "hi" : "lo") | ||
467 | : "? "); | ||
468 | |||
469 | if (!is_out) { | ||
470 | int irq = gpio_to_irq(gpio); | ||
471 | struct irq_desc *desc = irq_desc + irq; | ||
472 | |||
473 | /* This races with request_irq(), set_irq_type(), | ||
474 | * and set_irq_wake() ... but those are "rare". | ||
475 | * | ||
476 | * More significantly, trigger type flags aren't | ||
477 | * currently maintained by genirq. | ||
478 | */ | ||
479 | if (irq >= 0 && desc->action) { | ||
480 | char *trigger; | ||
481 | |||
482 | switch (desc->status & IRQ_TYPE_SENSE_MASK) { | ||
483 | case IRQ_TYPE_NONE: | ||
484 | trigger = "(default)"; | ||
485 | break; | ||
486 | case IRQ_TYPE_EDGE_FALLING: | ||
487 | trigger = "edge-falling"; | ||
488 | break; | ||
489 | case IRQ_TYPE_EDGE_RISING: | ||
490 | trigger = "edge-rising"; | ||
491 | break; | ||
492 | case IRQ_TYPE_EDGE_BOTH: | ||
493 | trigger = "edge-both"; | ||
494 | break; | ||
495 | case IRQ_TYPE_LEVEL_HIGH: | ||
496 | trigger = "level-high"; | ||
497 | break; | ||
498 | case IRQ_TYPE_LEVEL_LOW: | ||
499 | trigger = "level-low"; | ||
500 | break; | ||
501 | default: | ||
502 | trigger = "?trigger?"; | ||
503 | break; | ||
504 | } | ||
505 | |||
506 | seq_printf(s, " irq-%d %s%s", | ||
507 | irq, trigger, | ||
508 | (desc->status & IRQ_WAKEUP) | ||
509 | ? " wakeup" : ""); | ||
510 | } | ||
511 | } | ||
512 | |||
513 | seq_printf(s, "\n"); | ||
514 | } | ||
515 | } | ||
516 | |||
517 | static int gpiolib_show(struct seq_file *s, void *unused) | ||
518 | { | ||
519 | struct gpio_chip *chip = NULL; | ||
520 | unsigned gpio; | ||
521 | int started = 0; | ||
522 | |||
523 | /* REVISIT this isn't locked against gpio_chip removal ... */ | ||
524 | |||
525 | for (gpio = 0; gpio < ARCH_NR_GPIOS; gpio++) { | ||
526 | if (chip == gpio_desc[gpio].chip) | ||
527 | continue; | ||
528 | chip = gpio_desc[gpio].chip; | ||
529 | if (!chip) | ||
530 | continue; | ||
531 | |||
532 | seq_printf(s, "%sGPIOs %d-%d, %s%s:\n", | ||
533 | started ? "\n" : "", | ||
534 | chip->base, chip->base + chip->ngpio - 1, | ||
535 | chip->label ? : "generic", | ||
536 | chip->can_sleep ? ", can sleep" : ""); | ||
537 | started = 1; | ||
538 | if (chip->dbg_show) | ||
539 | chip->dbg_show(s, chip); | ||
540 | else | ||
541 | gpiolib_dbg_show(s, chip); | ||
542 | } | ||
543 | return 0; | ||
544 | } | ||
545 | |||
546 | static int gpiolib_open(struct inode *inode, struct file *file) | ||
547 | { | ||
548 | return single_open(file, gpiolib_show, NULL); | ||
549 | } | ||
550 | |||
551 | static struct file_operations gpiolib_operations = { | ||
552 | .open = gpiolib_open, | ||
553 | .read = seq_read, | ||
554 | .llseek = seq_lseek, | ||
555 | .release = single_release, | ||
556 | }; | ||
557 | |||
558 | static int __init gpiolib_debugfs_init(void) | ||
559 | { | ||
560 | /* /sys/kernel/debug/gpio */ | ||
561 | (void) debugfs_create_file("gpio", S_IFREG | S_IRUGO, | ||
562 | NULL, NULL, &gpiolib_operations); | ||
563 | return 0; | ||
564 | } | ||
565 | subsys_initcall(gpiolib_debugfs_init); | ||
566 | |||
567 | #endif /* DEBUG_FS */ | ||
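The calls exported above (gpio_request(), the direction setters, the __gpio_* value accessors, and gpio_free()) compose into the usual consumer sequence; gpio_get_value()/gpio_set_value() are the arch-provided wrappers the kernel-doc refers to. A short usage sketch, with the GPIO number and label chosen arbitrarily:

/* Consumer-side sketch; GPIO 42 and the "status-led" label are examples. */
#include <asm/gpio.h>

static int example_led_init(void)
{
        int status;

        status = gpio_request(42, "status-led");
        if (status < 0)
                return status;

        status = gpio_direction_output(42, 0);  /* start driven low */
        if (status < 0) {
                gpio_free(42);
                return status;
        }

        gpio_set_value(42, 1);                  /* later: turn the LED on */
        return 0;
}

For expander chips marked can_sleep, the gpio_*_value_cansleep() variants defined above must be used instead of the IRQ-safe accessors.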
diff --git a/drivers/gpio/mcp23s08.c b/drivers/gpio/mcp23s08.c new file mode 100644 index 000000000000..bb60e8c1a1f0 --- /dev/null +++ b/drivers/gpio/mcp23s08.c | |||
@@ -0,0 +1,357 @@ | |||
1 | /* | ||
2 | * mcp23s08.c - SPI gpio expander driver | ||
3 | */ | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/device.h> | ||
7 | #include <linux/workqueue.h> | ||
8 | #include <linux/mutex.h> | ||
9 | |||
10 | #include <linux/spi/spi.h> | ||
11 | #include <linux/spi/mcp23s08.h> | ||
12 | |||
13 | #include <asm/gpio.h> | ||
14 | |||
15 | |||
16 | /* Registers are all 8 bits wide. | ||
17 | * | ||
18 | * The mcp23s17 has twice as many bits, and can be configured to work | ||
19 | * with either 16 bit registers or with two adjacent 8 bit banks. | ||
20 | * | ||
21 | * Also, there are I2C versions of both chips. | ||
22 | */ | ||
23 | #define MCP_IODIR 0x00 /* init/reset: all ones */ | ||
24 | #define MCP_IPOL 0x01 | ||
25 | #define MCP_GPINTEN 0x02 | ||
26 | #define MCP_DEFVAL 0x03 | ||
27 | #define MCP_INTCON 0x04 | ||
28 | #define MCP_IOCON 0x05 | ||
29 | # define IOCON_SEQOP (1 << 5) | ||
30 | # define IOCON_HAEN (1 << 3) | ||
31 | # define IOCON_ODR (1 << 2) | ||
32 | # define IOCON_INTPOL (1 << 1) | ||
33 | #define MCP_GPPU 0x06 | ||
34 | #define MCP_INTF 0x07 | ||
35 | #define MCP_INTCAP 0x08 | ||
36 | #define MCP_GPIO 0x09 | ||
37 | #define MCP_OLAT 0x0a | ||
38 | |||
39 | struct mcp23s08 { | ||
40 | struct spi_device *spi; | ||
41 | u8 addr; | ||
42 | |||
43 | /* lock protects the cached values */ | ||
44 | struct mutex lock; | ||
45 | u8 cache[11]; | ||
46 | |||
47 | struct gpio_chip chip; | ||
48 | |||
49 | struct work_struct work; | ||
50 | }; | ||
51 | |||
52 | static int mcp23s08_read(struct mcp23s08 *mcp, unsigned reg) | ||
53 | { | ||
54 | u8 tx[2], rx[1]; | ||
55 | int status; | ||
56 | |||
57 | tx[0] = mcp->addr | 0x01; | ||
58 | tx[1] = reg; | ||
59 | status = spi_write_then_read(mcp->spi, tx, sizeof tx, rx, sizeof rx); | ||
60 | return (status < 0) ? status : rx[0]; | ||
61 | } | ||
62 | |||
63 | static int mcp23s08_write(struct mcp23s08 *mcp, unsigned reg, u8 val) | ||
64 | { | ||
65 | u8 tx[3]; | ||
66 | |||
67 | tx[0] = mcp->addr; | ||
68 | tx[1] = reg; | ||
69 | tx[2] = val; | ||
70 | return spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0); | ||
71 | } | ||
72 | |||
73 | static int | ||
74 | mcp23s08_read_regs(struct mcp23s08 *mcp, unsigned reg, u8 *vals, unsigned n) | ||
75 | { | ||
76 | u8 tx[2]; | ||
77 | |||
78 | if ((n + reg) > sizeof mcp->cache) | ||
79 | return -EINVAL; | ||
80 | tx[0] = mcp->addr | 0x01; | ||
81 | tx[1] = reg; | ||
82 | return spi_write_then_read(mcp->spi, tx, sizeof tx, vals, n); | ||
83 | } | ||
84 | |||
85 | /*----------------------------------------------------------------------*/ | ||
86 | |||
87 | static int mcp23s08_direction_input(struct gpio_chip *chip, unsigned offset) | ||
88 | { | ||
89 | struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip); | ||
90 | int status; | ||
91 | |||
92 | mutex_lock(&mcp->lock); | ||
93 | mcp->cache[MCP_IODIR] |= (1 << offset); | ||
94 | status = mcp23s08_write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]); | ||
95 | mutex_unlock(&mcp->lock); | ||
96 | return status; | ||
97 | } | ||
98 | |||
99 | static int mcp23s08_get(struct gpio_chip *chip, unsigned offset) | ||
100 | { | ||
101 | struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip); | ||
102 | int status; | ||
103 | |||
104 | mutex_lock(&mcp->lock); | ||
105 | |||
106 | /* REVISIT reading this clears any IRQ ... */ | ||
107 | status = mcp23s08_read(mcp, MCP_GPIO); | ||
108 | if (status < 0) | ||
109 | status = 0; | ||
110 | else { | ||
111 | mcp->cache[MCP_GPIO] = status; | ||
112 | status = !!(status & (1 << offset)); | ||
113 | } | ||
114 | mutex_unlock(&mcp->lock); | ||
115 | return status; | ||
116 | } | ||
117 | |||
118 | static int __mcp23s08_set(struct mcp23s08 *mcp, unsigned mask, int value) | ||
119 | { | ||
120 | u8 olat = mcp->cache[MCP_OLAT]; | ||
121 | |||
122 | if (value) | ||
123 | olat |= mask; | ||
124 | else | ||
125 | olat &= ~mask; | ||
126 | mcp->cache[MCP_OLAT] = olat; | ||
127 | return mcp23s08_write(mcp, MCP_OLAT, olat); | ||
128 | } | ||
129 | |||
130 | static void mcp23s08_set(struct gpio_chip *chip, unsigned offset, int value) | ||
131 | { | ||
132 | struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip); | ||
133 | u8 mask = 1 << offset; | ||
134 | |||
135 | mutex_lock(&mcp->lock); | ||
136 | __mcp23s08_set(mcp, mask, value); | ||
137 | mutex_unlock(&mcp->lock); | ||
138 | } | ||
139 | |||
140 | static int | ||
141 | mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value) | ||
142 | { | ||
143 | struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip); | ||
144 | u8 mask = 1 << offset; | ||
145 | int status; | ||
146 | |||
147 | mutex_lock(&mcp->lock); | ||
148 | status = __mcp23s08_set(mcp, mask, value); | ||
149 | if (status == 0) { | ||
150 | mcp->cache[MCP_IODIR] &= ~mask; | ||
151 | status = mcp23s08_write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]); | ||
152 | } | ||
153 | mutex_unlock(&mcp->lock); | ||
154 | return status; | ||
155 | } | ||
156 | |||
157 | /*----------------------------------------------------------------------*/ | ||
158 | |||
159 | #ifdef CONFIG_DEBUG_FS | ||
160 | |||
161 | #include <linux/seq_file.h> | ||
162 | |||
163 | /* | ||
164 | * This shows more info than the generic gpio dump code: | ||
165 | * pullups, deglitching, open drain drive. | ||
166 | */ | ||
167 | static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip) | ||
168 | { | ||
169 | struct mcp23s08 *mcp; | ||
170 | char bank; | ||
171 | unsigned t; | ||
172 | unsigned mask; | ||
173 | |||
174 | mcp = container_of(chip, struct mcp23s08, chip); | ||
175 | |||
176 | /* NOTE: we only handle one bank for now ... */ | ||
177 | bank = '0' + ((mcp->addr >> 1) & 0x3); | ||
178 | |||
179 | mutex_lock(&mcp->lock); | ||
180 | t = mcp23s08_read_regs(mcp, 0, mcp->cache, sizeof mcp->cache); | ||
181 | if (t < 0) { | ||
182 | seq_printf(s, " I/O ERROR %d\n", t); | ||
183 | goto done; | ||
184 | } | ||
185 | |||
186 | for (t = 0, mask = 1; t < 8; t++, mask <<= 1) { | ||
187 | const char *label; | ||
188 | |||
189 | label = gpiochip_is_requested(chip, t); | ||
190 | if (!label) | ||
191 | continue; | ||
192 | |||
193 | seq_printf(s, " gpio-%-3d P%c.%d (%-12s) %s %s %s", | ||
194 | chip->base + t, bank, t, label, | ||
195 | (mcp->cache[MCP_IODIR] & mask) ? "in " : "out", | ||
196 | (mcp->cache[MCP_GPIO] & mask) ? "hi" : "lo", | ||
197 | (mcp->cache[MCP_GPPU] & mask) ? " " : "up"); | ||
198 | /* NOTE: ignoring the irq-related registers */ | ||
199 | seq_printf(s, "\n"); | ||
200 | } | ||
201 | done: | ||
202 | mutex_unlock(&mcp->lock); | ||
203 | } | ||
204 | |||
205 | #else | ||
206 | #define mcp23s08_dbg_show NULL | ||
207 | #endif | ||
208 | |||
209 | /*----------------------------------------------------------------------*/ | ||
210 | |||
211 | static int mcp23s08_probe(struct spi_device *spi) | ||
212 | { | ||
213 | struct mcp23s08 *mcp; | ||
214 | struct mcp23s08_platform_data *pdata; | ||
215 | int status; | ||
216 | int do_update = 0; | ||
217 | |||
218 | pdata = spi->dev.platform_data; | ||
219 | if (!pdata || pdata->slave > 3 || !pdata->base) | ||
220 | return -ENODEV; | ||
221 | |||
222 | mcp = kzalloc(sizeof *mcp, GFP_KERNEL); | ||
223 | if (!mcp) | ||
224 | return -ENOMEM; | ||
225 | |||
226 | mutex_init(&mcp->lock); | ||
227 | |||
228 | mcp->spi = spi; | ||
229 | mcp->addr = 0x40 | (pdata->slave << 1); | ||
230 | |||
231 | mcp->chip.label = "mcp23s08", | ||
232 | |||
233 | mcp->chip.direction_input = mcp23s08_direction_input; | ||
234 | mcp->chip.get = mcp23s08_get; | ||
235 | mcp->chip.direction_output = mcp23s08_direction_output; | ||
236 | mcp->chip.set = mcp23s08_set; | ||
237 | mcp->chip.dbg_show = mcp23s08_dbg_show; | ||
238 | |||
239 | mcp->chip.base = pdata->base; | ||
240 | mcp->chip.ngpio = 8; | ||
241 | mcp->chip.can_sleep = 1; | ||
242 | |||
243 | spi_set_drvdata(spi, mcp); | ||
244 | |||
245 | /* verify MCP_IOCON.SEQOP = 0, so sequential reads work */ | ||
246 | status = mcp23s08_read(mcp, MCP_IOCON); | ||
247 | if (status < 0) | ||
248 | goto fail; | ||
249 | if (status & IOCON_SEQOP) { | ||
250 | status &= ~IOCON_SEQOP; | ||
251 | status = mcp23s08_write(mcp, MCP_IOCON, (u8) status); | ||
252 | if (status < 0) | ||
253 | goto fail; | ||
254 | } | ||
255 | |||
256 | /* configure ~100K pullups */ | ||
257 | status = mcp23s08_write(mcp, MCP_GPPU, pdata->pullups); | ||
258 | if (status < 0) | ||
259 | goto fail; | ||
260 | |||
261 | status = mcp23s08_read_regs(mcp, 0, mcp->cache, sizeof mcp->cache); | ||
262 | if (status < 0) | ||
263 | goto fail; | ||
264 | |||
265 | /* disable inverter on input */ | ||
266 | if (mcp->cache[MCP_IPOL] != 0) { | ||
267 | mcp->cache[MCP_IPOL] = 0; | ||
268 | do_update = 1; | ||
269 | } | ||
270 | |||
271 | /* disable irqs */ | ||
272 | if (mcp->cache[MCP_GPINTEN] != 0) { | ||
273 | mcp->cache[MCP_GPINTEN] = 0; | ||
274 | do_update = 1; | ||
275 | } | ||
276 | |||
277 | if (do_update) { | ||
278 | u8 tx[4]; | ||
279 | |||
280 | tx[0] = mcp->addr; | ||
281 | tx[1] = MCP_IPOL; | ||
282 | memcpy(&tx[2], &mcp->cache[MCP_IPOL], sizeof(tx) - 2); | ||
283 | status = spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0); | ||
284 | |||
285 | /* FIXME check status... */ | ||
286 | } | ||
287 | |||
288 | status = gpiochip_add(&mcp->chip); | ||
289 | |||
290 | /* NOTE: these chips have a relatively sane IRQ framework, with | ||
291 | * per-signal masking and level/edge triggering. It's not yet | ||
292 | * handled here... | ||
293 | */ | ||
294 | |||
295 | if (pdata->setup) { | ||
296 | status = pdata->setup(spi, mcp->chip.base, | ||
297 | mcp->chip.ngpio, pdata->context); | ||
298 | if (status < 0) | ||
299 | dev_dbg(&spi->dev, "setup --> %d\n", status); | ||
300 | } | ||
301 | |||
302 | return 0; | ||
303 | |||
304 | fail: | ||
305 | kfree(mcp); | ||
306 | return status; | ||
307 | } | ||
308 | |||
309 | static int mcp23s08_remove(struct spi_device *spi) | ||
310 | { | ||
311 | struct mcp23s08 *mcp = spi_get_drvdata(spi); | ||
312 | struct mcp23s08_platform_data *pdata = spi->dev.platform_data; | ||
313 | int status = 0; | ||
314 | |||
315 | if (pdata->teardown) { | ||
316 | status = pdata->teardown(spi, | ||
317 | mcp->chip.base, mcp->chip.ngpio, | ||
318 | pdata->context); | ||
319 | if (status < 0) { | ||
320 | dev_err(&spi->dev, "%s --> %d\n", "teardown", status); | ||
321 | return status; | ||
322 | } | ||
323 | } | ||
324 | |||
325 | status = gpiochip_remove(&mcp->chip); | ||
326 | if (status == 0) | ||
327 | kfree(mcp); | ||
328 | else | ||
329 | dev_err(&spi->dev, "%s --> %d\n", "remove", status); | ||
330 | return status; | ||
331 | } | ||
332 | |||
333 | static struct spi_driver mcp23s08_driver = { | ||
334 | .probe = mcp23s08_probe, | ||
335 | .remove = mcp23s08_remove, | ||
336 | .driver = { | ||
337 | .name = "mcp23s08", | ||
338 | .owner = THIS_MODULE, | ||
339 | }, | ||
340 | }; | ||
341 | |||
342 | /*----------------------------------------------------------------------*/ | ||
343 | |||
344 | static int __init mcp23s08_init(void) | ||
345 | { | ||
346 | return spi_register_driver(&mcp23s08_driver); | ||
347 | } | ||
348 | module_init(mcp23s08_init); | ||
349 | |||
350 | static void __exit mcp23s08_exit(void) | ||
351 | { | ||
352 | spi_unregister_driver(&mcp23s08_driver); | ||
353 | } | ||
354 | module_exit(mcp23s08_exit); | ||
355 | |||
356 | MODULE_LICENSE("GPL"); | ||
357 | |||
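mcp23s08_probe() above requires spi->dev.platform_data carrying the slave address select, the GPIO number base, and the pullup mask it programs into MCP_GPPU. A board might register the part roughly as follows; the bus number, chip select, clock rate, and GPIO base are arbitrary examples, and the platform-data layout is inferred from the probe() code above:

/* Hypothetical board file fragment. */
#include <linux/spi/spi.h>
#include <linux/spi/mcp23s08.h>

static struct mcp23s08_platform_data mcp_gpio_info = {
        .slave          = 0,            /* matches the chip's address pins */
        .base           = 160,          /* first GPIO number exposed */
        .pullups        = 0x0f,         /* pull up GP0..GP3 */
};

static struct spi_board_info board_spi_devices[] __initdata = {
        {
                .modalias       = "mcp23s08",
                .platform_data  = &mcp_gpio_info,
                .max_speed_hz   = 1000000,
                .bus_num        = 1,
                .chip_select    = 2,
        },
};

The table is normally passed to spi_register_board_info() from the board init code.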
diff --git a/drivers/gpio/pca9539.c b/drivers/gpio/pca9539.c new file mode 100644 index 000000000000..3e85c92a7d59 --- /dev/null +++ b/drivers/gpio/pca9539.c | |||
@@ -0,0 +1,271 @@ | |||
1 | /* | ||
2 | * pca9539.c - 16-bit I/O port with interrupt and reset | ||
3 | * | ||
4 | * Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com> | ||
5 | * Copyright (C) 2007 Marvell International Ltd. | ||
6 | * | ||
7 | * Derived from drivers/i2c/chips/pca9539.c | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; version 2 of the License. | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/i2c.h> | ||
17 | #include <linux/i2c/pca9539.h> | ||
18 | |||
19 | #include <asm/gpio.h> | ||
20 | |||
21 | |||
22 | #define NR_PCA9539_GPIOS 16 | ||
23 | |||
24 | #define PCA9539_INPUT 0 | ||
25 | #define PCA9539_OUTPUT 2 | ||
26 | #define PCA9539_INVERT 4 | ||
27 | #define PCA9539_DIRECTION 6 | ||
28 | |||
29 | struct pca9539_chip { | ||
30 | unsigned gpio_start; | ||
31 | uint16_t reg_output; | ||
32 | uint16_t reg_direction; | ||
33 | |||
34 | struct i2c_client *client; | ||
35 | struct gpio_chip gpio_chip; | ||
36 | }; | ||
37 | |||
38 | /* NOTE: we can't currently rely on fault codes to come from SMBus | ||
39 | * calls, so we map all errors to EIO here and return zero otherwise. | ||
40 | */ | ||
41 | static int pca9539_write_reg(struct pca9539_chip *chip, int reg, uint16_t val) | ||
42 | { | ||
43 | if (i2c_smbus_write_word_data(chip->client, reg, val) < 0) | ||
44 | return -EIO; | ||
45 | else | ||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | static int pca9539_read_reg(struct pca9539_chip *chip, int reg, uint16_t *val) | ||
50 | { | ||
51 | int ret; | ||
52 | |||
53 | ret = i2c_smbus_read_word_data(chip->client, reg); | ||
54 | if (ret < 0) { | ||
55 | dev_err(&chip->client->dev, "failed reading register\n"); | ||
56 | return -EIO; | ||
57 | } | ||
58 | |||
59 | *val = (uint16_t)ret; | ||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | static int pca9539_gpio_direction_input(struct gpio_chip *gc, unsigned off) | ||
64 | { | ||
65 | struct pca9539_chip *chip; | ||
66 | uint16_t reg_val; | ||
67 | int ret; | ||
68 | |||
69 | chip = container_of(gc, struct pca9539_chip, gpio_chip); | ||
70 | |||
71 | reg_val = chip->reg_direction | (1u << off); | ||
72 | ret = pca9539_write_reg(chip, PCA9539_DIRECTION, reg_val); | ||
73 | if (ret) | ||
74 | return ret; | ||
75 | |||
76 | chip->reg_direction = reg_val; | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | static int pca9539_gpio_direction_output(struct gpio_chip *gc, | ||
81 | unsigned off, int val) | ||
82 | { | ||
83 | struct pca9539_chip *chip; | ||
84 | uint16_t reg_val; | ||
85 | int ret; | ||
86 | |||
87 | chip = container_of(gc, struct pca9539_chip, gpio_chip); | ||
88 | |||
89 | /* set output level */ | ||
90 | if (val) | ||
91 | reg_val = chip->reg_output | (1u << off); | ||
92 | else | ||
93 | reg_val = chip->reg_output & ~(1u << off); | ||
94 | |||
95 | ret = pca9539_write_reg(chip, PCA9539_OUTPUT, reg_val); | ||
96 | if (ret) | ||
97 | return ret; | ||
98 | |||
99 | chip->reg_output = reg_val; | ||
100 | |||
101 | /* then direction */ | ||
102 | reg_val = chip->reg_direction & ~(1u << off); | ||
103 | ret = pca9539_write_reg(chip, PCA9539_DIRECTION, reg_val); | ||
104 | if (ret) | ||
105 | return ret; | ||
106 | |||
107 | chip->reg_direction = reg_val; | ||
108 | return 0; | ||
109 | } | ||
110 | |||
111 | static int pca9539_gpio_get_value(struct gpio_chip *gc, unsigned off) | ||
112 | { | ||
113 | struct pca9539_chip *chip; | ||
114 | uint16_t reg_val; | ||
115 | int ret; | ||
116 | |||
117 | chip = container_of(gc, struct pca9539_chip, gpio_chip); | ||
118 | |||
119 | ret = pca9539_read_reg(chip, PCA9539_INPUT, ®_val); | ||
120 | if (ret < 0) { | ||
121 | /* NOTE: diagnostic already emitted; that's all we should | ||
122 | * do unless gpio_*_value_cansleep() calls become different | ||
123 | * from their nonsleeping siblings (and report faults). | ||
124 | */ | ||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | return (reg_val & (1u << off)) ? 1 : 0; | ||
129 | } | ||
130 | |||
131 | static void pca9539_gpio_set_value(struct gpio_chip *gc, unsigned off, int val) | ||
132 | { | ||
133 | struct pca9539_chip *chip; | ||
134 | uint16_t reg_val; | ||
135 | int ret; | ||
136 | |||
137 | chip = container_of(gc, struct pca9539_chip, gpio_chip); | ||
138 | |||
139 | if (val) | ||
140 | reg_val = chip->reg_output | (1u << off); | ||
141 | else | ||
142 | reg_val = chip->reg_output & ~(1u << off); | ||
143 | |||
144 | ret = pca9539_write_reg(chip, PCA9539_OUTPUT, reg_val); | ||
145 | if (ret) | ||
146 | return; | ||
147 | |||
148 | chip->reg_output = reg_val; | ||
149 | } | ||
150 | |||
151 | static int pca9539_init_gpio(struct pca9539_chip *chip) | ||
152 | { | ||
153 | struct gpio_chip *gc; | ||
154 | |||
155 | gc = &chip->gpio_chip; | ||
156 | |||
157 | gc->direction_input = pca9539_gpio_direction_input; | ||
158 | gc->direction_output = pca9539_gpio_direction_output; | ||
159 | gc->get = pca9539_gpio_get_value; | ||
160 | gc->set = pca9539_gpio_set_value; | ||
161 | |||
162 | gc->base = chip->gpio_start; | ||
163 | gc->ngpio = NR_PCA9539_GPIOS; | ||
164 | gc->label = "pca9539"; | ||
165 | |||
166 | return gpiochip_add(gc); | ||
167 | } | ||
168 | |||
169 | static int __devinit pca9539_probe(struct i2c_client *client) | ||
170 | { | ||
171 | struct pca9539_platform_data *pdata; | ||
172 | struct pca9539_chip *chip; | ||
173 | int ret; | ||
174 | |||
175 | pdata = client->dev.platform_data; | ||
176 | if (pdata == NULL) | ||
177 | return -ENODEV; | ||
178 | |||
179 | chip = kzalloc(sizeof(struct pca9539_chip), GFP_KERNEL); | ||
180 | if (chip == NULL) | ||
181 | return -ENOMEM; | ||
182 | |||
183 | chip->client = client; | ||
184 | |||
185 | chip->gpio_start = pdata->gpio_base; | ||
186 | |||
187 | /* initialize cached registers from their original values. | ||
188 | * we can't share this chip with another i2c master. | ||
189 | */ | ||
190 | ret = pca9539_read_reg(chip, PCA9539_OUTPUT, &chip->reg_output); | ||
191 | if (ret) | ||
192 | goto out_failed; | ||
193 | |||
194 | ret = pca9539_read_reg(chip, PCA9539_DIRECTION, &chip->reg_direction); | ||
195 | if (ret) | ||
196 | goto out_failed; | ||
197 | |||
198 | /* set platform specific polarity inversion */ | ||
199 | ret = pca9539_write_reg(chip, PCA9539_INVERT, pdata->invert); | ||
200 | if (ret) | ||
201 | goto out_failed; | ||
202 | |||
203 | ret = pca9539_init_gpio(chip); | ||
204 | if (ret) | ||
205 | goto out_failed; | ||
206 | |||
207 | if (pdata->setup) { | ||
208 | ret = pdata->setup(client, chip->gpio_chip.base, | ||
209 | chip->gpio_chip.ngpio, pdata->context); | ||
210 | if (ret < 0) | ||
211 | dev_warn(&client->dev, "setup failed, %d\n", ret); | ||
212 | } | ||
213 | |||
214 | i2c_set_clientdata(client, chip); | ||
215 | return 0; | ||
216 | |||
217 | out_failed: | ||
218 | kfree(chip); | ||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | static int pca9539_remove(struct i2c_client *client) | ||
223 | { | ||
224 | struct pca9539_platform_data *pdata = client->dev.platform_data; | ||
225 | struct pca9539_chip *chip = i2c_get_clientdata(client); | ||
226 | int ret = 0; | ||
227 | |||
228 | if (pdata->teardown) { | ||
229 | ret = pdata->teardown(client, chip->gpio_chip.base, | ||
230 | chip->gpio_chip.ngpio, pdata->context); | ||
231 | if (ret < 0) { | ||
232 | dev_err(&client->dev, "%s failed, %d\n", | ||
233 | "teardown", ret); | ||
234 | return ret; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | ret = gpiochip_remove(&chip->gpio_chip); | ||
239 | if (ret) { | ||
240 | dev_err(&client->dev, "%s failed, %d\n", | ||
241 | "gpiochip_remove()", ret); | ||
242 | return ret; | ||
243 | } | ||
244 | |||
245 | kfree(chip); | ||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static struct i2c_driver pca9539_driver = { | ||
250 | .driver = { | ||
251 | .name = "pca9539", | ||
252 | }, | ||
253 | .probe = pca9539_probe, | ||
254 | .remove = pca9539_remove, | ||
255 | }; | ||
256 | |||
257 | static int __init pca9539_init(void) | ||
258 | { | ||
259 | return i2c_add_driver(&pca9539_driver); | ||
260 | } | ||
261 | module_init(pca9539_init); | ||
262 | |||
263 | static void __exit pca9539_exit(void) | ||
264 | { | ||
265 | i2c_del_driver(&pca9539_driver); | ||
266 | } | ||
267 | module_exit(pca9539_exit); | ||
268 | |||
269 | MODULE_AUTHOR("eric miao <eric.miao@marvell.com>"); | ||
270 | MODULE_DESCRIPTION("GPIO expander driver for PCA9539"); | ||
271 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpio/pcf857x.c b/drivers/gpio/pcf857x.c new file mode 100644 index 000000000000..c6b3b5378384 --- /dev/null +++ b/drivers/gpio/pcf857x.c | |||
@@ -0,0 +1,330 @@ | |||
1 | /* | ||
2 | * pcf857x - driver for pcf857x, pca857x, and pca967x I2C GPIO expanders | ||
3 | * | ||
4 | * Copyright (C) 2007 David Brownell | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/i2c.h> | ||
24 | #include <linux/i2c/pcf857x.h> | ||
25 | |||
26 | #include <asm/gpio.h> | ||
27 | |||
28 | |||
29 | /* | ||
30 | * The pcf857x, pca857x, and pca967x chips only expose one read and one | ||
31 | * write register. Writing a "one" bit (to match the reset state) lets | ||
32 | * that pin be used as an input; it's not an open-drain model, but acts | ||
33 | * a bit like one. This is described as "quasi-bidirectional"; read the | ||
34 | * chip documentation for details. | ||
35 | * | ||
36 | * Many other I2C GPIO expander chips (like the pca953x models) have | ||
37 | * more complex register models and more conventional circuitry using | ||
38 | * push/pull drivers. They often use the same 0x20..0x27 addresses as | ||
39 | * pcf857x parts, making the "legacy" I2C driver model problematic. | ||
40 | */ | ||
41 | struct pcf857x { | ||
42 | struct gpio_chip chip; | ||
43 | struct i2c_client *client; | ||
44 | unsigned out; /* software latch */ | ||
45 | }; | ||
46 | |||
47 | /*-------------------------------------------------------------------------*/ | ||
48 | |||
49 | /* Talk to 8-bit I/O expander */ | ||
50 | |||
51 | static int pcf857x_input8(struct gpio_chip *chip, unsigned offset) | ||
52 | { | ||
53 | struct pcf857x *gpio = container_of(chip, struct pcf857x, chip); | ||
54 | |||
55 | gpio->out |= (1 << offset); | ||
56 | return i2c_smbus_write_byte(gpio->client, gpio->out); | ||
57 | } | ||
58 | |||
59 | static int pcf857x_get8(struct gpio_chip *chip, unsigned offset) | ||
60 | { | ||
61 | struct pcf857x *gpio = container_of(chip, struct pcf857x, chip); | ||
62 | s32 value; | ||
63 | |||
64 | value = i2c_smbus_read_byte(gpio->client); | ||
65 | return (value < 0) ? 0 : (value & (1 << offset)); | ||
66 | } | ||
67 | |||
68 | static int pcf857x_output8(struct gpio_chip *chip, unsigned offset, int value) | ||
69 | { | ||
70 | struct pcf857x *gpio = container_of(chip, struct pcf857x, chip); | ||
71 | unsigned bit = 1 << offset; | ||
72 | |||
73 | if (value) | ||
74 | gpio->out |= bit; | ||
75 | else | ||
76 | gpio->out &= ~bit; | ||
77 | return i2c_smbus_write_byte(gpio->client, gpio->out); | ||
78 | } | ||
79 | |||
80 | static void pcf857x_set8(struct gpio_chip *chip, unsigned offset, int value) | ||
81 | { | ||
82 | pcf857x_output8(chip, offset, value); | ||
83 | } | ||
84 | |||
85 | /*-------------------------------------------------------------------------*/ | ||
86 | |||
87 | /* Talk to 16-bit I/O expander */ | ||
88 | |||
89 | static int i2c_write_le16(struct i2c_client *client, u16 word) | ||
90 | { | ||
91 | u8 buf[2] = { word & 0xff, word >> 8, }; | ||
92 | int status; | ||
93 | |||
94 | status = i2c_master_send(client, buf, 2); | ||
95 | return (status < 0) ? status : 0; | ||
96 | } | ||
97 | |||
98 | static int i2c_read_le16(struct i2c_client *client) | ||
99 | { | ||
100 | u8 buf[2]; | ||
101 | int status; | ||
102 | |||
103 | status = i2c_master_recv(client, buf, 2); | ||
104 | if (status < 0) | ||
105 | return status; | ||
106 | return (buf[1] << 8) | buf[0]; | ||
107 | } | ||
108 | |||
109 | static int pcf857x_input16(struct gpio_chip *chip, unsigned offset) | ||
110 | { | ||
111 | struct pcf857x *gpio = container_of(chip, struct pcf857x, chip); | ||
112 | |||
113 | gpio->out |= (1 << offset); | ||
114 | return i2c_write_le16(gpio->client, gpio->out); | ||
115 | } | ||
116 | |||
117 | static int pcf857x_get16(struct gpio_chip *chip, unsigned offset) | ||
118 | { | ||
119 | struct pcf857x *gpio = container_of(chip, struct pcf857x, chip); | ||
120 | int value; | ||
121 | |||
122 | value = i2c_read_le16(gpio->client); | ||
123 | return (value < 0) ? 0 : (value & (1 << offset)); | ||
124 | } | ||
125 | |||
126 | static int pcf857x_output16(struct gpio_chip *chip, unsigned offset, int value) | ||
127 | { | ||
128 | struct pcf857x *gpio = container_of(chip, struct pcf857x, chip); | ||
129 | unsigned bit = 1 << offset; | ||
130 | |||
131 | if (value) | ||
132 | gpio->out |= bit; | ||
133 | else | ||
134 | gpio->out &= ~bit; | ||
135 | return i2c_write_le16(gpio->client, gpio->out); | ||
136 | } | ||
137 | |||
138 | static void pcf857x_set16(struct gpio_chip *chip, unsigned offset, int value) | ||
139 | { | ||
140 | pcf857x_output16(chip, offset, value); | ||
141 | } | ||
142 | |||
143 | /*-------------------------------------------------------------------------*/ | ||
144 | |||
145 | static int pcf857x_probe(struct i2c_client *client) | ||
146 | { | ||
147 | struct pcf857x_platform_data *pdata; | ||
148 | struct pcf857x *gpio; | ||
149 | int status; | ||
150 | |||
151 | pdata = client->dev.platform_data; | ||
152 | if (!pdata) | ||
153 | return -ENODEV; | ||
154 | |||
155 | /* Allocate, initialize, and register this gpio_chip. */ | ||
156 | gpio = kzalloc(sizeof *gpio, GFP_KERNEL); | ||
157 | if (!gpio) | ||
158 | return -ENOMEM; | ||
159 | |||
160 | gpio->chip.base = pdata->gpio_base; | ||
161 | gpio->chip.can_sleep = 1; | ||
162 | |||
163 | /* NOTE: the OnSemi jlc1562b is also largely compatible with | ||
164 | * these parts, notably for output. It has a low-resolution | ||
165 | * DAC instead of pin change IRQs; and its inputs can be the | ||
166 | * result of comparators. | ||
167 | */ | ||
168 | |||
169 | /* 8574 addresses are 0x20..0x27; 8574a uses 0x38..0x3f; | ||
170 | * 9670, 9672, 9674, and 9674a use quite a variety. | ||
171 | * | ||
172 | * NOTE: we don't distinguish here between *4 and *4a parts. | ||
173 | */ | ||
174 | if (strcmp(client->name, "pcf8574") == 0 | ||
175 | || strcmp(client->name, "pca8574") == 0 | ||
176 | || strcmp(client->name, "pca9670") == 0 | ||
177 | || strcmp(client->name, "pca9672") == 0 | ||
178 | || strcmp(client->name, "pca9674") == 0 | ||
179 | ) { | ||
180 | gpio->chip.ngpio = 8; | ||
181 | gpio->chip.direction_input = pcf857x_input8; | ||
182 | gpio->chip.get = pcf857x_get8; | ||
183 | gpio->chip.direction_output = pcf857x_output8; | ||
184 | gpio->chip.set = pcf857x_set8; | ||
185 | |||
186 | if (!i2c_check_functionality(client->adapter, | ||
187 | I2C_FUNC_SMBUS_BYTE)) | ||
188 | status = -EIO; | ||
189 | |||
190 | /* fail if there's no chip present */ | ||
191 | else | ||
192 | status = i2c_smbus_read_byte(client); | ||
193 | |||
194 | /* '75/'75c addresses are 0x20..0x27, just like the '74; | ||
195 | * the '75c doesn't have a current source pulling high. | ||
196 | * 9671, 9673, and 9675 use quite a variety of addresses. | ||
197 | * | ||
198 | * NOTE: we don't distinguish here between '75 and '75c parts. | ||
199 | */ | ||
200 | } else if (strcmp(client->name, "pcf8575") == 0 | ||
201 | || strcmp(client->name, "pca8575") == 0 | ||
202 | || strcmp(client->name, "pca9671") == 0 | ||
203 | || strcmp(client->name, "pca9673") == 0 | ||
204 | || strcmp(client->name, "pca9675") == 0 | ||
205 | ) { | ||
206 | gpio->chip.ngpio = 16; | ||
207 | gpio->chip.direction_input = pcf857x_input16; | ||
208 | gpio->chip.get = pcf857x_get16; | ||
209 | gpio->chip.direction_output = pcf857x_output16; | ||
210 | gpio->chip.set = pcf857x_set16; | ||
211 | |||
212 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) | ||
213 | status = -EIO; | ||
214 | |||
215 | /* fail if there's no chip present */ | ||
216 | else | ||
217 | status = i2c_read_le16(client); | ||
218 | |||
219 | } else | ||
220 | status = -ENODEV; | ||
221 | |||
222 | if (status < 0) | ||
223 | goto fail; | ||
224 | |||
225 | gpio->chip.label = client->name; | ||
226 | |||
227 | gpio->client = client; | ||
228 | i2c_set_clientdata(client, gpio); | ||
229 | |||
230 | /* NOTE: these chips have strange "quasi-bidirectional" I/O pins. | ||
231 | * We can't actually know whether a pin is configured (a) as output | ||
232 | * and driving the signal low, or (b) as input and reporting a low | ||
233 | * value ... without knowing the last value written since the chip | ||
234 | * came out of reset (if any). We can't read the latched output. | ||
235 | * | ||
236 | * In short, the only reliable solution for setting up pin direction | ||
237 | * is to do it explicitly. The setup() method can do that, but it | ||
238 | * may cause transient glitching since it can't know the last value | ||
239 | * written (some pins may need to be driven low). | ||
240 | * | ||
241 | * Using pdata->n_latch avoids that trouble. When left initialized | ||
242 | * to zero, our software copy of the "latch" then matches the chip's | ||
243 | * all-ones reset state. Otherwise it flags pins to be driven low. | ||
244 | */ | ||
245 | gpio->out = ~pdata->n_latch; | ||
246 | |||
247 | status = gpiochip_add(&gpio->chip); | ||
248 | if (status < 0) | ||
249 | goto fail; | ||
250 | |||
251 | /* NOTE: these chips can issue "some pin-changed" IRQs, which we | ||
252 | * don't yet even try to use. Among other issues, the relevant | ||
253 | * genirq state isn't available to modular drivers; and most irq | ||
254 | * methods can't be called from sleeping contexts. | ||
255 | */ | ||
256 | |||
257 | dev_info(&client->dev, "gpios %d..%d on a %s%s\n", | ||
258 | gpio->chip.base, | ||
259 | gpio->chip.base + gpio->chip.ngpio - 1, | ||
260 | client->name, | ||
261 | client->irq ? " (irq ignored)" : ""); | ||
262 | |||
263 | /* Let platform code set up the GPIOs and their users. | ||
264 | * Now is the first time anyone could use them. | ||
265 | */ | ||
266 | if (pdata->setup) { | ||
267 | status = pdata->setup(client, | ||
268 | gpio->chip.base, gpio->chip.ngpio, | ||
269 | pdata->context); | ||
270 | if (status < 0) | ||
271 | dev_warn(&client->dev, "setup --> %d\n", status); | ||
272 | } | ||
273 | |||
274 | return 0; | ||
275 | |||
276 | fail: | ||
277 | dev_dbg(&client->dev, "probe error %d for '%s'\n", | ||
278 | status, client->name); | ||
279 | kfree(gpio); | ||
280 | return status; | ||
281 | } | ||
282 | |||
283 | static int pcf857x_remove(struct i2c_client *client) | ||
284 | { | ||
285 | struct pcf857x_platform_data *pdata = client->dev.platform_data; | ||
286 | struct pcf857x *gpio = i2c_get_clientdata(client); | ||
287 | int status = 0; | ||
288 | |||
289 | if (pdata->teardown) { | ||
290 | status = pdata->teardown(client, | ||
291 | gpio->chip.base, gpio->chip.ngpio, | ||
292 | pdata->context); | ||
293 | if (status < 0) { | ||
294 | dev_err(&client->dev, "%s --> %d\n", | ||
295 | "teardown", status); | ||
296 | return status; | ||
297 | } | ||
298 | } | ||
299 | |||
300 | status = gpiochip_remove(&gpio->chip); | ||
301 | if (status == 0) | ||
302 | kfree(gpio); | ||
303 | else | ||
304 | dev_err(&client->dev, "%s --> %d\n", "remove", status); | ||
305 | return status; | ||
306 | } | ||
307 | |||
308 | static struct i2c_driver pcf857x_driver = { | ||
309 | .driver = { | ||
310 | .name = "pcf857x", | ||
311 | .owner = THIS_MODULE, | ||
312 | }, | ||
313 | .probe = pcf857x_probe, | ||
314 | .remove = pcf857x_remove, | ||
315 | }; | ||
316 | |||
317 | static int __init pcf857x_init(void) | ||
318 | { | ||
319 | return i2c_add_driver(&pcf857x_driver); | ||
320 | } | ||
321 | module_init(pcf857x_init); | ||
322 | |||
323 | static void __exit pcf857x_exit(void) | ||
324 | { | ||
325 | i2c_del_driver(&pcf857x_driver); | ||
326 | } | ||
327 | module_exit(pcf857x_exit); | ||
328 | |||
329 | MODULE_LICENSE("GPL"); | ||
330 | MODULE_AUTHOR("David Brownell"); | ||
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig index bd7082c2443d..b21593f93586 100644 --- a/drivers/i2c/chips/Kconfig +++ b/drivers/i2c/chips/Kconfig | |||
@@ -54,8 +54,8 @@ config PCF8575 | |||
54 | hardware. If unsure, say N. | 54 | hardware. If unsure, say N. |
55 | 55 | ||
56 | config SENSORS_PCA9539 | 56 | config SENSORS_PCA9539 |
57 | tristate "Philips PCA9539 16-bit I/O port" | 57 | tristate "Philips PCA9539 16-bit I/O port (DEPRECATED)" |
58 | depends on EXPERIMENTAL | 58 | depends on EXPERIMENTAL && GPIO_PCA9539 = "n" |
59 | help | 59 | help |
60 | If you say yes here you get support for the Philips PCA9539 | 60 | If you say yes here you get support for the Philips PCA9539 |
61 | 16-bit I/O port. | 61 | 16-bit I/O port. |
@@ -63,6 +63,9 @@ config SENSORS_PCA9539 | |||
63 | This driver can also be built as a module. If so, the module | 63 | This driver can also be built as a module. If so, the module |
64 | will be called pca9539. | 64 | will be called pca9539. |
65 | 65 | ||
66 | This driver is deprecated and will be dropped soon. Use | ||
67 | drivers/gpio/pca9539.c instead. | ||
68 | |||
66 | config SENSORS_PCF8591 | 69 | config SENSORS_PCF8591 |
67 | tristate "Philips PCF8591" | 70 | tristate "Philips PCF8591" |
68 | depends on EXPERIMENTAL | 71 | depends on EXPERIMENTAL |
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index a193dfbf99d2..a5dc78ae62d4 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig | |||
@@ -44,8 +44,8 @@ source "drivers/infiniband/hw/ipath/Kconfig" | |||
44 | source "drivers/infiniband/hw/ehca/Kconfig" | 44 | source "drivers/infiniband/hw/ehca/Kconfig" |
45 | source "drivers/infiniband/hw/amso1100/Kconfig" | 45 | source "drivers/infiniband/hw/amso1100/Kconfig" |
46 | source "drivers/infiniband/hw/cxgb3/Kconfig" | 46 | source "drivers/infiniband/hw/cxgb3/Kconfig" |
47 | |||
48 | source "drivers/infiniband/hw/mlx4/Kconfig" | 47 | source "drivers/infiniband/hw/mlx4/Kconfig" |
48 | source "drivers/infiniband/hw/nes/Kconfig" | ||
49 | 49 | ||
50 | source "drivers/infiniband/ulp/ipoib/Kconfig" | 50 | source "drivers/infiniband/ulp/ipoib/Kconfig" |
51 | 51 | ||
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile index 75f325e40b54..ed35e4496241 100644 --- a/drivers/infiniband/Makefile +++ b/drivers/infiniband/Makefile | |||
@@ -5,6 +5,7 @@ obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/ | |||
5 | obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/ | 5 | obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/ |
6 | obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/ | 6 | obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/ |
7 | obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/ | 7 | obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/ |
8 | obj-$(CONFIG_INFINIBAND_NES) += hw/nes/ | ||
8 | obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ | 9 | obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ |
9 | obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ | 10 | obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ |
10 | obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/ | 11 | obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/ |
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index c0150147d347..638b727d42e0 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
@@ -974,6 +974,9 @@ static void cm_format_req(struct cm_req_msg *req_msg, | |||
974 | struct cm_id_private *cm_id_priv, | 974 | struct cm_id_private *cm_id_priv, |
975 | struct ib_cm_req_param *param) | 975 | struct ib_cm_req_param *param) |
976 | { | 976 | { |
977 | struct ib_sa_path_rec *pri_path = param->primary_path; | ||
978 | struct ib_sa_path_rec *alt_path = param->alternate_path; | ||
979 | |||
977 | cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID, | 980 | cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID, |
978 | cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ)); | 981 | cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ)); |
979 | 982 | ||
@@ -997,35 +1000,46 @@ static void cm_format_req(struct cm_req_msg *req_msg, | |||
997 | cm_req_set_max_cm_retries(req_msg, param->max_cm_retries); | 1000 | cm_req_set_max_cm_retries(req_msg, param->max_cm_retries); |
998 | cm_req_set_srq(req_msg, param->srq); | 1001 | cm_req_set_srq(req_msg, param->srq); |
999 | 1002 | ||
1000 | req_msg->primary_local_lid = param->primary_path->slid; | 1003 | if (pri_path->hop_limit <= 1) { |
1001 | req_msg->primary_remote_lid = param->primary_path->dlid; | 1004 | req_msg->primary_local_lid = pri_path->slid; |
1002 | req_msg->primary_local_gid = param->primary_path->sgid; | 1005 | req_msg->primary_remote_lid = pri_path->dlid; |
1003 | req_msg->primary_remote_gid = param->primary_path->dgid; | 1006 | } else { |
1004 | cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label); | 1007 | /* Work-around until there's a way to obtain remote LID info */ |
1005 | cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate); | 1008 | req_msg->primary_local_lid = IB_LID_PERMISSIVE; |
1006 | req_msg->primary_traffic_class = param->primary_path->traffic_class; | 1009 | req_msg->primary_remote_lid = IB_LID_PERMISSIVE; |
1007 | req_msg->primary_hop_limit = param->primary_path->hop_limit; | 1010 | } |
1008 | cm_req_set_primary_sl(req_msg, param->primary_path->sl); | 1011 | req_msg->primary_local_gid = pri_path->sgid; |
1009 | cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */ | 1012 | req_msg->primary_remote_gid = pri_path->dgid; |
1013 | cm_req_set_primary_flow_label(req_msg, pri_path->flow_label); | ||
1014 | cm_req_set_primary_packet_rate(req_msg, pri_path->rate); | ||
1015 | req_msg->primary_traffic_class = pri_path->traffic_class; | ||
1016 | req_msg->primary_hop_limit = pri_path->hop_limit; | ||
1017 | cm_req_set_primary_sl(req_msg, pri_path->sl); | ||
1018 | cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1)); | ||
1010 | cm_req_set_primary_local_ack_timeout(req_msg, | 1019 | cm_req_set_primary_local_ack_timeout(req_msg, |
1011 | cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, | 1020 | cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, |
1012 | param->primary_path->packet_life_time)); | 1021 | pri_path->packet_life_time)); |
1013 | 1022 | ||
1014 | if (param->alternate_path) { | 1023 | if (alt_path) { |
1015 | req_msg->alt_local_lid = param->alternate_path->slid; | 1024 | if (alt_path->hop_limit <= 1) { |
1016 | req_msg->alt_remote_lid = param->alternate_path->dlid; | 1025 | req_msg->alt_local_lid = alt_path->slid; |
1017 | req_msg->alt_local_gid = param->alternate_path->sgid; | 1026 | req_msg->alt_remote_lid = alt_path->dlid; |
1018 | req_msg->alt_remote_gid = param->alternate_path->dgid; | 1027 | } else { |
1028 | req_msg->alt_local_lid = IB_LID_PERMISSIVE; | ||
1029 | req_msg->alt_remote_lid = IB_LID_PERMISSIVE; | ||
1030 | } | ||
1031 | req_msg->alt_local_gid = alt_path->sgid; | ||
1032 | req_msg->alt_remote_gid = alt_path->dgid; | ||
1019 | cm_req_set_alt_flow_label(req_msg, | 1033 | cm_req_set_alt_flow_label(req_msg, |
1020 | param->alternate_path->flow_label); | 1034 | alt_path->flow_label); |
1021 | cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate); | 1035 | cm_req_set_alt_packet_rate(req_msg, alt_path->rate); |
1022 | req_msg->alt_traffic_class = param->alternate_path->traffic_class; | 1036 | req_msg->alt_traffic_class = alt_path->traffic_class; |
1023 | req_msg->alt_hop_limit = param->alternate_path->hop_limit; | 1037 | req_msg->alt_hop_limit = alt_path->hop_limit; |
1024 | cm_req_set_alt_sl(req_msg, param->alternate_path->sl); | 1038 | cm_req_set_alt_sl(req_msg, alt_path->sl); |
1025 | cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */ | 1039 | cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1)); |
1026 | cm_req_set_alt_local_ack_timeout(req_msg, | 1040 | cm_req_set_alt_local_ack_timeout(req_msg, |
1027 | cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, | 1041 | cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, |
1028 | param->alternate_path->packet_life_time)); | 1042 | alt_path->packet_life_time)); |
1029 | } | 1043 | } |
1030 | 1044 | ||
1031 | if (param->private_data && param->private_data_len) | 1045 | if (param->private_data && param->private_data_len) |
@@ -1441,6 +1455,34 @@ out: | |||
1441 | return listen_cm_id_priv; | 1455 | return listen_cm_id_priv; |
1442 | } | 1456 | } |
1443 | 1457 | ||
1458 | /* | ||
1459 | * Work-around for inter-subnet connections. If the LIDs are permissive, | ||
1460 | * we need to override the LID/SL data in the REQ with the LID information | ||
1461 | * in the work completion. | ||
1462 | */ | ||
1463 | static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc) | ||
1464 | { | ||
1465 | if (!cm_req_get_primary_subnet_local(req_msg)) { | ||
1466 | if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) { | ||
1467 | req_msg->primary_local_lid = cpu_to_be16(wc->slid); | ||
1468 | cm_req_set_primary_sl(req_msg, wc->sl); | ||
1469 | } | ||
1470 | |||
1471 | if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE) | ||
1472 | req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits); | ||
1473 | } | ||
1474 | |||
1475 | if (!cm_req_get_alt_subnet_local(req_msg)) { | ||
1476 | if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) { | ||
1477 | req_msg->alt_local_lid = cpu_to_be16(wc->slid); | ||
1478 | cm_req_set_alt_sl(req_msg, wc->sl); | ||
1479 | } | ||
1480 | |||
1481 | if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE) | ||
1482 | req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits); | ||
1483 | } | ||
1484 | } | ||
1485 | |||
1444 | static int cm_req_handler(struct cm_work *work) | 1486 | static int cm_req_handler(struct cm_work *work) |
1445 | { | 1487 | { |
1446 | struct ib_cm_id *cm_id; | 1488 | struct ib_cm_id *cm_id; |
@@ -1481,6 +1523,7 @@ static int cm_req_handler(struct cm_work *work) | |||
1481 | cm_id_priv->id.service_id = req_msg->service_id; | 1523 | cm_id_priv->id.service_id = req_msg->service_id; |
1482 | cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); | 1524 | cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); |
1483 | 1525 | ||
1526 | cm_process_routed_req(req_msg, work->mad_recv_wc->wc); | ||
1484 | cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); | 1527 | cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); |
1485 | ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); | 1528 | ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); |
1486 | if (ret) { | 1529 | if (ret) { |
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c index 6c7aa59794d4..7f00347364f7 100644 --- a/drivers/infiniband/core/fmr_pool.c +++ b/drivers/infiniband/core/fmr_pool.c | |||
@@ -320,10 +320,13 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, | |||
320 | .max_maps = pool->max_remaps, | 320 | .max_maps = pool->max_remaps, |
321 | .page_shift = params->page_shift | 321 | .page_shift = params->page_shift |
322 | }; | 322 | }; |
323 | int bytes_per_fmr = sizeof *fmr; | ||
324 | |||
325 | if (pool->cache_bucket) | ||
326 | bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64); | ||
323 | 327 | ||
324 | for (i = 0; i < params->pool_size; ++i) { | 328 | for (i = 0; i < params->pool_size; ++i) { |
325 | fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64), | 329 | fmr = kmalloc(bytes_per_fmr, GFP_KERNEL); |
326 | GFP_KERNEL); | ||
327 | if (!fmr) { | 330 | if (!fmr) { |
328 | printk(KERN_WARNING PFX "failed to allocate fmr " | 331 | printk(KERN_WARNING PFX "failed to allocate fmr " |
329 | "struct for FMR %d\n", i); | 332 | "struct for FMR %d\n", i); |
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h index f281d16040f5..92cce8aacbb7 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/drivers/infiniband/hw/ehca/ehca_classes.h | |||
@@ -101,6 +101,7 @@ struct ehca_sport { | |||
101 | spinlock_t mod_sqp_lock; | 101 | spinlock_t mod_sqp_lock; |
102 | enum ib_port_state port_state; | 102 | enum ib_port_state port_state; |
103 | struct ehca_sma_attr saved_attr; | 103 | struct ehca_sma_attr saved_attr; |
104 | u32 pma_qp_nr; | ||
104 | }; | 105 | }; |
105 | 106 | ||
106 | #define HCA_CAP_MR_PGSIZE_4K 0x80000000 | 107 | #define HCA_CAP_MR_PGSIZE_4K 0x80000000 |
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index 863b34fa9ff9..b5ca94c6b8d9 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c | |||
@@ -403,6 +403,8 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe) | |||
403 | sport->port_state = IB_PORT_ACTIVE; | 403 | sport->port_state = IB_PORT_ACTIVE; |
404 | dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE, | 404 | dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE, |
405 | "is active"); | 405 | "is active"); |
406 | ehca_query_sma_attr(shca, port, | ||
407 | &sport->saved_attr); | ||
406 | } else | 408 | } else |
407 | notify_port_conf_change(shca, port); | 409 | notify_port_conf_change(shca, port); |
408 | break; | 410 | break; |
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h index c469bfde2708..a8a2ea585d2f 100644 --- a/drivers/infiniband/hw/ehca/ehca_iverbs.h +++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h | |||
@@ -187,6 +187,11 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context); | |||
187 | 187 | ||
188 | int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); | 188 | int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); |
189 | 189 | ||
190 | int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | ||
191 | struct ib_wc *in_wc, struct ib_grh *in_grh, | ||
192 | struct ib_mad *in_mad, | ||
193 | struct ib_mad *out_mad); | ||
194 | |||
190 | void ehca_poll_eqs(unsigned long data); | 195 | void ehca_poll_eqs(unsigned long data); |
191 | 196 | ||
192 | int ehca_calc_ipd(struct ehca_shca *shca, int port, | 197 | int ehca_calc_ipd(struct ehca_shca *shca, int port, |
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 84c9b7b8669b..a86ebcc79a95 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c | |||
@@ -472,7 +472,7 @@ int ehca_init_device(struct ehca_shca *shca) | |||
472 | shca->ib_device.dealloc_fmr = ehca_dealloc_fmr; | 472 | shca->ib_device.dealloc_fmr = ehca_dealloc_fmr; |
473 | shca->ib_device.attach_mcast = ehca_attach_mcast; | 473 | shca->ib_device.attach_mcast = ehca_attach_mcast; |
474 | shca->ib_device.detach_mcast = ehca_detach_mcast; | 474 | shca->ib_device.detach_mcast = ehca_detach_mcast; |
475 | /* shca->ib_device.process_mad = ehca_process_mad; */ | 475 | shca->ib_device.process_mad = ehca_process_mad; |
476 | shca->ib_device.mmap = ehca_mmap; | 476 | shca->ib_device.mmap = ehca_mmap; |
477 | 477 | ||
478 | if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) { | 478 | if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) { |
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c index 3aacc8cf1e44..2ce8cffb8664 100644 --- a/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/drivers/infiniband/hw/ehca/ehca_reqs.c | |||
@@ -209,6 +209,10 @@ static inline int ehca_write_swqe(struct ehca_qp *qp, | |||
209 | ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp); | 209 | ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp); |
210 | return -EINVAL; | 210 | return -EINVAL; |
211 | } | 211 | } |
212 | if (unlikely(send_wr->wr.ud.remote_qpn == 0)) { | ||
213 | ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num); | ||
214 | return -EINVAL; | ||
215 | } | ||
212 | my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah); | 216 | my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah); |
213 | wqe_p->u.ud_av.ud_av = my_av->av; | 217 | wqe_p->u.ud_av.ud_av = my_av->av; |
214 | 218 | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c index 79e72b25b252..706d97ad5555 100644 --- a/drivers/infiniband/hw/ehca/ehca_sqp.c +++ b/drivers/infiniband/hw/ehca/ehca_sqp.c | |||
@@ -39,12 +39,18 @@ | |||
39 | * POSSIBILITY OF SUCH DAMAGE. | 39 | * POSSIBILITY OF SUCH DAMAGE. |
40 | */ | 40 | */ |
41 | 41 | ||
42 | #include <rdma/ib_mad.h> | ||
42 | 43 | ||
43 | #include "ehca_classes.h" | 44 | #include "ehca_classes.h" |
44 | #include "ehca_tools.h" | 45 | #include "ehca_tools.h" |
45 | #include "ehca_iverbs.h" | 46 | #include "ehca_iverbs.h" |
46 | #include "hcp_if.h" | 47 | #include "hcp_if.h" |
47 | 48 | ||
49 | #define IB_MAD_STATUS_REDIRECT __constant_htons(0x0002) | ||
50 | #define IB_MAD_STATUS_UNSUP_VERSION __constant_htons(0x0004) | ||
51 | #define IB_MAD_STATUS_UNSUP_METHOD __constant_htons(0x0008) | ||
52 | |||
53 | #define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001) | ||
48 | 54 | ||
49 | /** | 55 | /** |
50 | * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue | 56 | * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue |
@@ -83,6 +89,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca, | |||
83 | port, ret); | 89 | port, ret); |
84 | return ret; | 90 | return ret; |
85 | } | 91 | } |
92 | shca->sport[port - 1].pma_qp_nr = pma_qp_nr; | ||
93 | ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x", | ||
94 | port, pma_qp_nr); | ||
86 | break; | 95 | break; |
87 | default: | 96 | default: |
88 | ehca_err(&shca->ib_device, "invalid qp_type=%x", | 97 | ehca_err(&shca->ib_device, "invalid qp_type=%x", |
@@ -109,3 +118,85 @@ u64 ehca_define_sqp(struct ehca_shca *shca, | |||
109 | 118 | ||
110 | return H_SUCCESS; | 119 | return H_SUCCESS; |
111 | } | 120 | } |
121 | |||
122 | struct ib_perf { | ||
123 | struct ib_mad_hdr mad_hdr; | ||
124 | u8 reserved[40]; | ||
125 | u8 data[192]; | ||
126 | } __attribute__ ((packed)); | ||
127 | |||
128 | |||
129 | static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, | ||
130 | struct ib_mad *in_mad, struct ib_mad *out_mad) | ||
131 | { | ||
132 | struct ib_perf *in_perf = (struct ib_perf *)in_mad; | ||
133 | struct ib_perf *out_perf = (struct ib_perf *)out_mad; | ||
134 | struct ib_class_port_info *poi = | ||
135 | (struct ib_class_port_info *)out_perf->data; | ||
136 | struct ehca_shca *shca = | ||
137 | container_of(ibdev, struct ehca_shca, ib_device); | ||
138 | struct ehca_sport *sport = &shca->sport[port_num - 1]; | ||
139 | |||
140 | ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method); | ||
141 | |||
142 | *out_mad = *in_mad; | ||
143 | |||
144 | if (in_perf->mad_hdr.class_version != 1) { | ||
145 | ehca_warn(ibdev, "Unsupported class_version=%x", | ||
146 | in_perf->mad_hdr.class_version); | ||
147 | out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION; | ||
148 | goto perf_reply; | ||
149 | } | ||
150 | |||
151 | switch (in_perf->mad_hdr.method) { | ||
152 | case IB_MGMT_METHOD_GET: | ||
153 | case IB_MGMT_METHOD_SET: | ||
154 | /* set class port info for redirection */ | ||
155 | out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO; | ||
156 | out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT; | ||
157 | memset(poi, 0, sizeof(*poi)); | ||
158 | poi->base_version = 1; | ||
159 | poi->class_version = 1; | ||
160 | poi->resp_time_value = 18; | ||
161 | poi->redirect_lid = sport->saved_attr.lid; | ||
162 | poi->redirect_qp = sport->pma_qp_nr; | ||
163 | poi->redirect_qkey = IB_QP1_QKEY; | ||
164 | poi->redirect_pkey = IB_DEFAULT_PKEY_FULL; | ||
165 | |||
166 | ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x", | ||
167 | sport->saved_attr.lid, sport->pma_qp_nr); | ||
168 | break; | ||
169 | |||
170 | case IB_MGMT_METHOD_GET_RESP: | ||
171 | return IB_MAD_RESULT_FAILURE; | ||
172 | |||
173 | default: | ||
174 | out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD; | ||
175 | break; | ||
176 | } | ||
177 | |||
178 | perf_reply: | ||
179 | out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP; | ||
180 | |||
181 | return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; | ||
182 | } | ||
183 | |||
184 | int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | ||
185 | struct ib_wc *in_wc, struct ib_grh *in_grh, | ||
186 | struct ib_mad *in_mad, | ||
187 | struct ib_mad *out_mad) | ||
188 | { | ||
189 | int ret; | ||
190 | |||
191 | if (!port_num || port_num > ibdev->phys_port_cnt) | ||
192 | return IB_MAD_RESULT_FAILURE; | ||
193 | |||
194 | /* accept only pma request */ | ||
195 | if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT) | ||
196 | return IB_MAD_RESULT_SUCCESS; | ||
197 | |||
198 | ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp); | ||
199 | ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad); | ||
200 | |||
201 | return ret; | ||
202 | } | ||
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index d8287d9db41e..96a39b5c9254 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -52,7 +52,7 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); | |||
52 | MODULE_LICENSE("Dual BSD/GPL"); | 52 | MODULE_LICENSE("Dual BSD/GPL"); |
53 | MODULE_VERSION(DRV_VERSION); | 53 | MODULE_VERSION(DRV_VERSION); |
54 | 54 | ||
55 | static const char mlx4_ib_version[] __devinitdata = | 55 | static const char mlx4_ib_version[] = |
56 | DRV_NAME ": Mellanox ConnectX InfiniBand driver v" | 56 | DRV_NAME ": Mellanox ConnectX InfiniBand driver v" |
57 | DRV_VERSION " (" DRV_RELDATE ")\n"; | 57 | DRV_VERSION " (" DRV_RELDATE ")\n"; |
58 | 58 | ||
@@ -468,6 +468,7 @@ static int init_node_data(struct mlx4_ib_dev *dev) | |||
468 | if (err) | 468 | if (err) |
469 | goto out; | 469 | goto out; |
470 | 470 | ||
471 | dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32)); | ||
471 | memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); | 472 | memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); |
472 | 473 | ||
473 | out: | 474 | out: |
@@ -516,9 +517,16 @@ static struct class_device_attribute *mlx4_class_attributes[] = { | |||
516 | 517 | ||
517 | static void *mlx4_ib_add(struct mlx4_dev *dev) | 518 | static void *mlx4_ib_add(struct mlx4_dev *dev) |
518 | { | 519 | { |
520 | static int mlx4_ib_version_printed; | ||
519 | struct mlx4_ib_dev *ibdev; | 521 | struct mlx4_ib_dev *ibdev; |
520 | int i; | 522 | int i; |
521 | 523 | ||
524 | |||
525 | if (!mlx4_ib_version_printed) { | ||
526 | printk(KERN_INFO "%s", mlx4_ib_version); | ||
527 | ++mlx4_ib_version_printed; | ||
528 | } | ||
529 | |||
522 | ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev); | 530 | ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev); |
523 | if (!ibdev) { | 531 | if (!ibdev) { |
524 | dev_err(&dev->pdev->dev, "Device struct alloc failed\n"); | 532 | dev_err(&dev->pdev->dev, "Device struct alloc failed\n"); |
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 6966f943f440..09a30dd12b14 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c | |||
@@ -1255,9 +1255,14 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev, | |||
1255 | if (err) | 1255 | if (err) |
1256 | goto out; | 1256 | goto out; |
1257 | 1257 | ||
1258 | MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET); | 1258 | if (!mthca_is_memfree(dev)) { |
1259 | MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET); | 1259 | MTHCA_GET(adapter->vendor_id, outbox, |
1260 | MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); | 1260 | QUERY_ADAPTER_VENDOR_ID_OFFSET); |
1261 | MTHCA_GET(adapter->device_id, outbox, | ||
1262 | QUERY_ADAPTER_DEVICE_ID_OFFSET); | ||
1263 | MTHCA_GET(adapter->revision_id, outbox, | ||
1264 | QUERY_ADAPTER_REVISION_ID_OFFSET); | ||
1265 | } | ||
1261 | MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); | 1266 | MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); |
1262 | 1267 | ||
1263 | get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, | 1268 | get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, |
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 5cf8250d4e16..cd3d8adbef9f 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c | |||
@@ -126,7 +126,7 @@ module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444); | |||
126 | MODULE_PARM_DESC(fmr_reserved_mtts, | 126 | MODULE_PARM_DESC(fmr_reserved_mtts, |
127 | "number of memory translation table segments reserved for FMR"); | 127 | "number of memory translation table segments reserved for FMR"); |
128 | 128 | ||
129 | static const char mthca_version[] __devinitdata = | 129 | static char mthca_version[] __devinitdata = |
130 | DRV_NAME ": Mellanox InfiniBand HCA driver v" | 130 | DRV_NAME ": Mellanox InfiniBand HCA driver v" |
131 | DRV_VERSION " (" DRV_RELDATE ")\n"; | 131 | DRV_VERSION " (" DRV_RELDATE ")\n"; |
132 | 132 | ||
@@ -735,7 +735,8 @@ static int mthca_init_hca(struct mthca_dev *mdev) | |||
735 | } | 735 | } |
736 | 736 | ||
737 | mdev->eq_table.inta_pin = adapter.inta_pin; | 737 | mdev->eq_table.inta_pin = adapter.inta_pin; |
738 | mdev->rev_id = adapter.revision_id; | 738 | if (!mthca_is_memfree(mdev)) |
739 | mdev->rev_id = adapter.revision_id; | ||
739 | memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id); | 740 | memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id); |
740 | 741 | ||
741 | return 0; | 742 | return 0; |
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index aa6c70a6a36f..3b6985557cb2 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c | |||
@@ -613,8 +613,10 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, | |||
613 | sizeof *(mr->mem.tavor.mpt) * idx; | 613 | sizeof *(mr->mem.tavor.mpt) * idx; |
614 | 614 | ||
615 | mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy); | 615 | mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy); |
616 | if (IS_ERR(mr->mtt)) | 616 | if (IS_ERR(mr->mtt)) { |
617 | err = PTR_ERR(mr->mtt); | ||
617 | goto err_out_table; | 618 | goto err_out_table; |
619 | } | ||
618 | 620 | ||
619 | mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE; | 621 | mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE; |
620 | 622 | ||
@@ -627,8 +629,10 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, | |||
627 | mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg; | 629 | mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg; |
628 | 630 | ||
629 | mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); | 631 | mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); |
630 | if (IS_ERR(mailbox)) | 632 | if (IS_ERR(mailbox)) { |
633 | err = PTR_ERR(mailbox); | ||
631 | goto err_out_free_mtt; | 634 | goto err_out_free_mtt; |
635 | } | ||
632 | 636 | ||
633 | mpt_entry = mailbox->buf; | 637 | mpt_entry = mailbox->buf; |
634 | 638 | ||
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 6bcde1cb9688..9e491df6419c 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -923,17 +923,13 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd, | |||
923 | struct mthca_mr *mr; | 923 | struct mthca_mr *mr; |
924 | u64 *page_list; | 924 | u64 *page_list; |
925 | u64 total_size; | 925 | u64 total_size; |
926 | u64 mask; | 926 | unsigned long mask; |
927 | int shift; | 927 | int shift; |
928 | int npages; | 928 | int npages; |
929 | int err; | 929 | int err; |
930 | int i, j, n; | 930 | int i, j, n; |
931 | 931 | ||
932 | /* First check that we have enough alignment */ | 932 | mask = buffer_list[0].addr ^ *iova_start; |
933 | if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) | ||
934 | return ERR_PTR(-EINVAL); | ||
935 | |||
936 | mask = 0; | ||
937 | total_size = 0; | 933 | total_size = 0; |
938 | for (i = 0; i < num_phys_buf; ++i) { | 934 | for (i = 0; i < num_phys_buf; ++i) { |
939 | if (i != 0) | 935 | if (i != 0) |
@@ -947,17 +943,7 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd, | |||
947 | if (mask & ~PAGE_MASK) | 943 | if (mask & ~PAGE_MASK) |
948 | return ERR_PTR(-EINVAL); | 944 | return ERR_PTR(-EINVAL); |
949 | 945 | ||
950 | /* Find largest page shift we can use to cover buffers */ | 946 | shift = __ffs(mask | 1 << 31); |
951 | for (shift = PAGE_SHIFT; shift < 31; ++shift) | ||
952 | if (num_phys_buf > 1) { | ||
953 | if ((1ULL << shift) & mask) | ||
954 | break; | ||
955 | } else { | ||
956 | if (1ULL << shift >= | ||
957 | buffer_list[0].size + | ||
958 | (buffer_list[0].addr & ((1ULL << shift) - 1))) | ||
959 | break; | ||
960 | } | ||
961 | 947 | ||
962 | buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1); | 948 | buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1); |
963 | buffer_list[0].addr &= ~0ull << shift; | 949 | buffer_list[0].addr &= ~0ull << shift; |
@@ -1270,6 +1256,8 @@ static int mthca_init_node_data(struct mthca_dev *dev) | |||
1270 | goto out; | 1256 | goto out; |
1271 | } | 1257 | } |
1272 | 1258 | ||
1259 | if (mthca_is_memfree(dev)) | ||
1260 | dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32)); | ||
1273 | memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); | 1261 | memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); |
1274 | 1262 | ||
1275 | out: | 1263 | out: |
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 0e5461c65731..db5595bbf7f0 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
@@ -1175,6 +1175,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, | |||
1175 | { | 1175 | { |
1176 | int ret; | 1176 | int ret; |
1177 | int i; | 1177 | int i; |
1178 | struct mthca_next_seg *next; | ||
1178 | 1179 | ||
1179 | qp->refcount = 1; | 1180 | qp->refcount = 1; |
1180 | init_waitqueue_head(&qp->wait); | 1181 | init_waitqueue_head(&qp->wait); |
@@ -1217,7 +1218,6 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, | |||
1217 | } | 1218 | } |
1218 | 1219 | ||
1219 | if (mthca_is_memfree(dev)) { | 1220 | if (mthca_is_memfree(dev)) { |
1220 | struct mthca_next_seg *next; | ||
1221 | struct mthca_data_seg *scatter; | 1221 | struct mthca_data_seg *scatter; |
1222 | int size = (sizeof (struct mthca_next_seg) + | 1222 | int size = (sizeof (struct mthca_next_seg) + |
1223 | qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; | 1223 | qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; |
@@ -1240,6 +1240,13 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, | |||
1240 | qp->sq.wqe_shift) + | 1240 | qp->sq.wqe_shift) + |
1241 | qp->send_wqe_offset); | 1241 | qp->send_wqe_offset); |
1242 | } | 1242 | } |
1243 | } else { | ||
1244 | for (i = 0; i < qp->rq.max; ++i) { | ||
1245 | next = get_recv_wqe(qp, i); | ||
1246 | next->nda_op = htonl((((i + 1) % qp->rq.max) << | ||
1247 | qp->rq.wqe_shift) | 1); | ||
1248 | } | ||
1249 | |||
1243 | } | 1250 | } |
1244 | 1251 | ||
1245 | qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); | 1252 | qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); |
@@ -1863,7 +1870,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1863 | prev_wqe = qp->rq.last; | 1870 | prev_wqe = qp->rq.last; |
1864 | qp->rq.last = wqe; | 1871 | qp->rq.last = wqe; |
1865 | 1872 | ||
1866 | ((struct mthca_next_seg *) wqe)->nda_op = 0; | ||
1867 | ((struct mthca_next_seg *) wqe)->ee_nds = | 1873 | ((struct mthca_next_seg *) wqe)->ee_nds = |
1868 | cpu_to_be32(MTHCA_NEXT_DBD); | 1874 | cpu_to_be32(MTHCA_NEXT_DBD); |
1869 | ((struct mthca_next_seg *) wqe)->flags = 0; | 1875 | ((struct mthca_next_seg *) wqe)->flags = 0; |
@@ -1885,9 +1891,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1885 | 1891 | ||
1886 | qp->wrid[ind] = wr->wr_id; | 1892 | qp->wrid[ind] = wr->wr_id; |
1887 | 1893 | ||
1888 | ((struct mthca_next_seg *) prev_wqe)->nda_op = | ||
1889 | cpu_to_be32((ind << qp->rq.wqe_shift) | 1); | ||
1890 | wmb(); | ||
1891 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | 1894 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = |
1892 | cpu_to_be32(MTHCA_NEXT_DBD | size); | 1895 | cpu_to_be32(MTHCA_NEXT_DBD | size); |
1893 | 1896 | ||
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c index 553d681f6813..a5ffff6e1026 100644 --- a/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/drivers/infiniband/hw/mthca/mthca_srq.c | |||
@@ -175,9 +175,17 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, | |||
175 | * scatter list L_Keys to the sentry value of 0x100. | 175 | * scatter list L_Keys to the sentry value of 0x100. |
176 | */ | 176 | */ |
177 | for (i = 0; i < srq->max; ++i) { | 177 | for (i = 0; i < srq->max; ++i) { |
178 | wqe = get_wqe(srq, i); | 178 | struct mthca_next_seg *next; |
179 | 179 | ||
180 | *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1; | 180 | next = wqe = get_wqe(srq, i); |
181 | |||
182 | if (i < srq->max - 1) { | ||
183 | *wqe_to_link(wqe) = i + 1; | ||
184 | next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1); | ||
185 | } else { | ||
186 | *wqe_to_link(wqe) = -1; | ||
187 | next->nda_op = 0; | ||
188 | } | ||
181 | 189 | ||
182 | for (scatter = wqe + sizeof (struct mthca_next_seg); | 190 | for (scatter = wqe + sizeof (struct mthca_next_seg); |
183 | (void *) scatter < wqe + (1 << srq->wqe_shift); | 191 | (void *) scatter < wqe + (1 << srq->wqe_shift); |
@@ -470,16 +478,15 @@ out: | |||
470 | void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) | 478 | void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) |
471 | { | 479 | { |
472 | int ind; | 480 | int ind; |
481 | struct mthca_next_seg *last_free; | ||
473 | 482 | ||
474 | ind = wqe_addr >> srq->wqe_shift; | 483 | ind = wqe_addr >> srq->wqe_shift; |
475 | 484 | ||
476 | spin_lock(&srq->lock); | 485 | spin_lock(&srq->lock); |
477 | 486 | ||
478 | if (likely(srq->first_free >= 0)) | 487 | last_free = get_wqe(srq, srq->last_free); |
479 | *wqe_to_link(get_wqe(srq, srq->last_free)) = ind; | 488 | *wqe_to_link(last_free) = ind; |
480 | else | 489 | last_free->nda_op = htonl((ind << srq->wqe_shift) | 1); |
481 | srq->first_free = ind; | ||
482 | |||
483 | *wqe_to_link(get_wqe(srq, ind)) = -1; | 490 | *wqe_to_link(get_wqe(srq, ind)) = -1; |
484 | srq->last_free = ind; | 491 | srq->last_free = ind; |
485 | 492 | ||
@@ -506,15 +513,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
506 | first_ind = srq->first_free; | 513 | first_ind = srq->first_free; |
507 | 514 | ||
508 | for (nreq = 0; wr; wr = wr->next) { | 515 | for (nreq = 0; wr; wr = wr->next) { |
509 | ind = srq->first_free; | 516 | ind = srq->first_free; |
510 | |||
511 | if (unlikely(ind < 0)) { | ||
512 | mthca_err(dev, "SRQ %06x full\n", srq->srqn); | ||
513 | err = -ENOMEM; | ||
514 | *bad_wr = wr; | ||
515 | break; | ||
516 | } | ||
517 | |||
518 | wqe = get_wqe(srq, ind); | 517 | wqe = get_wqe(srq, ind); |
519 | next_ind = *wqe_to_link(wqe); | 518 | next_ind = *wqe_to_link(wqe); |
520 | 519 | ||
@@ -528,7 +527,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
528 | prev_wqe = srq->last; | 527 | prev_wqe = srq->last; |
529 | srq->last = wqe; | 528 | srq->last = wqe; |
530 | 529 | ||
531 | ((struct mthca_next_seg *) wqe)->nda_op = 0; | ||
532 | ((struct mthca_next_seg *) wqe)->ee_nds = 0; | 530 | ((struct mthca_next_seg *) wqe)->ee_nds = 0; |
533 | /* flags field will always remain 0 */ | 531 | /* flags field will always remain 0 */ |
534 | 532 | ||
@@ -549,9 +547,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
549 | if (i < srq->max_gs) | 547 | if (i < srq->max_gs) |
550 | mthca_set_data_seg_inval(wqe); | 548 | mthca_set_data_seg_inval(wqe); |
551 | 549 | ||
552 | ((struct mthca_next_seg *) prev_wqe)->nda_op = | ||
553 | cpu_to_be32((ind << srq->wqe_shift) | 1); | ||
554 | wmb(); | ||
555 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | 550 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = |
556 | cpu_to_be32(MTHCA_NEXT_DBD); | 551 | cpu_to_be32(MTHCA_NEXT_DBD); |
557 | 552 | ||
@@ -614,15 +609,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
614 | spin_lock_irqsave(&srq->lock, flags); | 609 | spin_lock_irqsave(&srq->lock, flags); |
615 | 610 | ||
616 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | 611 | for (nreq = 0; wr; ++nreq, wr = wr->next) { |
617 | ind = srq->first_free; | 612 | ind = srq->first_free; |
618 | |||
619 | if (unlikely(ind < 0)) { | ||
620 | mthca_err(dev, "SRQ %06x full\n", srq->srqn); | ||
621 | err = -ENOMEM; | ||
622 | *bad_wr = wr; | ||
623 | break; | ||
624 | } | ||
625 | |||
626 | wqe = get_wqe(srq, ind); | 613 | wqe = get_wqe(srq, ind); |
627 | next_ind = *wqe_to_link(wqe); | 614 | next_ind = *wqe_to_link(wqe); |
628 | 615 | ||
@@ -633,8 +620,6 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
633 | break; | 620 | break; |
634 | } | 621 | } |
635 | 622 | ||
636 | ((struct mthca_next_seg *) wqe)->nda_op = | ||
637 | cpu_to_be32((next_ind << srq->wqe_shift) | 1); | ||
638 | ((struct mthca_next_seg *) wqe)->ee_nds = 0; | 623 | ((struct mthca_next_seg *) wqe)->ee_nds = 0; |
639 | /* flags field will always remain 0 */ | 624 | /* flags field will always remain 0 */ |
640 | 625 | ||
diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig new file mode 100644 index 000000000000..2aeb7ac972a9 --- /dev/null +++ b/drivers/infiniband/hw/nes/Kconfig | |||
@@ -0,0 +1,16 @@ | |||
1 | config INFINIBAND_NES | ||
2 | tristate "NetEffect RNIC Driver" | ||
3 | depends on PCI && INET && INFINIBAND | ||
4 | select LIBCRC32C | ||
5 | ---help--- | ||
6 | This is a low-level driver for NetEffect RDMA enabled | ||
7 | Network Interface Cards (RNIC). | ||
8 | |||
9 | config INFINIBAND_NES_DEBUG | ||
10 | bool "Verbose debugging output" | ||
11 | depends on INFINIBAND_NES | ||
12 | default n | ||
13 | ---help--- | ||
14 | This option causes the NetEffect RNIC driver to produce debug | ||
15 | messages. Select this if you are developing the driver | ||
16 | or trying to diagnose a problem. | ||
diff --git a/drivers/infiniband/hw/nes/Makefile b/drivers/infiniband/hw/nes/Makefile new file mode 100644 index 000000000000..35148513c47e --- /dev/null +++ b/drivers/infiniband/hw/nes/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | obj-$(CONFIG_INFINIBAND_NES) += iw_nes.o | ||
2 | |||
3 | iw_nes-objs := nes.o nes_hw.o nes_nic.o nes_utils.o nes_verbs.o nes_cm.o | ||
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c new file mode 100644 index 000000000000..7f8853b44ee1 --- /dev/null +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -0,0 +1,1152 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/module.h> | ||
35 | #include <linux/moduleparam.h> | ||
36 | #include <linux/netdevice.h> | ||
37 | #include <linux/etherdevice.h> | ||
38 | #include <linux/ethtool.h> | ||
39 | #include <linux/mii.h> | ||
40 | #include <linux/if_vlan.h> | ||
41 | #include <linux/crc32.h> | ||
42 | #include <linux/in.h> | ||
43 | #include <linux/fs.h> | ||
44 | #include <linux/init.h> | ||
45 | #include <linux/if_arp.h> | ||
46 | #include <linux/highmem.h> | ||
47 | #include <asm/io.h> | ||
48 | #include <asm/irq.h> | ||
49 | #include <asm/byteorder.h> | ||
50 | #include <rdma/ib_smi.h> | ||
51 | #include <rdma/ib_verbs.h> | ||
52 | #include <rdma/ib_pack.h> | ||
53 | #include <rdma/iw_cm.h> | ||
54 | |||
55 | #include "nes.h" | ||
56 | |||
57 | #include <net/netevent.h> | ||
58 | #include <net/neighbour.h> | ||
59 | #include <linux/route.h> | ||
60 | #include <net/ip_fib.h> | ||
61 | |||
62 | MODULE_AUTHOR("NetEffect"); | ||
63 | MODULE_DESCRIPTION("NetEffect RNIC Low-level iWARP Driver"); | ||
64 | MODULE_LICENSE("Dual BSD/GPL"); | ||
65 | MODULE_VERSION(DRV_VERSION); | ||
66 | |||
67 | int max_mtu = 9000; | ||
68 | int nics_per_function = 1; | ||
69 | int interrupt_mod_interval = 0; | ||
70 | |||
71 | |||
72 | /* Interoperability */ | ||
73 | int mpa_version = 1; | ||
74 | module_param(mpa_version, int, 0); | ||
75 | MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp (0 or 1)"); | ||
76 | |||
77 | /* Interoperability */ | ||
78 | int disable_mpa_crc = 0; | ||
79 | module_param(disable_mpa_crc, int, 0); | ||
80 | MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC"); | ||
81 | |||
82 | unsigned int send_first = 0; | ||
83 | module_param(send_first, int, 0); | ||
84 | MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection"); | ||
85 | |||
86 | |||
87 | unsigned int nes_drv_opt = 0; | ||
88 | module_param(nes_drv_opt, int, 0); | ||
89 | MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters"); | ||
90 | |||
91 | unsigned int nes_debug_level = 0; | ||
92 | module_param_named(debug_level, nes_debug_level, uint, 0644); | ||
93 | MODULE_PARM_DESC(debug_level, "Debug output level (bitmask of NES_DBG_* flags)"); | ||
94 | |||
95 | LIST_HEAD(nes_adapter_list); | ||
96 | LIST_HEAD(nes_dev_list); | ||
97 | |||
98 | atomic_t qps_destroyed; | ||
99 | atomic_t cqp_reqs_allocated; | ||
100 | atomic_t cqp_reqs_freed; | ||
101 | atomic_t cqp_reqs_dynallocated; | ||
102 | atomic_t cqp_reqs_dynfreed; | ||
103 | atomic_t cqp_reqs_queued; | ||
104 | atomic_t cqp_reqs_redriven; | ||
105 | |||
106 | static void nes_print_macaddr(struct net_device *netdev); | ||
107 | static irqreturn_t nes_interrupt(int, void *); | ||
108 | static int __devinit nes_probe(struct pci_dev *, const struct pci_device_id *); | ||
109 | static void __devexit nes_remove(struct pci_dev *); | ||
110 | static int __init nes_init_module(void); | ||
111 | static void __exit nes_exit_module(void); | ||
112 | static unsigned int ee_flsh_adapter; | ||
113 | static unsigned int sysfs_nonidx_addr; | ||
114 | static unsigned int sysfs_idx_addr; | ||
115 | |||
116 | static struct pci_device_id nes_pci_table[] = { | ||
117 | {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID}, | ||
118 | {0} | ||
119 | }; | ||
120 | |||
121 | MODULE_DEVICE_TABLE(pci, nes_pci_table); | ||
122 | |||
123 | static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *); | ||
124 | static int nes_net_event(struct notifier_block *, unsigned long, void *); | ||
125 | static int nes_notifiers_registered; | ||
126 | |||
127 | |||
128 | static struct notifier_block nes_inetaddr_notifier = { | ||
129 | .notifier_call = nes_inetaddr_event | ||
130 | }; | ||
131 | |||
132 | static struct notifier_block nes_net_notifier = { | ||
133 | .notifier_call = nes_net_event | ||
134 | }; | ||
135 | |||
136 | |||
137 | |||
138 | |||
139 | /** | ||
140 | * nes_inetaddr_event | ||
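* Track IPv4 address changes on the nes netdevs: program the per-PCI-function | ||
* destination IP register and keep the hardware ARP cache in step. | ||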
141 | */ | ||
142 | static int nes_inetaddr_event(struct notifier_block *notifier, | ||
143 | unsigned long event, void *ptr) | ||
144 | { | ||
145 | struct in_ifaddr *ifa = ptr; | ||
146 | struct net_device *event_netdev = ifa->ifa_dev->dev; | ||
147 | struct nes_device *nesdev; | ||
148 | struct net_device *netdev; | ||
149 | struct nes_vnic *nesvnic; | ||
150 | unsigned int addr; | ||
151 | unsigned int mask; | ||
152 | |||
153 | addr = ntohl(ifa->ifa_address); | ||
154 | mask = ntohl(ifa->ifa_mask); | ||
155 | nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n", | ||
156 | addr, mask); | ||
157 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
158 | nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n", | ||
159 | nesdev, nesdev->netdev[0]->name); | ||
160 | netdev = nesdev->netdev[0]; | ||
161 | nesvnic = netdev_priv(netdev); | ||
162 | if (netdev == event_netdev) { | ||
163 | if (nesvnic->rdma_enabled == 0) { | ||
164 | nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since" | ||
165 | " RDMA is not enabled.\n", | ||
166 | netdev->name); | ||
167 | return NOTIFY_OK; | ||
168 | } | ||
169 | /* we have ifa->ifa_address/mask here if we need it */ | ||
170 | switch (event) { | ||
171 | case NETDEV_DOWN: | ||
172 | nes_debug(NES_DBG_NETDEV, "event:DOWN\n"); | ||
173 | nes_write_indexed(nesdev, | ||
174 | NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)), 0); | ||
175 | |||
176 | nes_manage_arp_cache(netdev, netdev->dev_addr, | ||
177 | ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE); | ||
178 | nesvnic->local_ipaddr = 0; | ||
179 | return NOTIFY_OK; | ||
180 | break; | ||
181 | case NETDEV_UP: | ||
182 | nes_debug(NES_DBG_NETDEV, "event:UP\n"); | ||
183 | |||
184 | if (nesvnic->local_ipaddr != 0) { | ||
185 | nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n"); | ||
186 | return NOTIFY_OK; | ||
187 | } | ||
188 | /* Add the address to the IP table */ | ||
189 | nesvnic->local_ipaddr = ifa->ifa_address; | ||
190 | |||
191 | nes_write_indexed(nesdev, | ||
192 | NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)), | ||
193 | ntohl(ifa->ifa_address)); | ||
194 | nes_manage_arp_cache(netdev, netdev->dev_addr, | ||
195 | ntohl(nesvnic->local_ipaddr), NES_ARP_ADD); | ||
196 | return NOTIFY_OK; | ||
197 | break; | ||
198 | default: | ||
199 | break; | ||
200 | } | ||
201 | } | ||
202 | } | ||
203 | |||
204 | return NOTIFY_DONE; | ||
205 | } | ||
206 | |||
207 | |||
208 | /** | ||
209 | * nes_net_event | ||
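* Mirror kernel neighbour updates (NETEVENT_NEIGH_UPDATE) into the adapter's | ||
* ARP cache for RDMA-enabled interfaces. | ||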
210 | */ | ||
211 | static int nes_net_event(struct notifier_block *notifier, | ||
212 | unsigned long event, void *ptr) | ||
213 | { | ||
214 | struct neighbour *neigh = ptr; | ||
215 | struct nes_device *nesdev; | ||
216 | struct net_device *netdev; | ||
217 | struct nes_vnic *nesvnic; | ||
218 | |||
219 | switch (event) { | ||
220 | case NETEVENT_NEIGH_UPDATE: | ||
221 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
222 | /* nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p.\n", nesdev); */ | ||
223 | netdev = nesdev->netdev[0]; | ||
224 | nesvnic = netdev_priv(netdev); | ||
225 | if (netdev == neigh->dev) { | ||
226 | if (nesvnic->rdma_enabled == 0) { | ||
227 | nes_debug(NES_DBG_NETDEV, "Skipping device %s since no RDMA\n", | ||
228 | netdev->name); | ||
229 | } else { | ||
230 | if (neigh->nud_state & NUD_VALID) { | ||
231 | nes_manage_arp_cache(neigh->dev, neigh->ha, | ||
232 | ntohl(*(__be32 *)neigh->primary_key), NES_ARP_ADD); | ||
233 | } else { | ||
234 | nes_manage_arp_cache(neigh->dev, neigh->ha, | ||
235 | ntohl(*(__be32 *)neigh->primary_key), NES_ARP_DELETE); | ||
236 | } | ||
237 | } | ||
238 | return NOTIFY_OK; | ||
239 | } | ||
240 | } | ||
241 | break; | ||
242 | default: | ||
243 | nes_debug(NES_DBG_NETDEV, "NETEVENT_ %lu undefined\n", event); | ||
244 | break; | ||
245 | } | ||
246 | |||
247 | return NOTIFY_DONE; | ||
248 | } | ||
249 | |||
250 | |||
251 | /** | ||
252 | * nes_add_ref | ||
253 | */ | ||
254 | void nes_add_ref(struct ib_qp *ibqp) | ||
255 | { | ||
256 | struct nes_qp *nesqp; | ||
257 | |||
258 | nesqp = to_nesqp(ibqp); | ||
259 | nes_debug(NES_DBG_QP, "Bumping refcount for QP%u. Pre-inc value = %u\n", | ||
260 | ibqp->qp_num, atomic_read(&nesqp->refcount)); | ||
261 | atomic_inc(&nesqp->refcount); | ||
262 | } | ||
263 | |||
264 | static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request) | ||
265 | { | ||
266 | unsigned long flags; | ||
267 | struct nes_qp *nesqp = cqp_request->cqp_callback_pointer; | ||
268 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
269 | u32 qp_id; | ||
270 | |||
271 | atomic_inc(&qps_destroyed); | ||
272 | |||
273 | /* Free the control structures */ | ||
274 | |||
275 | qp_id = nesqp->hwqp.qp_id; | ||
276 | if (nesqp->pbl_vbase) { | ||
277 | pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, | ||
278 | nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase); | ||
279 | spin_lock_irqsave(&nesadapter->pbl_lock, flags); | ||
280 | nesadapter->free_256pbl++; | ||
281 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
282 | pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase); | ||
283 | nesqp->pbl_vbase = NULL; | ||
284 | |||
285 | } else { | ||
286 | pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, | ||
287 | nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase); | ||
288 | } | ||
289 | nes_free_resource(nesadapter, nesadapter->allocated_qps, nesqp->hwqp.qp_id); | ||
290 | |||
291 | kfree(nesqp->allocated_buffer); | ||
292 | |||
293 | } | ||
294 | |||
295 | /** | ||
296 | * nes_rem_ref | ||
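* Drop a QP reference.  When the count reaches zero the QP is destroyed via an | ||
* asynchronous CQP request; nes_cqp_rem_ref_callback() releases the queue | ||
* memory and the QP id once the hardware completes the destroy. | ||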
297 | */ | ||
298 | void nes_rem_ref(struct ib_qp *ibqp) | ||
299 | { | ||
300 | u64 u64temp; | ||
301 | struct nes_qp *nesqp; | ||
302 | struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); | ||
303 | struct nes_device *nesdev = nesvnic->nesdev; | ||
304 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
305 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
306 | struct nes_cqp_request *cqp_request; | ||
307 | u32 opcode; | ||
308 | |||
309 | nesqp = to_nesqp(ibqp); | ||
310 | |||
311 | if (atomic_read(&nesqp->refcount) == 0) { | ||
312 | printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n", | ||
313 | __FUNCTION__, ibqp->qp_num, nesqp->last_aeq); | ||
314 | BUG(); | ||
315 | } | ||
316 | |||
317 | if (atomic_dec_and_test(&nesqp->refcount)) { | ||
318 | nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL; | ||
319 | |||
320 | /* Destroy the QP */ | ||
321 | cqp_request = nes_get_cqp_request(nesdev); | ||
322 | if (cqp_request == NULL) { | ||
323 | nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n"); | ||
324 | return; | ||
325 | } | ||
326 | cqp_request->waiting = 0; | ||
327 | cqp_request->callback = 1; | ||
328 | cqp_request->cqp_callback = nes_cqp_rem_ref_callback; | ||
329 | cqp_request->cqp_callback_pointer = nesqp; | ||
330 | cqp_wqe = &cqp_request->cqp_wqe; | ||
331 | |||
332 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
333 | opcode = NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_IWARP; | ||
334 | |||
335 | if (nesqp->hte_added) { | ||
336 | opcode |= NES_CQP_QP_DEL_HTE; | ||
337 | nesqp->hte_added = 0; | ||
338 | } | ||
339 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode); | ||
340 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); | ||
341 | u64temp = (u64)nesqp->nesqp_context_pbase; | ||
342 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp); | ||
343 | nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL); | ||
344 | } | ||
345 | } | ||
346 | |||
347 | |||
348 | /** | ||
349 | * nes_get_qp | ||
350 | */ | ||
351 | struct ib_qp *nes_get_qp(struct ib_device *device, int qpn) | ||
352 | { | ||
353 | struct nes_vnic *nesvnic = to_nesvnic(device); | ||
354 | struct nes_device *nesdev = nesvnic->nesdev; | ||
355 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
356 | |||
357 | if ((qpn < NES_FIRST_QPN) || (qpn >= (NES_FIRST_QPN + nesadapter->max_qp))) | ||
358 | return NULL; | ||
359 | |||
360 | return &nesadapter->qp_table[qpn - NES_FIRST_QPN]->ibqp; | ||
361 | } | ||
362 | |||
363 | |||
364 | /** | ||
365 | * nes_print_macaddr | ||
366 | */ | ||
367 | static void nes_print_macaddr(struct net_device *netdev) | ||
368 | { | ||
369 | nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n", | ||
370 | netdev->name, | ||
371 | netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], | ||
372 | netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5], | ||
373 | netdev->irq); | ||
374 | } | ||
375 | |||
376 | |||
377 | /** | ||
378 | * nes_interrupt - handle interrupts | ||
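* For shared (non-MSI) interrupts on NE020 rev 4 parts, peek at the interrupt | ||
* status registers to decide whether the interrupt is ours before masking and | ||
* scheduling the bottom-half tasklet; other revisions simply read the | ||
* interrupt pending register. | ||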
379 | */ | ||
380 | static irqreturn_t nes_interrupt(int irq, void *dev_id) | ||
381 | { | ||
382 | struct nes_device *nesdev = (struct nes_device *)dev_id; | ||
383 | int handled = 0; | ||
384 | u32 int_mask; | ||
385 | u32 int_req; | ||
386 | u32 int_stat; | ||
387 | u32 intf_int_stat; | ||
388 | u32 timer_stat; | ||
389 | |||
390 | if (nesdev->msi_enabled) { | ||
391 | /* No need to read the interrupt pending register if msi is enabled */ | ||
392 | handled = 1; | ||
393 | } else { | ||
394 | if (unlikely(nesdev->nesadapter->hw_rev == NE020_REV)) { | ||
395 | /* Master interrupt enable provides synchronization for kicking off bottom half | ||
396 | when interrupt sharing is going on */ | ||
397 | int_mask = nes_read32(nesdev->regs + NES_INT_MASK); | ||
398 | if (int_mask & 0x80000000) { | ||
399 | /* Check interrupt status to see if this might be ours */ | ||
400 | int_stat = nes_read32(nesdev->regs + NES_INT_STAT); | ||
401 | int_req = nesdev->int_req; | ||
402 | if (int_stat&int_req) { | ||
403 | /* if interesting CEQ or AEQ is pending, claim the interrupt */ | ||
404 | if ((int_stat&int_req) & (~(NES_INT_TIMER|NES_INT_INTF))) { | ||
405 | handled = 1; | ||
406 | } else { | ||
407 | if (((int_stat & int_req) & NES_INT_TIMER) == NES_INT_TIMER) { | ||
408 | /* Timer might be running but might be for another function */ | ||
409 | timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT); | ||
410 | if ((timer_stat & nesdev->timer_int_req) != 0) { | ||
411 | handled = 1; | ||
412 | } | ||
413 | } | ||
414 | if ((((int_stat & int_req) & NES_INT_INTF) == NES_INT_INTF) && | ||
415 | (handled == 0)) { | ||
416 | intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT); | ||
417 | if ((intf_int_stat & nesdev->intf_int_req) != 0) { | ||
418 | handled = 1; | ||
419 | } | ||
420 | } | ||
421 | } | ||
422 | if (handled) { | ||
423 | nes_write32(nesdev->regs+NES_INT_MASK, int_mask & (~0x80000000)); | ||
424 | int_mask = nes_read32(nesdev->regs+NES_INT_MASK); | ||
425 | /* Save off the status to save an additional read */ | ||
426 | nesdev->int_stat = int_stat; | ||
427 | nesdev->napi_isr_ran = 1; | ||
428 | } | ||
429 | } | ||
430 | } | ||
431 | } else { | ||
432 | handled = nes_read32(nesdev->regs+NES_INT_PENDING); | ||
433 | } | ||
434 | } | ||
435 | |||
436 | if (handled) { | ||
437 | |||
438 | if (nes_napi_isr(nesdev) == 0) { | ||
439 | tasklet_schedule(&nesdev->dpc_tasklet); | ||
440 | |||
441 | } | ||
442 | return IRQ_HANDLED; | ||
443 | } else { | ||
444 | return IRQ_NONE; | ||
445 | } | ||
446 | } | ||
447 | |||
448 | |||
449 | /** | ||
450 | * nes_probe - Device initialization | ||
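* Map BAR0, set the DMA masks, bring up the control QP and the first netdev, | ||
* and hook the inetaddr/netevent notifiers for the new adapter. | ||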
451 | */ | ||
452 | static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) | ||
453 | { | ||
454 | struct net_device *netdev = NULL; | ||
455 | struct nes_device *nesdev = NULL; | ||
456 | int ret = 0; | ||
457 | struct nes_vnic *nesvnic = NULL; | ||
458 | void __iomem *mmio_regs = NULL; | ||
459 | u8 hw_rev; | ||
460 | |||
461 | assert(pcidev != NULL); | ||
462 | assert(ent != NULL); | ||
463 | |||
464 | printk(KERN_INFO PFX "NetEffect RNIC driver v%s loading. (%s)\n", | ||
465 | DRV_VERSION, pci_name(pcidev)); | ||
466 | |||
467 | ret = pci_enable_device(pcidev); | ||
468 | if (ret) { | ||
469 | printk(KERN_ERR PFX "Unable to enable PCI device. (%s)\n", pci_name(pcidev)); | ||
470 | goto bail0; | ||
471 | } | ||
472 | |||
473 | nes_debug(NES_DBG_INIT, "BAR0 (@0x%08lX) size = 0x%lX bytes\n", | ||
474 | (long unsigned int)pci_resource_start(pcidev, BAR_0), | ||
475 | (long unsigned int)pci_resource_len(pcidev, BAR_0)); | ||
476 | nes_debug(NES_DBG_INIT, "BAR1 (@0x%08lX) size = 0x%lX bytes\n", | ||
477 | (long unsigned int)pci_resource_start(pcidev, BAR_1), | ||
478 | (long unsigned int)pci_resource_len(pcidev, BAR_1)); | ||
479 | |||
480 | /* Make sure PCI base addr are MMIO */ | ||
481 | if (!(pci_resource_flags(pcidev, BAR_0) & IORESOURCE_MEM) || | ||
482 | !(pci_resource_flags(pcidev, BAR_1) & IORESOURCE_MEM)) { | ||
483 | printk(KERN_ERR PFX "PCI regions not an MMIO resource\n"); | ||
484 | ret = -ENODEV; | ||
485 | goto bail1; | ||
486 | } | ||
487 | |||
488 | /* Reserve PCI I/O and memory resources */ | ||
489 | ret = pci_request_regions(pcidev, DRV_NAME); | ||
490 | if (ret) { | ||
491 | printk(KERN_ERR PFX "Unable to request regions. (%s)\n", pci_name(pcidev)); | ||
492 | goto bail1; | ||
493 | } | ||
494 | |||
495 | if ((sizeof(dma_addr_t) > 4)) { | ||
496 | ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK); | ||
497 | if (ret < 0) { | ||
498 | printk(KERN_ERR PFX "64b DMA mask configuration failed\n"); | ||
499 | goto bail2; | ||
500 | } | ||
501 | ret = pci_set_consistent_dma_mask(pcidev, DMA_64BIT_MASK); | ||
502 | if (ret) { | ||
503 | printk(KERN_ERR PFX "64b DMA consistent mask configuration failed\n"); | ||
504 | goto bail2; | ||
505 | } | ||
506 | } else { | ||
507 | ret = pci_set_dma_mask(pcidev, DMA_32BIT_MASK); | ||
508 | if (ret < 0) { | ||
509 | printk(KERN_ERR PFX "32b DMA mask configuration failed\n"); | ||
510 | goto bail2; | ||
511 | } | ||
512 | ret = pci_set_consistent_dma_mask(pcidev, DMA_32BIT_MASK); | ||
513 | if (ret) { | ||
514 | printk(KERN_ERR PFX "32b DMA consistent mask configuration failed\n"); | ||
515 | goto bail2; | ||
516 | } | ||
517 | } | ||
518 | |||
519 | pci_set_master(pcidev); | ||
520 | |||
521 | /* Allocate hardware structure */ | ||
522 | nesdev = kzalloc(sizeof(struct nes_device), GFP_KERNEL); | ||
523 | if (!nesdev) { | ||
524 | printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n", pci_name(pcidev)); | ||
525 | ret = -ENOMEM; | ||
526 | goto bail2; | ||
527 | } | ||
528 | |||
529 | nes_debug(NES_DBG_INIT, "Allocated nes device at %p\n", nesdev); | ||
530 | nesdev->pcidev = pcidev; | ||
531 | pci_set_drvdata(pcidev, nesdev); | ||
532 | |||
533 | pci_read_config_byte(pcidev, 0x0008, &hw_rev); | ||
534 | nes_debug(NES_DBG_INIT, "hw_rev=%u\n", hw_rev); | ||
535 | |||
536 | spin_lock_init(&nesdev->indexed_regs_lock); | ||
537 | |||
538 | /* Remap the PCI registers in adapter BAR0 to kernel VA space */ | ||
539 | mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0), pci_resource_len(pcidev, BAR_0)); | ||
540 | if (mmio_regs == NULL) { | ||
541 | printk(KERN_ERR PFX "Unable to remap BAR0\n"); | ||
542 | ret = -EIO; | ||
543 | goto bail3; | ||
544 | } | ||
545 | nesdev->regs = mmio_regs; | ||
546 | nesdev->index_reg = 0x50 + (PCI_FUNC(pcidev->devfn)*8) + mmio_regs; | ||
547 | |||
548 | /* Ensure interrupts are disabled */ | ||
549 | nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff); | ||
550 | |||
551 | if (nes_drv_opt & NES_DRV_OPT_ENABLE_MSI) { | ||
552 | if (!pci_enable_msi(nesdev->pcidev)) { | ||
553 | nesdev->msi_enabled = 1; | ||
554 | nes_debug(NES_DBG_INIT, "MSI is enabled for device %s\n", | ||
555 | pci_name(pcidev)); | ||
556 | } else { | ||
557 | nes_debug(NES_DBG_INIT, "MSI is disabled by linux for device %s\n", | ||
558 | pci_name(pcidev)); | ||
559 | } | ||
560 | } else { | ||
561 | nes_debug(NES_DBG_INIT, "MSI not requested due to driver options for device %s\n", | ||
562 | pci_name(pcidev)); | ||
563 | } | ||
564 | |||
565 | nesdev->csr_start = pci_resource_start(nesdev->pcidev, BAR_0); | ||
566 | nesdev->doorbell_region = pci_resource_start(nesdev->pcidev, BAR_1); | ||
567 | |||
568 | /* Init the adapter */ | ||
569 | nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev); | ||
570 | if (!nesdev->nesadapter) { | ||
571 | printk(KERN_ERR PFX "Unable to initialize adapter.\n"); | ||
572 | ret = -ENOMEM; | ||
573 | goto bail5; | ||
574 | } | ||
575 | nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; | ||
576 | |||
577 | /* nesdev->base_doorbell_index = | ||
578 | nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ | ||
579 | nesdev->base_doorbell_index = 1; | ||
580 | nesdev->doorbell_start = nesdev->nesadapter->doorbell_start; | ||
581 | nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) % nesdev->nesadapter->port_count; | ||
582 | |||
583 | tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev); | ||
584 | |||
585 | /* bring up the Control QP */ | ||
586 | if (nes_init_cqp(nesdev)) { | ||
587 | ret = -ENODEV; | ||
588 | goto bail6; | ||
589 | } | ||
590 | |||
591 | /* Arm the CCQ */ | ||
592 | nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | | ||
593 | PCI_FUNC(nesdev->pcidev->devfn)); | ||
594 | nes_read32(nesdev->regs+NES_CQE_ALLOC); | ||
595 | |||
596 | /* Enable the interrupts */ | ||
597 | nesdev->int_req = (0x101 << PCI_FUNC(nesdev->pcidev->devfn)) | | ||
598 | (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16)); | ||
599 | if (PCI_FUNC(nesdev->pcidev->devfn) < 4) { | ||
600 | nesdev->int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+24)); | ||
601 | } | ||
602 | |||
603 | /* TODO: This really should be the first driver to load, not function 0 */ | ||
604 | if (PCI_FUNC(nesdev->pcidev->devfn) == 0) { | ||
605 | /* pick up PCI and critical errors if the first driver to load */ | ||
606 | nesdev->intf_int_req = NES_INTF_INT_PCIERR | NES_INTF_INT_CRITERR; | ||
607 | nesdev->int_req |= NES_INT_INTF; | ||
608 | } else { | ||
609 | nesdev->intf_int_req = 0; | ||
610 | } | ||
611 | nesdev->intf_int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16)); | ||
612 | nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0, 0); | ||
613 | nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 0); | ||
614 | nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS2, 0x00001265); | ||
615 | nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS4, 0x18021804); | ||
616 | |||
617 | nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS3, 0x17801790); | ||
618 | |||
619 | /* deal with both periodic and one_shot */ | ||
620 | nesdev->timer_int_req = 0x101 << PCI_FUNC(nesdev->pcidev->devfn); | ||
621 | nesdev->nesadapter->timer_int_req |= nesdev->timer_int_req; | ||
622 | nes_debug(NES_DBG_INIT, "setting int_req for function %u, nesdev = 0x%04X, adapter = 0x%04X\n", | ||
623 | PCI_FUNC(nesdev->pcidev->devfn), | ||
624 | nesdev->timer_int_req, nesdev->nesadapter->timer_int_req); | ||
625 | |||
626 | nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); | ||
627 | |||
628 | list_add_tail(&nesdev->list, &nes_dev_list); | ||
629 | |||
630 | /* Request an interrupt line for the driver */ | ||
631 | ret = request_irq(pcidev->irq, nes_interrupt, IRQF_SHARED, DRV_NAME, nesdev); | ||
632 | if (ret) { | ||
633 | printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n", | ||
634 | pci_name(pcidev), pcidev->irq); | ||
635 | goto bail65; | ||
636 | } | ||
637 | |||
638 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); | ||
639 | |||
640 | if (nes_notifiers_registered == 0) { | ||
641 | register_inetaddr_notifier(&nes_inetaddr_notifier); | ||
642 | register_netevent_notifier(&nes_net_notifier); | ||
643 | } | ||
644 | nes_notifiers_registered++; | ||
645 | |||
646 | /* Initialize network devices */ | ||
647 | if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) { | ||
648 | ret = -ENOMEM; | ||
649 | goto bail7; | ||
650 | } | ||
650 | |||
651 | /* Register network device */ | ||
652 | ret = register_netdev(netdev); | ||
653 | if (ret) { | ||
654 | printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret); | ||
655 | nes_netdev_destroy(netdev); | ||
656 | goto bail7; | ||
657 | } | ||
658 | |||
659 | nes_print_macaddr(netdev); | ||
660 | /* create a CM core for this netdev */ | ||
661 | nesvnic = netdev_priv(netdev); | ||
662 | |||
663 | nesdev->netdev_count++; | ||
664 | nesdev->nesadapter->netdev_count++; | ||
665 | |||
666 | |||
667 | printk(KERN_INFO PFX "%s: NetEffect RNIC driver successfully loaded.\n", | ||
668 | pci_name(pcidev)); | ||
669 | return 0; | ||
670 | |||
671 | bail7: | ||
672 | printk(KERN_ERR PFX "bail7\n"); | ||
673 | while (nesdev->netdev_count > 0) { | ||
674 | nesdev->netdev_count--; | ||
675 | nesdev->nesadapter->netdev_count--; | ||
676 | |||
677 | unregister_netdev(nesdev->netdev[nesdev->netdev_count]); | ||
678 | nes_netdev_destroy(nesdev->netdev[nesdev->netdev_count]); | ||
679 | } | ||
680 | |||
681 | nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n", | ||
682 | nesdev->netdev_count, nesdev->nesadapter->netdev_count); | ||
683 | |||
684 | nes_notifiers_registered--; | ||
685 | if (nes_notifiers_registered == 0) { | ||
686 | unregister_netevent_notifier(&nes_net_notifier); | ||
687 | unregister_inetaddr_notifier(&nes_inetaddr_notifier); | ||
688 | } | ||
689 | |||
690 | list_del(&nesdev->list); | ||
691 | nes_destroy_cqp(nesdev); | ||
692 | |||
693 | bail65: | ||
694 | printk(KERN_ERR PFX "bail65\n"); | ||
695 | free_irq(pcidev->irq, nesdev); | ||
696 | if (nesdev->msi_enabled) { | ||
697 | pci_disable_msi(pcidev); | ||
698 | } | ||
699 | bail6: | ||
700 | printk(KERN_ERR PFX "bail6\n"); | ||
701 | tasklet_kill(&nesdev->dpc_tasklet); | ||
702 | /* Deallocate the Adapter Structure */ | ||
703 | nes_destroy_adapter(nesdev->nesadapter); | ||
704 | |||
705 | bail5: | ||
706 | printk(KERN_ERR PFX "bail5\n"); | ||
707 | iounmap(nesdev->regs); | ||
708 | |||
709 | bail3: | ||
710 | printk(KERN_ERR PFX "bail3\n"); | ||
711 | kfree(nesdev); | ||
712 | |||
713 | bail2: | ||
714 | pci_release_regions(pcidev); | ||
715 | |||
716 | bail1: | ||
717 | pci_disable_device(pcidev); | ||
718 | |||
719 | bail0: | ||
720 | return ret; | ||
721 | } | ||
722 | |||
723 | |||
724 | /** | ||
725 | * nes_remove - unload from kernel | ||
726 | */ | ||
727 | static void __devexit nes_remove(struct pci_dev *pcidev) | ||
728 | { | ||
729 | struct nes_device *nesdev = pci_get_drvdata(pcidev); | ||
730 | struct net_device *netdev; | ||
731 | int netdev_index = 0; | ||
732 | |||
733 | if (nesdev->netdev_count) { | ||
734 | netdev = nesdev->netdev[netdev_index]; | ||
735 | if (netdev) { | ||
736 | netif_stop_queue(netdev); | ||
737 | unregister_netdev(netdev); | ||
738 | nes_netdev_destroy(netdev); | ||
739 | |||
740 | nesdev->netdev[netdev_index] = NULL; | ||
741 | nesdev->netdev_count--; | ||
742 | nesdev->nesadapter->netdev_count--; | ||
743 | } | ||
744 | } | ||
745 | |||
746 | nes_notifiers_registered--; | ||
747 | if (nes_notifiers_registered == 0) { | ||
748 | unregister_netevent_notifier(&nes_net_notifier); | ||
749 | unregister_inetaddr_notifier(&nes_inetaddr_notifier); | ||
750 | } | ||
751 | |||
752 | list_del(&nesdev->list); | ||
753 | nes_destroy_cqp(nesdev); | ||
754 | tasklet_kill(&nesdev->dpc_tasklet); | ||
755 | |||
756 | /* Deallocate the Adapter Structure */ | ||
757 | nes_destroy_adapter(nesdev->nesadapter); | ||
758 | |||
759 | free_irq(pcidev->irq, nesdev); | ||
760 | |||
761 | if (nesdev->msi_enabled) { | ||
762 | pci_disable_msi(pcidev); | ||
763 | } | ||
764 | |||
765 | iounmap(nesdev->regs); | ||
766 | kfree(nesdev); | ||
767 | |||
768 | /* nes_debug(NES_DBG_SHUTDOWN, "calling pci_release_regions.\n"); */ | ||
769 | pci_release_regions(pcidev); | ||
770 | pci_disable_device(pcidev); | ||
771 | pci_set_drvdata(pcidev, NULL); | ||
772 | } | ||
773 | |||
774 | |||
775 | static struct pci_driver nes_pci_driver = { | ||
776 | .name = DRV_NAME, | ||
777 | .id_table = nes_pci_table, | ||
778 | .probe = nes_probe, | ||
779 | .remove = __devexit_p(nes_remove), | ||
780 | }; | ||
781 | |||
782 | static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf) | ||
783 | { | ||
784 | unsigned int devfn = 0xffffffff; | ||
785 | unsigned char bus_number = 0xff; | ||
786 | unsigned int i = 0; | ||
787 | struct nes_device *nesdev; | ||
788 | |||
789 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
790 | if (i == ee_flsh_adapter) { | ||
791 | devfn = nesdev->nesadapter->devfn; | ||
792 | bus_number = nesdev->nesadapter->bus_number; | ||
793 | break; | ||
794 | } | ||
795 | i++; | ||
796 | } | ||
797 | |||
798 | return snprintf(buf, PAGE_SIZE, "%x:%x", bus_number, devfn); | ||
799 | } | ||
800 | |||
801 | static ssize_t nes_store_adapter(struct device_driver *ddp, | ||
802 | const char *buf, size_t count) | ||
803 | { | ||
804 | char *p = (char *)buf; | ||
805 | |||
806 | ee_flsh_adapter = simple_strtoul(p, &p, 10); | ||
807 | return strnlen(buf, count); | ||
808 | } | ||
809 | |||
810 | static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf) | ||
811 | { | ||
812 | u32 eeprom_cmd = 0xdead; | ||
813 | u32 i = 0; | ||
814 | struct nes_device *nesdev; | ||
815 | |||
816 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
817 | if (i == ee_flsh_adapter) { | ||
818 | eeprom_cmd = nes_read32(nesdev->regs + NES_EEPROM_COMMAND); | ||
819 | break; | ||
820 | } | ||
821 | i++; | ||
822 | } | ||
823 | return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_cmd); | ||
824 | } | ||
825 | |||
826 | static ssize_t nes_store_ee_cmd(struct device_driver *ddp, | ||
827 | const char *buf, size_t count) | ||
828 | { | ||
829 | char *p = (char *)buf; | ||
830 | u32 val; | ||
831 | u32 i = 0; | ||
832 | struct nes_device *nesdev; | ||
833 | |||
834 | if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { | ||
835 | val = simple_strtoul(p, &p, 16); | ||
836 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
837 | if (i == ee_flsh_adapter) { | ||
838 | nes_write32(nesdev->regs + NES_EEPROM_COMMAND, val); | ||
839 | break; | ||
840 | } | ||
841 | i++; | ||
842 | } | ||
843 | } | ||
844 | return strnlen(buf, count); | ||
845 | } | ||
846 | |||
847 | static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf) | ||
848 | { | ||
849 | u32 eeprom_data = 0xdead; | ||
850 | u32 i = 0; | ||
851 | struct nes_device *nesdev; | ||
852 | |||
853 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
854 | if (i == ee_flsh_adapter) { | ||
855 | eeprom_data = nes_read32(nesdev->regs + NES_EEPROM_DATA); | ||
856 | break; | ||
857 | } | ||
858 | i++; | ||
859 | } | ||
860 | |||
861 | return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_data); | ||
862 | } | ||
863 | |||
864 | static ssize_t nes_store_ee_data(struct device_driver *ddp, | ||
865 | const char *buf, size_t count) | ||
866 | { | ||
867 | char *p = (char *)buf; | ||
868 | u32 val; | ||
869 | u32 i = 0; | ||
870 | struct nes_device *nesdev; | ||
871 | |||
872 | if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { | ||
873 | val = simple_strtoul(p, &p, 16); | ||
874 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
875 | if (i == ee_flsh_adapter) { | ||
876 | nes_write32(nesdev->regs + NES_EEPROM_DATA, val); | ||
877 | break; | ||
878 | } | ||
879 | i++; | ||
880 | } | ||
881 | } | ||
882 | return strnlen(buf, count); | ||
883 | } | ||
884 | |||
885 | static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf) | ||
886 | { | ||
887 | u32 flash_cmd = 0xdead; | ||
888 | u32 i = 0; | ||
889 | struct nes_device *nesdev; | ||
890 | |||
891 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
892 | if (i == ee_flsh_adapter) { | ||
893 | flash_cmd = nes_read32(nesdev->regs + NES_FLASH_COMMAND); | ||
894 | break; | ||
895 | } | ||
896 | i++; | ||
897 | } | ||
898 | |||
899 | return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_cmd); | ||
900 | } | ||
901 | |||
902 | static ssize_t nes_store_flash_cmd(struct device_driver *ddp, | ||
903 | const char *buf, size_t count) | ||
904 | { | ||
905 | char *p = (char *)buf; | ||
906 | u32 val; | ||
907 | u32 i = 0; | ||
908 | struct nes_device *nesdev; | ||
909 | |||
910 | if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { | ||
911 | val = simple_strtoul(p, &p, 16); | ||
912 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
913 | if (i == ee_flsh_adapter) { | ||
914 | nes_write32(nesdev->regs + NES_FLASH_COMMAND, val); | ||
915 | break; | ||
916 | } | ||
917 | i++; | ||
918 | } | ||
919 | } | ||
920 | return strnlen(buf, count); | ||
921 | } | ||
922 | |||
923 | static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf) | ||
924 | { | ||
925 | u32 flash_data = 0xdead; | ||
926 | u32 i = 0; | ||
927 | struct nes_device *nesdev; | ||
928 | |||
929 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
930 | if (i == ee_flsh_adapter) { | ||
931 | flash_data = nes_read32(nesdev->regs + NES_FLASH_DATA); | ||
932 | break; | ||
933 | } | ||
934 | i++; | ||
935 | } | ||
936 | |||
937 | return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_data); | ||
938 | } | ||
939 | |||
940 | static ssize_t nes_store_flash_data(struct device_driver *ddp, | ||
941 | const char *buf, size_t count) | ||
942 | { | ||
943 | char *p = (char *)buf; | ||
944 | u32 val; | ||
945 | u32 i = 0; | ||
946 | struct nes_device *nesdev; | ||
947 | |||
948 | if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { | ||
949 | val = simple_strtoul(p, &p, 16); | ||
950 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
951 | if (i == ee_flsh_adapter) { | ||
952 | nes_write32(nesdev->regs + NES_FLASH_DATA, val); | ||
953 | break; | ||
954 | } | ||
955 | i++; | ||
956 | } | ||
957 | } | ||
958 | return strnlen(buf, count); | ||
959 | } | ||
960 | |||
961 | static ssize_t nes_show_nonidx_addr(struct device_driver *ddp, char *buf) | ||
962 | { | ||
963 | return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_nonidx_addr); | ||
964 | } | ||
965 | |||
966 | static ssize_t nes_store_nonidx_addr(struct device_driver *ddp, | ||
967 | const char *buf, size_t count) | ||
968 | { | ||
969 | char *p = (char *)buf; | ||
970 | |||
971 | if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') | ||
972 | sysfs_nonidx_addr = simple_strtoul(p, &p, 16); | ||
973 | |||
974 | return strnlen(buf, count); | ||
975 | } | ||
976 | |||
977 | static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf) | ||
978 | { | ||
979 | u32 nonidx_data = 0xdead; | ||
980 | u32 i = 0; | ||
981 | struct nes_device *nesdev; | ||
982 | |||
983 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
984 | if (i == ee_flsh_adapter) { | ||
985 | nonidx_data = nes_read32(nesdev->regs + sysfs_nonidx_addr); | ||
986 | break; | ||
987 | } | ||
988 | i++; | ||
989 | } | ||
990 | |||
991 | return snprintf(buf, PAGE_SIZE, "0x%x\n", nonidx_data); | ||
992 | } | ||
993 | |||
994 | static ssize_t nes_store_nonidx_data(struct device_driver *ddp, | ||
995 | const char *buf, size_t count) | ||
996 | { | ||
997 | char *p = (char *)buf; | ||
998 | u32 val; | ||
999 | u32 i = 0; | ||
1000 | struct nes_device *nesdev; | ||
1001 | |||
1002 | if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { | ||
1003 | val = simple_strtoul(p, &p, 16); | ||
1004 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
1005 | if (i == ee_flsh_adapter) { | ||
1006 | nes_write32(nesdev->regs + sysfs_nonidx_addr, val); | ||
1007 | break; | ||
1008 | } | ||
1009 | i++; | ||
1010 | } | ||
1011 | } | ||
1012 | return strnlen(buf, count); | ||
1013 | } | ||
1014 | |||
1015 | static ssize_t nes_show_idx_addr(struct device_driver *ddp, char *buf) | ||
1016 | { | ||
1017 | return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_idx_addr); | ||
1018 | } | ||
1019 | |||
1020 | static ssize_t nes_store_idx_addr(struct device_driver *ddp, | ||
1021 | const char *buf, size_t count) | ||
1022 | { | ||
1023 | char *p = (char *)buf; | ||
1024 | |||
1025 | if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') | ||
1026 | sysfs_idx_addr = simple_strtoul(p, &p, 16); | ||
1027 | |||
1028 | return strnlen(buf, count); | ||
1029 | } | ||
1030 | |||
1031 | static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf) | ||
1032 | { | ||
1033 | u32 idx_data = 0xdead; | ||
1034 | u32 i = 0; | ||
1035 | struct nes_device *nesdev; | ||
1036 | |||
1037 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
1038 | if (i == ee_flsh_adapter) { | ||
1039 | idx_data = nes_read_indexed(nesdev, sysfs_idx_addr); | ||
1040 | break; | ||
1041 | } | ||
1042 | i++; | ||
1043 | } | ||
1044 | |||
1045 | return snprintf(buf, PAGE_SIZE, "0x%x\n", idx_data); | ||
1046 | } | ||
1047 | |||
1048 | static ssize_t nes_store_idx_data(struct device_driver *ddp, | ||
1049 | const char *buf, size_t count) | ||
1050 | { | ||
1051 | char *p = (char *)buf; | ||
1052 | u32 val; | ||
1053 | u32 i = 0; | ||
1054 | struct nes_device *nesdev; | ||
1055 | |||
1056 | if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { | ||
1057 | val = simple_strtoul(p, &p, 16); | ||
1058 | list_for_each_entry(nesdev, &nes_dev_list, list) { | ||
1059 | if (i == ee_flsh_adapter) { | ||
1060 | nes_write_indexed(nesdev, sysfs_idx_addr, val); | ||
1061 | break; | ||
1062 | } | ||
1063 | i++; | ||
1064 | } | ||
1065 | } | ||
1066 | return strnlen(buf, count); | ||
1067 | } | ||
1068 | |||
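/* Driver-level sysfs files for EEPROM, flash and register access.  The | ||
* "adapter" attribute selects (by position in nes_dev_list) which device the | ||
* remaining attributes read from or write to. */ | ||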
1069 | static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR, | ||
1070 | nes_show_adapter, nes_store_adapter); | ||
1071 | static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR, | ||
1072 | nes_show_ee_cmd, nes_store_ee_cmd); | ||
1073 | static DRIVER_ATTR(eeprom_data, S_IRUSR | S_IWUSR, | ||
1074 | nes_show_ee_data, nes_store_ee_data); | ||
1075 | static DRIVER_ATTR(flash_cmd, S_IRUSR | S_IWUSR, | ||
1076 | nes_show_flash_cmd, nes_store_flash_cmd); | ||
1077 | static DRIVER_ATTR(flash_data, S_IRUSR | S_IWUSR, | ||
1078 | nes_show_flash_data, nes_store_flash_data); | ||
1079 | static DRIVER_ATTR(nonidx_addr, S_IRUSR | S_IWUSR, | ||
1080 | nes_show_nonidx_addr, nes_store_nonidx_addr); | ||
1081 | static DRIVER_ATTR(nonidx_data, S_IRUSR | S_IWUSR, | ||
1082 | nes_show_nonidx_data, nes_store_nonidx_data); | ||
1083 | static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR, | ||
1084 | nes_show_idx_addr, nes_store_idx_addr); | ||
1085 | static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR, | ||
1086 | nes_show_idx_data, nes_store_idx_data); | ||
1087 | |||
1088 | static int nes_create_driver_sysfs(struct pci_driver *drv) | ||
1089 | { | ||
1090 | int error; | ||
1091 | error = driver_create_file(&drv->driver, &driver_attr_adapter); | ||
1092 | error |= driver_create_file(&drv->driver, &driver_attr_eeprom_cmd); | ||
1093 | error |= driver_create_file(&drv->driver, &driver_attr_eeprom_data); | ||
1094 | error |= driver_create_file(&drv->driver, &driver_attr_flash_cmd); | ||
1095 | error |= driver_create_file(&drv->driver, &driver_attr_flash_data); | ||
1096 | error |= driver_create_file(&drv->driver, &driver_attr_nonidx_addr); | ||
1097 | error |= driver_create_file(&drv->driver, &driver_attr_nonidx_data); | ||
1098 | error |= driver_create_file(&drv->driver, &driver_attr_idx_addr); | ||
1099 | error |= driver_create_file(&drv->driver, &driver_attr_idx_data); | ||
1100 | return error; | ||
1101 | } | ||
1102 | |||
1103 | static void nes_remove_driver_sysfs(struct pci_driver *drv) | ||
1104 | { | ||
1105 | driver_remove_file(&drv->driver, &driver_attr_adapter); | ||
1106 | driver_remove_file(&drv->driver, &driver_attr_eeprom_cmd); | ||
1107 | driver_remove_file(&drv->driver, &driver_attr_eeprom_data); | ||
1108 | driver_remove_file(&drv->driver, &driver_attr_flash_cmd); | ||
1109 | driver_remove_file(&drv->driver, &driver_attr_flash_data); | ||
1110 | driver_remove_file(&drv->driver, &driver_attr_nonidx_addr); | ||
1111 | driver_remove_file(&drv->driver, &driver_attr_nonidx_data); | ||
1112 | driver_remove_file(&drv->driver, &driver_attr_idx_addr); | ||
1113 | driver_remove_file(&drv->driver, &driver_attr_idx_data); | ||
1114 | } | ||
1115 | |||
1116 | /** | ||
1117 | * nes_init_module - module initialization entry point | ||
1118 | */ | ||
1119 | static int __init nes_init_module(void) | ||
1120 | { | ||
1121 | int retval; | ||
1122 | int retval1; | ||
1123 | |||
1124 | retval = nes_cm_start(); | ||
1125 | if (retval) { | ||
1126 | printk(KERN_ERR PFX "Unable to start NetEffect iWARP CM.\n"); | ||
1127 | return retval; | ||
1128 | } | ||
1129 | retval = pci_register_driver(&nes_pci_driver); | ||
1130 | if (retval >= 0) { | ||
1131 | retval1 = nes_create_driver_sysfs(&nes_pci_driver); | ||
1132 | if (retval1 < 0) | ||
1133 | printk(KERN_ERR PFX "Unable to create NetEffect sys files.\n"); | ||
1134 | } else | ||
1135 | nes_cm_stop(); | ||
1136 | return retval; | ||
1136 | } | ||
1137 | |||
1138 | |||
1139 | /** | ||
1140 | * nes_exit_module - module unload entry point | ||
1141 | */ | ||
1142 | static void __exit nes_exit_module(void) | ||
1143 | { | ||
1144 | nes_cm_stop(); | ||
1145 | nes_remove_driver_sysfs(&nes_pci_driver); | ||
1146 | |||
1147 | pci_unregister_driver(&nes_pci_driver); | ||
1148 | } | ||
1149 | |||
1150 | |||
1151 | module_init(nes_init_module); | ||
1152 | module_exit(nes_exit_module); | ||
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h new file mode 100644 index 000000000000..fd57e8a1582f --- /dev/null +++ b/drivers/infiniband/hw/nes/nes.h | |||
@@ -0,0 +1,560 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #ifndef __NES_H | ||
35 | #define __NES_H | ||
36 | |||
37 | #include <linux/netdevice.h> | ||
38 | #include <linux/inetdevice.h> | ||
39 | #include <linux/spinlock.h> | ||
40 | #include <linux/kernel.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/pci.h> | ||
43 | #include <linux/dma-mapping.h> | ||
44 | #include <linux/workqueue.h> | ||
45 | #include <linux/slab.h> | ||
46 | #include <asm/semaphore.h> | ||
47 | #include <linux/version.h> | ||
48 | #include <asm/io.h> | ||
49 | #include <linux/crc32c.h> | ||
50 | |||
51 | #include <rdma/ib_smi.h> | ||
52 | #include <rdma/ib_verbs.h> | ||
53 | #include <rdma/ib_pack.h> | ||
54 | #include <rdma/rdma_cm.h> | ||
55 | #include <rdma/iw_cm.h> | ||
56 | |||
57 | #define NES_SEND_FIRST_WRITE | ||
58 | |||
59 | #define QUEUE_DISCONNECTS | ||
60 | |||
61 | #define DRV_BUILD "1" | ||
62 | |||
63 | #define DRV_NAME "iw_nes" | ||
64 | #define DRV_VERSION "1.0 KO Build " DRV_BUILD | ||
65 | #define PFX DRV_NAME ": " | ||
66 | |||
67 | /* | ||
68 | * NetEffect PCI vendor id and NE010 PCI device id. | ||
69 | */ | ||
70 | #ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */ | ||
71 | #define PCI_VENDOR_ID_NETEFFECT 0x1678 | ||
72 | #define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100 | ||
73 | #endif | ||
74 | |||
75 | #define NE020_REV 4 | ||
76 | #define NE020_REV1 5 | ||
77 | |||
78 | #define BAR_0 0 | ||
79 | #define BAR_1 2 | ||
80 | |||
81 | #define RX_BUF_SIZE (1536 + 8) | ||
82 | #define NES_REG0_SIZE (4 * 1024) | ||
83 | #define NES_TX_TIMEOUT (6*HZ) | ||
84 | #define NES_FIRST_QPN 64 | ||
85 | #define NES_SW_CONTEXT_ALIGN 1024 | ||
86 | |||
87 | #define NES_NIC_MAX_NICS 16 | ||
88 | #define NES_MAX_ARP_TABLE_SIZE 4096 | ||
89 | |||
90 | #define NES_NIC_CEQ_SIZE 8 | ||
91 | /* NICs will be on a separate CQ */ | ||
92 | #define NES_CCEQ_SIZE ((nesadapter->max_cq / nesadapter->port_count) - 32) | ||
93 | |||
94 | #define NES_MAX_PORT_COUNT 4 | ||
95 | |||
96 | #define MAX_DPC_ITERATIONS 128 | ||
97 | |||
98 | #define NES_CQP_REQUEST_NO_DOORBELL_RING 0 | ||
99 | #define NES_CQP_REQUEST_RING_DOORBELL 1 | ||
100 | |||
101 | #define NES_DRV_OPT_ENABLE_MPA_VER_0 0x00000001 | ||
102 | #define NES_DRV_OPT_DISABLE_MPA_CRC 0x00000002 | ||
103 | #define NES_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004 | ||
104 | #define NES_DRV_OPT_DISABLE_INTF 0x00000008 | ||
105 | #define NES_DRV_OPT_ENABLE_MSI 0x00000010 | ||
106 | #define NES_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020 | ||
107 | #define NES_DRV_OPT_SUPRESS_OPTION_BC 0x00000040 | ||
108 | #define NES_DRV_OPT_NO_INLINE_DATA 0x00000080 | ||
109 | #define NES_DRV_OPT_DISABLE_INT_MOD 0x00000100 | ||
110 | #define NES_DRV_OPT_DISABLE_VIRT_WQ 0x00000200 | ||
111 | |||
112 | #define NES_AEQ_EVENT_TIMEOUT 2500 | ||
113 | #define NES_DISCONNECT_EVENT_TIMEOUT 2000 | ||
114 | |||
115 | /* debug levels */ | ||
116 | /* must match userspace */ | ||
117 | #define NES_DBG_HW 0x00000001 | ||
118 | #define NES_DBG_INIT 0x00000002 | ||
119 | #define NES_DBG_ISR 0x00000004 | ||
120 | #define NES_DBG_PHY 0x00000008 | ||
121 | #define NES_DBG_NETDEV 0x00000010 | ||
122 | #define NES_DBG_CM 0x00000020 | ||
123 | #define NES_DBG_CM1 0x00000040 | ||
124 | #define NES_DBG_NIC_RX 0x00000080 | ||
125 | #define NES_DBG_NIC_TX 0x00000100 | ||
126 | #define NES_DBG_CQP 0x00000200 | ||
127 | #define NES_DBG_MMAP 0x00000400 | ||
128 | #define NES_DBG_MR 0x00000800 | ||
129 | #define NES_DBG_PD 0x00001000 | ||
130 | #define NES_DBG_CQ 0x00002000 | ||
131 | #define NES_DBG_QP 0x00004000 | ||
132 | #define NES_DBG_MOD_QP 0x00008000 | ||
133 | #define NES_DBG_AEQ 0x00010000 | ||
134 | #define NES_DBG_IW_RX 0x00020000 | ||
135 | #define NES_DBG_IW_TX 0x00040000 | ||
136 | #define NES_DBG_SHUTDOWN 0x00080000 | ||
137 | #define NES_DBG_RSVD1 0x10000000 | ||
138 | #define NES_DBG_RSVD2 0x20000000 | ||
139 | #define NES_DBG_RSVD3 0x40000000 | ||
140 | #define NES_DBG_RSVD4 0x80000000 | ||
141 | #define NES_DBG_ALL 0xffffffff | ||
142 | |||
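/* nes_debug() compares these bits against the debug_level module parameter; | ||
* with CONFIG_INFINIBAND_NES_DEBUG disabled the macro compiles away. */ | ||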
143 | #ifdef CONFIG_INFINIBAND_NES_DEBUG | ||
144 | #define nes_debug(level, fmt, args...) \ | ||
145 | if (level & nes_debug_level) \ | ||
146 | printk(KERN_ERR PFX "%s[%u]: " fmt, __FUNCTION__, __LINE__, ##args) | ||
147 | |||
148 | #define assert(expr) \ | ||
149 | if (!(expr)) { \ | ||
150 | printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \ | ||
151 | #expr, __FILE__, __FUNCTION__, __LINE__); \ | ||
152 | } | ||
153 | |||
154 | #define NES_EVENT_TIMEOUT 1200000 | ||
155 | #else | ||
156 | #define nes_debug(level, fmt, args...) | ||
157 | #define assert(expr) do {} while (0) | ||
158 | |||
159 | #define NES_EVENT_TIMEOUT 100000 | ||
160 | #endif | ||
161 | |||
162 | #include "nes_hw.h" | ||
163 | #include "nes_verbs.h" | ||
164 | #include "nes_context.h" | ||
165 | #include "nes_user.h" | ||
166 | #include "nes_cm.h" | ||
167 | |||
168 | extern int max_mtu; | ||
169 | extern int nics_per_function; | ||
170 | #define max_frame_len (max_mtu+ETH_HLEN) | ||
171 | extern int interrupt_mod_interval; | ||
172 | extern int nes_if_count; | ||
173 | extern int mpa_version; | ||
174 | extern int disable_mpa_crc; | ||
175 | extern unsigned int send_first; | ||
176 | extern unsigned int nes_drv_opt; | ||
177 | extern unsigned int nes_debug_level; | ||
178 | |||
179 | extern struct list_head nes_adapter_list; | ||
180 | extern struct list_head nes_dev_list; | ||
181 | |||
182 | extern struct nes_cm_core *g_cm_core; | ||
183 | |||
184 | extern atomic_t cm_connects; | ||
185 | extern atomic_t cm_accepts; | ||
186 | extern atomic_t cm_disconnects; | ||
187 | extern atomic_t cm_closes; | ||
188 | extern atomic_t cm_connecteds; | ||
189 | extern atomic_t cm_connect_reqs; | ||
190 | extern atomic_t cm_rejects; | ||
191 | extern atomic_t mod_qp_timouts; | ||
192 | extern atomic_t qps_created; | ||
193 | extern atomic_t qps_destroyed; | ||
194 | extern atomic_t sw_qps_destroyed; | ||
195 | extern u32 mh_detected; | ||
196 | extern u32 mh_pauses_sent; | ||
197 | extern u32 cm_packets_sent; | ||
198 | extern u32 cm_packets_bounced; | ||
199 | extern u32 cm_packets_created; | ||
200 | extern u32 cm_packets_received; | ||
201 | extern u32 cm_packets_dropped; | ||
202 | extern u32 cm_packets_retrans; | ||
203 | extern u32 cm_listens_created; | ||
204 | extern u32 cm_listens_destroyed; | ||
205 | extern u32 cm_backlog_drops; | ||
206 | extern atomic_t cm_loopbacks; | ||
207 | extern atomic_t cm_nodes_created; | ||
208 | extern atomic_t cm_nodes_destroyed; | ||
209 | extern atomic_t cm_accel_dropped_pkts; | ||
210 | extern atomic_t cm_resets_recvd; | ||
211 | |||
212 | extern u32 crit_err_count; | ||
213 | extern u32 int_mod_timer_init; | ||
214 | extern u32 int_mod_cq_depth_256; | ||
215 | extern u32 int_mod_cq_depth_128; | ||
216 | extern u32 int_mod_cq_depth_32; | ||
217 | extern u32 int_mod_cq_depth_24; | ||
218 | extern u32 int_mod_cq_depth_16; | ||
219 | extern u32 int_mod_cq_depth_4; | ||
220 | extern u32 int_mod_cq_depth_1; | ||
221 | |||
222 | extern atomic_t cqp_reqs_allocated; | ||
223 | extern atomic_t cqp_reqs_freed; | ||
224 | extern atomic_t cqp_reqs_dynallocated; | ||
225 | extern atomic_t cqp_reqs_dynfreed; | ||
226 | extern atomic_t cqp_reqs_queued; | ||
227 | extern atomic_t cqp_reqs_redriven; | ||
228 | |||
229 | |||
230 | struct nes_device { | ||
231 | struct nes_adapter *nesadapter; | ||
232 | void __iomem *regs; | ||
233 | void __iomem *index_reg; | ||
234 | struct pci_dev *pcidev; | ||
235 | struct net_device *netdev[NES_NIC_MAX_NICS]; | ||
236 | u64 link_status_interrupts; | ||
237 | struct tasklet_struct dpc_tasklet; | ||
238 | spinlock_t indexed_regs_lock; | ||
239 | unsigned long csr_start; | ||
240 | unsigned long doorbell_region; | ||
241 | unsigned long doorbell_start; | ||
242 | unsigned long mac_tx_errors; | ||
243 | unsigned long mac_pause_frames_sent; | ||
244 | unsigned long mac_pause_frames_received; | ||
245 | unsigned long mac_rx_errors; | ||
246 | unsigned long mac_rx_crc_errors; | ||
247 | unsigned long mac_rx_symbol_err_frames; | ||
248 | unsigned long mac_rx_jabber_frames; | ||
249 | unsigned long mac_rx_oversized_frames; | ||
250 | unsigned long mac_rx_short_frames; | ||
251 | unsigned long port_rx_discards; | ||
252 | unsigned long port_tx_discards; | ||
253 | unsigned int mac_index; | ||
254 | unsigned int nes_stack_start; | ||
255 | |||
256 | /* Control Structures */ | ||
257 | void *cqp_vbase; | ||
258 | dma_addr_t cqp_pbase; | ||
259 | u32 cqp_mem_size; | ||
260 | u8 ceq_index; | ||
261 | u8 nic_ceq_index; | ||
262 | struct nes_hw_cqp cqp; | ||
263 | struct nes_hw_cq ccq; | ||
264 | struct list_head cqp_avail_reqs; | ||
265 | struct list_head cqp_pending_reqs; | ||
266 | struct nes_cqp_request *nes_cqp_requests; | ||
267 | |||
268 | u32 int_req; | ||
269 | u32 int_stat; | ||
270 | u32 timer_int_req; | ||
271 | u32 timer_only_int_count; | ||
272 | u32 intf_int_req; | ||
273 | u32 last_mac_tx_pauses; | ||
274 | u32 last_used_chunks_tx; | ||
275 | struct list_head list; | ||
276 | |||
277 | u16 base_doorbell_index; | ||
278 | u16 currcq_count; | ||
279 | u16 deepcq_count; | ||
280 | u8 msi_enabled; | ||
281 | u8 netdev_count; | ||
282 | u8 napi_isr_ran; | ||
283 | u8 disable_rx_flow_control; | ||
284 | u8 disable_tx_flow_control; | ||
285 | }; | ||
286 | |||
287 | |||
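/* WQE words are little-endian; a 64-bit value occupies two consecutive 32-bit | ||
* words, low word first. */ | ||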
288 | static inline void | ||
289 | set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) | ||
290 | { | ||
291 | wqe_words[index] = cpu_to_le32((u32) value); | ||
292 | wqe_words[index + 1] = cpu_to_le32(upper_32_bits(value)); | ||
293 | } | ||
294 | |||
295 | static inline void | ||
296 | set_wqe_32bit_value(__le32 *wqe_words, u32 index, u32 value) | ||
297 | { | ||
298 | wqe_words[index] = cpu_to_le32(value); | ||
299 | } | ||
300 | |||
301 | static inline void | ||
302 | nes_fill_init_cqp_wqe(struct nes_hw_cqp_wqe *cqp_wqe, struct nes_device *nesdev) | ||
303 | { | ||
304 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_CTX_LOW_IDX, | ||
305 | (u64)((unsigned long) &nesdev->cqp)); | ||
306 | cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0; | ||
307 | cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0; | ||
308 | cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] = 0; | ||
309 | cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_LEN_IDX] = 0; | ||
310 | cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_LOW_IDX] = 0; | ||
311 | cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PA_LOW_IDX] = 0; | ||
312 | cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PA_HIGH_IDX] = 0; | ||
313 | } | ||
314 | |||
315 | static inline void | ||
316 | nes_fill_init_qp_wqe(struct nes_hw_qp_wqe *wqe, struct nes_qp *nesqp, u32 head) | ||
317 | { | ||
318 | u32 value; | ||
319 | value = ((u32)((unsigned long) nesqp)) | head; | ||
320 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX, | ||
321 | (u32)(upper_32_bits((unsigned long)(nesqp)))); | ||
322 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, value); | ||
323 | } | ||
324 | |||
325 | /* Read from memory-mapped device */ | ||
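/* Indexed registers sit behind a two-register window: write the target index | ||
* to index_reg, then access the data word at index_reg + 4, serialized by | ||
* indexed_regs_lock. */ | ||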
326 | static inline u32 nes_read_indexed(struct nes_device *nesdev, u32 reg_index) | ||
327 | { | ||
328 | unsigned long flags; | ||
329 | void __iomem *addr = nesdev->index_reg; | ||
330 | u32 value; | ||
331 | |||
332 | spin_lock_irqsave(&nesdev->indexed_regs_lock, flags); | ||
333 | |||
334 | writel(reg_index, addr); | ||
335 | value = readl((void __iomem *)addr + 4); | ||
336 | |||
337 | spin_unlock_irqrestore(&nesdev->indexed_regs_lock, flags); | ||
338 | return value; | ||
339 | } | ||
340 | |||
341 | static inline u32 nes_read32(const void __iomem *addr) | ||
342 | { | ||
343 | return readl(addr); | ||
344 | } | ||
345 | |||
346 | static inline u16 nes_read16(const void __iomem *addr) | ||
347 | { | ||
348 | return readw(addr); | ||
349 | } | ||
350 | |||
351 | static inline u8 nes_read8(const void __iomem *addr) | ||
352 | { | ||
353 | return readb(addr); | ||
354 | } | ||
355 | |||
356 | /* Write to memory-mapped device */ | ||
357 | static inline void nes_write_indexed(struct nes_device *nesdev, u32 reg_index, u32 val) | ||
358 | { | ||
359 | unsigned long flags; | ||
360 | void __iomem *addr = nesdev->index_reg; | ||
361 | |||
362 | spin_lock_irqsave(&nesdev->indexed_regs_lock, flags); | ||
363 | |||
364 | writel(reg_index, addr); | ||
365 | writel(val, (void __iomem *)addr + 4); | ||
366 | |||
367 | spin_unlock_irqrestore(&nesdev->indexed_regs_lock, flags); | ||
368 | } | ||
369 | |||
370 | static inline void nes_write32(void __iomem *addr, u32 val) | ||
371 | { | ||
372 | writel(val, addr); | ||
373 | } | ||
374 | |||
375 | static inline void nes_write16(void __iomem *addr, u16 val) | ||
376 | { | ||
377 | writew(val, addr); | ||
378 | } | ||
379 | |||
380 | static inline void nes_write8(void __iomem *addr, u8 val) | ||
381 | { | ||
382 | writeb(val, addr); | ||
383 | } | ||
384 | |||
385 | |||
386 | |||
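/* Bitmap-based id allocator: the search starts at *next and wraps around | ||
* once, so ids are handed out round-robin under resource_lock; returns | ||
* -EMFILE when the pool is exhausted. */ | ||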
387 | static inline int nes_alloc_resource(struct nes_adapter *nesadapter, | ||
388 | unsigned long *resource_array, u32 max_resources, | ||
389 | u32 *req_resource_num, u32 *next) | ||
390 | { | ||
391 | unsigned long flags; | ||
392 | u32 resource_num; | ||
393 | |||
394 | spin_lock_irqsave(&nesadapter->resource_lock, flags); | ||
395 | |||
396 | resource_num = find_next_zero_bit(resource_array, max_resources, *next); | ||
397 | if (resource_num >= max_resources) { | ||
398 | resource_num = find_first_zero_bit(resource_array, max_resources); | ||
399 | if (resource_num >= max_resources) { | ||
400 | printk(KERN_ERR PFX "%s: No available resources.\n", __FUNCTION__); | ||
401 | spin_unlock_irqrestore(&nesadapter->resource_lock, flags); | ||
402 | return -EMFILE; | ||
403 | } | ||
404 | } | ||
405 | set_bit(resource_num, resource_array); | ||
406 | *next = resource_num+1; | ||
407 | if (*next == max_resources) { | ||
408 | *next = 0; | ||
409 | } | ||
410 | spin_unlock_irqrestore(&nesadapter->resource_lock, flags); | ||
411 | *req_resource_num = resource_num; | ||
412 | |||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | static inline int nes_is_resource_allocated(struct nes_adapter *nesadapter, | ||
417 | unsigned long *resource_array, u32 resource_num) | ||
418 | { | ||
419 | unsigned long flags; | ||
420 | int bit_is_set; | ||
421 | |||
422 | spin_lock_irqsave(&nesadapter->resource_lock, flags); | ||
423 | |||
424 | bit_is_set = test_bit(resource_num, resource_array); | ||
425 | nes_debug(NES_DBG_HW, "resource_num %u is%s allocated.\n", | ||
426 | resource_num, (bit_is_set ? "": " not")); | ||
427 | spin_unlock_irqrestore(&nesadapter->resource_lock, flags); | ||
428 | |||
429 | return bit_is_set; | ||
430 | } | ||
431 | |||
432 | static inline void nes_free_resource(struct nes_adapter *nesadapter, | ||
433 | unsigned long *resource_array, u32 resource_num) | ||
434 | { | ||
435 | unsigned long flags; | ||
436 | |||
437 | spin_lock_irqsave(&nesadapter->resource_lock, flags); | ||
438 | clear_bit(resource_num, resource_array); | ||
439 | spin_unlock_irqrestore(&nesadapter->resource_lock, flags); | ||
440 | } | ||
441 | |||
442 | static inline struct nes_vnic *to_nesvnic(struct ib_device *ibdev) | ||
443 | { | ||
444 | return container_of(ibdev, struct nes_ib_device, ibdev)->nesvnic; | ||
445 | } | ||
446 | |||
447 | static inline struct nes_pd *to_nespd(struct ib_pd *ibpd) | ||
448 | { | ||
449 | return container_of(ibpd, struct nes_pd, ibpd); | ||
450 | } | ||
451 | |||
452 | static inline struct nes_ucontext *to_nesucontext(struct ib_ucontext *ibucontext) | ||
453 | { | ||
454 | return container_of(ibucontext, struct nes_ucontext, ibucontext); | ||
455 | } | ||
456 | |||
457 | static inline struct nes_mr *to_nesmr(struct ib_mr *ibmr) | ||
458 | { | ||
459 | return container_of(ibmr, struct nes_mr, ibmr); | ||
460 | } | ||
461 | |||
462 | static inline struct nes_mr *to_nesmr_from_ibfmr(struct ib_fmr *ibfmr) | ||
463 | { | ||
464 | return container_of(ibfmr, struct nes_mr, ibfmr); | ||
465 | } | ||
466 | |||
467 | static inline struct nes_mr *to_nesmw(struct ib_mw *ibmw) | ||
468 | { | ||
469 | return container_of(ibmw, struct nes_mr, ibmw); | ||
470 | } | ||
471 | |||
472 | static inline struct nes_fmr *to_nesfmr(struct nes_mr *nesmr) | ||
473 | { | ||
474 | return container_of(nesmr, struct nes_fmr, nesmr); | ||
475 | } | ||
476 | |||
477 | static inline struct nes_cq *to_nescq(struct ib_cq *ibcq) | ||
478 | { | ||
479 | return container_of(ibcq, struct nes_cq, ibcq); | ||
480 | } | ||
481 | |||
482 | static inline struct nes_qp *to_nesqp(struct ib_qp *ibqp) | ||
483 | { | ||
484 | return container_of(ibqp, struct nes_qp, ibqp); | ||
485 | } | ||
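The to_*() converters above are all the standard container_of() idiom: each nes_* structure embeds its ib_* counterpart, so the enclosing driver object can be recovered from the pointer the RDMA midlayer hands back. A minimal sketch of the pattern, with generic names that are not taken from this header:

/* the driver structure embeds the core object ... */
struct example_qp {
	struct ib_qp ibqp;	/* embedded core object */
	u32 private_state;	/* driver-private fields follow */
};

/* ... so container_of() recovers the enclosing structure from &example_qp.ibqp */
static inline struct example_qp *to_example_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct example_qp, ibqp);
}

Note that to_nesvnic() takes two hops: the ib_device is embedded in nes_ib_device, which in turn carries the nesvnic pointer.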
486 | |||
487 | |||
488 | |||
489 | /* nes.c */ | ||
490 | void nes_add_ref(struct ib_qp *); | ||
491 | void nes_rem_ref(struct ib_qp *); | ||
492 | struct ib_qp *nes_get_qp(struct ib_device *, int); | ||
493 | |||
494 | |||
495 | /* nes_hw.c */ | ||
496 | struct nes_adapter *nes_init_adapter(struct nes_device *, u8); | ||
497 | void nes_nic_init_timer_defaults(struct nes_device *, u8); | ||
498 | unsigned int nes_reset_adapter_ne020(struct nes_device *, u8 *); | ||
499 | int nes_init_serdes(struct nes_device *, u8, u8, u8); | ||
500 | void nes_init_csr_ne020(struct nes_device *, u8, u8); | ||
501 | void nes_destroy_adapter(struct nes_adapter *); | ||
502 | int nes_init_cqp(struct nes_device *); | ||
503 | int nes_init_phy(struct nes_device *); | ||
504 | int nes_init_nic_qp(struct nes_device *, struct net_device *); | ||
505 | void nes_destroy_nic_qp(struct nes_vnic *); | ||
506 | int nes_napi_isr(struct nes_device *); | ||
507 | void nes_dpc(unsigned long); | ||
508 | void nes_process_ceq(struct nes_device *, struct nes_hw_ceq *); | ||
509 | void nes_process_aeq(struct nes_device *, struct nes_hw_aeq *); | ||
510 | void nes_process_mac_intr(struct nes_device *, u32); | ||
511 | void nes_nic_napi_ce_handler(struct nes_device *, struct nes_hw_nic_cq *); | ||
512 | void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *); | ||
513 | void nes_cqp_ce_handler(struct nes_device *, struct nes_hw_cq *); | ||
514 | void nes_process_iwarp_aeqe(struct nes_device *, struct nes_hw_aeqe *); | ||
515 | void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *); | ||
516 | int nes_destroy_cqp(struct nes_device *); | ||
517 | int nes_nic_cm_xmit(struct sk_buff *, struct net_device *); | ||
518 | |||
519 | /* nes_nic.c */ | ||
520 | void nes_netdev_set_multicast_list(struct net_device *); | ||
521 | void nes_netdev_exit(struct nes_vnic *); | ||
522 | struct net_device *nes_netdev_init(struct nes_device *, void __iomem *); | ||
523 | void nes_netdev_destroy(struct net_device *); | ||
524 | int nes_nic_cm_xmit(struct sk_buff *, struct net_device *); | ||
525 | |||
526 | /* nes_cm.c */ | ||
527 | void *nes_cm_create(struct net_device *); | ||
528 | int nes_cm_recv(struct sk_buff *, struct net_device *); | ||
529 | void nes_update_arp(unsigned char *, u32, u32, u16, u16); | ||
530 | void nes_manage_arp_cache(struct net_device *, unsigned char *, u32, u32); | ||
531 | void nes_sock_release(struct nes_qp *, unsigned long *); | ||
532 | struct nes_cm_core *nes_cm_alloc_core(void); | ||
533 | void flush_wqes(struct nes_device *nesdev, struct nes_qp *, u32, u32); | ||
534 | int nes_manage_apbvt(struct nes_vnic *, u32, u32, u32); | ||
535 | int nes_cm_disconn(struct nes_qp *); | ||
536 | void nes_cm_disconn_worker(void *); | ||
537 | |||
538 | /* nes_verbs.c */ | ||
539 | int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32); | ||
540 | int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); | ||
541 | struct nes_ib_device *nes_init_ofa_device(struct net_device *); | ||
542 | void nes_destroy_ofa_device(struct nes_ib_device *); | ||
543 | int nes_register_ofa_device(struct nes_ib_device *); | ||
544 | void nes_unregister_ofa_device(struct nes_ib_device *); | ||
545 | |||
546 | /* nes_util.c */ | ||
547 | int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *); | ||
548 | void nes_write_1G_phy_reg(struct nes_device *, u8, u8, u16); | ||
549 | void nes_read_1G_phy_reg(struct nes_device *, u8, u8, u16 *); | ||
550 | void nes_write_10G_phy_reg(struct nes_device *, u16, u8, u16); | ||
551 | void nes_read_10G_phy_reg(struct nes_device *, u16, u8); | ||
552 | struct nes_cqp_request *nes_get_cqp_request(struct nes_device *); | ||
553 | void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *, int); | ||
554 | int nes_arp_table(struct nes_device *, u32, u8 *, u32); | ||
555 | void nes_mh_fix(unsigned long); | ||
556 | void nes_clc(unsigned long); | ||
557 | void nes_dump_mem(unsigned int, void *, int); | ||
558 | u32 nes_crc32(u32, u32, u32, u32, u8 *, u32, u32, u32); | ||
559 | |||
560 | #endif /* __NES_H */ | ||
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c new file mode 100644 index 000000000000..bd5cfeaac203 --- /dev/null +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -0,0 +1,3088 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | |||
35 | #define TCPOPT_TIMESTAMP 8 | ||
36 | |||
37 | #include <asm/atomic.h> | ||
38 | #include <linux/skbuff.h> | ||
39 | #include <linux/ip.h> | ||
40 | #include <linux/tcp.h> | ||
41 | #include <linux/init.h> | ||
42 | #include <linux/if_arp.h> | ||
43 | #include <linux/notifier.h> | ||
44 | #include <linux/net.h> | ||
45 | #include <linux/types.h> | ||
46 | #include <linux/timer.h> | ||
47 | #include <linux/time.h> | ||
48 | #include <linux/delay.h> | ||
49 | #include <linux/etherdevice.h> | ||
50 | #include <linux/netdevice.h> | ||
51 | #include <linux/random.h> | ||
52 | #include <linux/list.h> | ||
53 | #include <linux/threads.h> | ||
54 | |||
55 | #include <net/neighbour.h> | ||
56 | #include <net/route.h> | ||
57 | #include <net/ip_fib.h> | ||
58 | |||
59 | #include "nes.h" | ||
60 | |||
61 | u32 cm_packets_sent; | ||
62 | u32 cm_packets_bounced; | ||
63 | u32 cm_packets_dropped; | ||
64 | u32 cm_packets_retrans; | ||
65 | u32 cm_packets_created; | ||
66 | u32 cm_packets_received; | ||
67 | u32 cm_listens_created; | ||
68 | u32 cm_listens_destroyed; | ||
69 | u32 cm_backlog_drops; | ||
70 | atomic_t cm_loopbacks; | ||
71 | atomic_t cm_nodes_created; | ||
72 | atomic_t cm_nodes_destroyed; | ||
73 | atomic_t cm_accel_dropped_pkts; | ||
74 | atomic_t cm_resets_recvd; | ||
75 | |||
76 | static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *); | ||
77 | static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, | ||
78 | struct nes_vnic *, struct nes_cm_info *); | ||
79 | static int add_ref_cm_node(struct nes_cm_node *); | ||
80 | static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *); | ||
81 | static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *); | ||
82 | |||
83 | |||
84 | /* External CM API Interface */ | ||
85 | /* instance of function pointers for client API */ | ||
86 | /* set address of this instance to cm_core->cm_ops at cm_core alloc */ | ||
87 | static struct nes_cm_ops nes_cm_api = { | ||
88 | mini_cm_accelerated, | ||
89 | mini_cm_listen, | ||
90 | mini_cm_del_listen, | ||
91 | mini_cm_connect, | ||
92 | mini_cm_close, | ||
93 | mini_cm_accept, | ||
94 | mini_cm_reject, | ||
95 | mini_cm_recv_pkt, | ||
96 | mini_cm_dealloc_core, | ||
97 | mini_cm_get, | ||
98 | mini_cm_set | ||
99 | }; | ||
100 | |||
101 | struct nes_cm_core *g_cm_core; | ||
102 | |||
103 | atomic_t cm_connects; | ||
104 | atomic_t cm_accepts; | ||
105 | atomic_t cm_disconnects; | ||
106 | atomic_t cm_closes; | ||
107 | atomic_t cm_connecteds; | ||
108 | atomic_t cm_connect_reqs; | ||
109 | atomic_t cm_rejects; | ||
110 | |||
111 | |||
112 | /** | ||
113 | * create_event | ||
114 | */ | ||
115 | static struct nes_cm_event *create_event(struct nes_cm_node *cm_node, | ||
116 | enum nes_cm_event_type type) | ||
117 | { | ||
118 | struct nes_cm_event *event; | ||
119 | |||
120 | if (!cm_node->cm_id) | ||
121 | return NULL; | ||
122 | |||
123 | /* allocate an empty event */ | ||
124 | event = kzalloc(sizeof(*event), GFP_ATOMIC); | ||
125 | |||
126 | if (!event) | ||
127 | return NULL; | ||
128 | |||
129 | event->type = type; | ||
130 | event->cm_node = cm_node; | ||
131 | event->cm_info.rem_addr = cm_node->rem_addr; | ||
132 | event->cm_info.loc_addr = cm_node->loc_addr; | ||
133 | event->cm_info.rem_port = cm_node->rem_port; | ||
134 | event->cm_info.loc_port = cm_node->loc_port; | ||
135 | event->cm_info.cm_id = cm_node->cm_id; | ||
136 | |||
137 | nes_debug(NES_DBG_CM, "Created event=%p, type=%u, dst_addr=%08x[%x]," | ||
138 | " src_addr=%08x[%x]\n", | ||
139 | event, type, | ||
140 | event->cm_info.loc_addr, event->cm_info.loc_port, | ||
141 | event->cm_info.rem_addr, event->cm_info.rem_port); | ||
142 | |||
143 | nes_cm_post_event(event); | ||
144 | return event; | ||
145 | } | ||
146 | |||
147 | |||
148 | /** | ||
149 | * send_mpa_request | ||
150 | */ | ||
151 | int send_mpa_request(struct nes_cm_node *cm_node) | ||
152 | { | ||
153 | struct sk_buff *skb; | ||
154 | int ret; | ||
155 | |||
156 | skb = get_free_pkt(cm_node); | ||
157 | if (!skb) { | ||
158 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); | ||
159 | return -1; | ||
160 | } | ||
161 | |||
162 | /* send an MPA Request frame */ | ||
163 | form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame, | ||
164 | cm_node->mpa_frame_size, SET_ACK); | ||
165 | |||
166 | ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); | ||
167 | if (ret < 0) { | ||
168 | return ret; | ||
169 | } | ||
170 | |||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | |||
175 | /** | ||
176 | * parse_mpa - process a received TCP pkt; we are expecting an | ||
177 | * IETF MPA frame | ||
178 | */ | ||
179 | static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len) | ||
180 | { | ||
181 | struct ietf_mpa_frame *mpa_frame; | ||
182 | |||
183 | /* assume req frame is in tcp data payload */ | ||
184 | if (len < sizeof(struct ietf_mpa_frame)) { | ||
185 | nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len); | ||
186 | return -1; | ||
187 | } | ||
188 | |||
189 | mpa_frame = (struct ietf_mpa_frame *)buffer; | ||
190 | cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len); | ||
191 | |||
192 | if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) { | ||
193 | nes_debug(NES_DBG_CM, "The received ietf buffer was not the right" | ||
194 | " size (%x + %x != %x)\n", | ||
195 | cm_node->mpa_frame_size, (u32)sizeof(struct ietf_mpa_frame), len); | ||
196 | return -1; | ||
197 | } | ||
198 | |||
199 | /* copy entire MPA frame to our cm_node's frame */ | ||
200 | memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame), | ||
201 | cm_node->mpa_frame_size); | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | |||
207 | /** | ||
208 | * handle_exception_pkt - process an exception packet. | ||
209 | * We have been in a TSA state, and we have now received SW | ||
210 | * TCP/IP traffic; it should be a FIN request or an IP pkt with options | ||
211 | */ | ||
212 | static int handle_exception_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb) | ||
213 | { | ||
214 | int ret = 0; | ||
215 | struct tcphdr *tcph = tcp_hdr(skb); | ||
216 | |||
217 | /* first check to see if this a FIN pkt */ | ||
218 | if (tcph->fin) { | ||
219 | /* we need to ACK the FIN request */ | ||
220 | send_ack(cm_node); | ||
221 | |||
222 | /* check which side we are on (client/server) and set next state accordingly */ | ||
223 | if (cm_node->tcp_cntxt.client) | ||
224 | cm_node->state = NES_CM_STATE_CLOSING; | ||
225 | else { | ||
226 | /* we are the server side */ | ||
227 | cm_node->state = NES_CM_STATE_CLOSE_WAIT; | ||
228 | /* since this is a self contained CM we don't wait for */ | ||
229 | /* an APP to close us, just send final FIN immediately */ | ||
230 | ret = send_fin(cm_node, NULL); | ||
231 | cm_node->state = NES_CM_STATE_LAST_ACK; | ||
232 | } | ||
233 | } else { | ||
234 | ret = -EINVAL; | ||
235 | } | ||
236 | |||
237 | return ret; | ||
238 | } | ||
239 | |||
240 | |||
241 | /** | ||
242 | * form_cm_frame - get a free packet and build an empty frame from the | ||
243 | * node's connection info. | ||
244 | */ | ||
245 | struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node, | ||
246 | void *options, u32 optionsize, void *data, u32 datasize, u8 flags) | ||
247 | { | ||
248 | struct tcphdr *tcph; | ||
249 | struct iphdr *iph; | ||
250 | struct ethhdr *ethh; | ||
251 | u8 *buf; | ||
252 | u16 packetsize = sizeof(*iph); | ||
253 | |||
254 | packetsize += sizeof(*tcph); | ||
255 | packetsize += optionsize + datasize; | ||
256 | |||
257 | memset(skb->data, 0x00, ETH_HLEN + sizeof(*iph) + sizeof(*tcph)); | ||
258 | |||
259 | skb->len = 0; | ||
260 | buf = skb_put(skb, packetsize + ETH_HLEN); | ||
261 | |||
262 | ethh = (struct ethhdr *) buf; | ||
263 | buf += ETH_HLEN; | ||
264 | |||
265 | iph = (struct iphdr *)buf; | ||
266 | buf += sizeof(*iph); | ||
267 | tcph = (struct tcphdr *)buf; | ||
268 | skb_reset_mac_header(skb); | ||
269 | skb_set_network_header(skb, ETH_HLEN); | ||
270 | skb_set_transport_header(skb, ETH_HLEN+sizeof(*iph)); | ||
271 | buf += sizeof(*tcph); | ||
272 | |||
273 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
274 | skb->protocol = htons(0x800); | ||
275 | skb->data_len = 0; | ||
276 | skb->mac_len = ETH_HLEN; | ||
277 | |||
278 | memcpy(ethh->h_dest, cm_node->rem_mac, ETH_ALEN); | ||
279 | memcpy(ethh->h_source, cm_node->loc_mac, ETH_ALEN); | ||
280 | ethh->h_proto = htons(0x0800); | ||
281 | |||
282 | iph->version = IPVERSION; | ||
283 | iph->ihl = 5; /* 5 * 4-byte words, IP header len */ | ||
284 | iph->tos = 0; | ||
285 | iph->tot_len = htons(packetsize); | ||
286 | iph->id = htons(++cm_node->tcp_cntxt.loc_id); | ||
287 | |||
288 | iph->frag_off = htons(0x4000); | ||
289 | iph->ttl = 0x40; | ||
290 | iph->protocol = 0x06; /* IPPROTO_TCP */ | ||
291 | |||
292 | iph->saddr = htonl(cm_node->loc_addr); | ||
293 | iph->daddr = htonl(cm_node->rem_addr); | ||
294 | |||
295 | tcph->source = htons(cm_node->loc_port); | ||
296 | tcph->dest = htons(cm_node->rem_port); | ||
297 | tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num); | ||
298 | |||
299 | if (flags & SET_ACK) { | ||
300 | cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt; | ||
301 | tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num); | ||
302 | tcph->ack = 1; | ||
303 | } else | ||
304 | tcph->ack_seq = 0; | ||
305 | |||
306 | if (flags & SET_SYN) { | ||
307 | cm_node->tcp_cntxt.loc_seq_num++; | ||
308 | tcph->syn = 1; | ||
309 | } else | ||
310 | cm_node->tcp_cntxt.loc_seq_num += datasize; /* data (no headers) */ | ||
311 | |||
312 | if (flags & SET_FIN) | ||
313 | tcph->fin = 1; | ||
314 | |||
315 | if (flags & SET_RST) | ||
316 | tcph->rst = 1; | ||
317 | |||
318 | tcph->doff = (u16)((sizeof(*tcph) + optionsize + 3) >> 2); | ||
319 | tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd); | ||
320 | tcph->urg_ptr = 0; | ||
321 | if (optionsize) | ||
322 | memcpy(buf, options, optionsize); | ||
323 | buf += optionsize; | ||
324 | if (datasize) | ||
325 | memcpy(buf, data, datasize); | ||
326 | |||
327 | skb_shinfo(skb)->nr_frags = 0; | ||
328 | cm_packets_created++; | ||
329 | |||
330 | return skb; | ||
331 | } | ||
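For a frame with no TCP options and no payload, the size arithmetic above works out as follows (struct iphdr and struct tcphdr are both 20 bytes):

	packetsize  = 20 (IP) + 20 (TCP) + 0 + 0 = 40 bytes, plus ETH_HLEN on the wire
	tcph->doff  = (20 + 0 + 3) >> 2 = 5	/* TCP header length in 32-bit words */

The doff value only stays exact when optionsize is a multiple of 4, which is why send_syn() below pads its option buffer out to a 4-byte boundary.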
332 | |||
333 | |||
334 | /** | ||
335 | * print_core - dump a cm core | ||
336 | */ | ||
337 | static void print_core(struct nes_cm_core *core) | ||
338 | { | ||
339 | nes_debug(NES_DBG_CM, "---------------------------------------------\n"); | ||
340 | nes_debug(NES_DBG_CM, "CM Core -- (core = %p )\n", core); | ||
341 | if (!core) | ||
342 | return; | ||
343 | nes_debug(NES_DBG_CM, "---------------------------------------------\n"); | ||
344 | nes_debug(NES_DBG_CM, "Session ID : %u \n", atomic_read(&core->session_id)); | ||
345 | |||
346 | nes_debug(NES_DBG_CM, "State : %u \n", core->state); | ||
347 | |||
348 | nes_debug(NES_DBG_CM, "Tx Free cnt : %u \n", skb_queue_len(&core->tx_free_list)); | ||
349 | nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt)); | ||
350 | nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt)); | ||
351 | |||
352 | nes_debug(NES_DBG_CM, "core : %p \n", core); | ||
353 | |||
354 | nes_debug(NES_DBG_CM, "-------------- end core ---------------\n"); | ||
355 | } | ||
356 | |||
357 | |||
358 | /** | ||
359 | * schedule_nes_timer | ||
360 | * note - cm_node needs to be protected before calling this; hold a reference | ||
361 | * across the call: add_ref_cm_node(cm_node); ... rem_ref_cm_node(cm_core, cm_node); | ||
362 | */ | ||
363 | int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | ||
364 | enum nes_timer_type type, int send_retrans, | ||
365 | int close_when_complete) | ||
366 | { | ||
367 | unsigned long flags; | ||
368 | struct nes_cm_core *cm_core; | ||
369 | struct nes_timer_entry *new_send; | ||
370 | int ret = 0; | ||
371 | u32 was_timer_set; | ||
372 | |||
373 | if (!cm_node) | ||
374 | return -EINVAL; | ||
375 | new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); | ||
376 | if (!new_send) | ||
377 | return -1; | ||
378 | |||
379 | /* new_send->timetosend = currenttime */ | ||
380 | new_send->retrycount = NES_DEFAULT_RETRYS; | ||
381 | new_send->retranscount = NES_DEFAULT_RETRANS; | ||
382 | new_send->skb = skb; | ||
383 | new_send->timetosend = jiffies; | ||
384 | new_send->type = type; | ||
385 | new_send->netdev = cm_node->netdev; | ||
386 | new_send->send_retrans = send_retrans; | ||
387 | new_send->close_when_complete = close_when_complete; | ||
388 | |||
389 | if (type == NES_TIMER_TYPE_CLOSE) { | ||
390 | new_send->timetosend += (HZ/2); /* TODO: decide on the correct value here */ | ||
391 | spin_lock_irqsave(&cm_node->recv_list_lock, flags); | ||
392 | list_add_tail(&new_send->list, &cm_node->recv_list); | ||
393 | spin_unlock_irqrestore(&cm_node->recv_list_lock, flags); | ||
394 | } | ||
395 | |||
396 | if (type == NES_TIMER_TYPE_SEND) { | ||
397 | new_send->seq_num = htonl(tcp_hdr(skb)->seq); | ||
398 | atomic_inc(&new_send->skb->users); | ||
399 | |||
400 | ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev); | ||
401 | if (ret != NETDEV_TX_OK) { | ||
402 | nes_debug(NES_DBG_CM, "Error sending packet %p (jiffies = %lu)\n", | ||
403 | new_send, jiffies); | ||
404 | atomic_dec(&new_send->skb->users); | ||
405 | new_send->timetosend = jiffies; | ||
406 | } else { | ||
407 | cm_packets_sent++; | ||
408 | if (!send_retrans) { | ||
409 | if (close_when_complete) | ||
410 | rem_ref_cm_node(cm_node->cm_core, cm_node); | ||
411 | dev_kfree_skb_any(new_send->skb); | ||
412 | kfree(new_send); | ||
413 | return ret; | ||
414 | } | ||
415 | new_send->timetosend = jiffies + NES_RETRY_TIMEOUT; | ||
416 | } | ||
417 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | ||
418 | list_add_tail(&new_send->list, &cm_node->retrans_list); | ||
419 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); | ||
420 | } | ||
421 | if (type == NES_TIMER_TYPE_RECV) { | ||
422 | new_send->seq_num = htonl(tcp_hdr(skb)->seq); | ||
423 | new_send->timetosend = jiffies; | ||
424 | spin_lock_irqsave(&cm_node->recv_list_lock, flags); | ||
425 | list_add_tail(&new_send->list, &cm_node->recv_list); | ||
426 | spin_unlock_irqrestore(&cm_node->recv_list_lock, flags); | ||
427 | } | ||
428 | cm_core = cm_node->cm_core; | ||
429 | |||
430 | was_timer_set = timer_pending(&cm_core->tcp_timer); | ||
431 | |||
432 | if (!was_timer_set) { | ||
433 | cm_core->tcp_timer.expires = new_send->timetosend; | ||
434 | add_timer(&cm_core->tcp_timer); | ||
435 | } | ||
436 | |||
437 | return ret; | ||
438 | } | ||
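A descriptive note on what schedule_nes_timer() queues where:

/*
 * - NES_TIMER_TYPE_SEND:  transmitted immediately via nes_nic_cm_xmit();
 *   unless it was a one-shot send that went out cleanly, the entry is
 *   parked on retrans_list for nes_cm_timer_tick() to retransmit or reap.
 * - NES_TIMER_TYPE_RECV:  simply queued on recv_list with timetosend = jiffies.
 * - NES_TIMER_TYPE_CLOSE: queued on recv_list roughly HZ/2 in the future;
 *   for these entries the skb field actually carries a struct nes_qp pointer
 *   (see nes_cm_timer_tick() and rem_ref_cm_node() below).
 */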
439 | |||
440 | |||
441 | /** | ||
442 | * nes_cm_timer_tick | ||
443 | */ | ||
444 | void nes_cm_timer_tick(unsigned long pass) | ||
445 | { | ||
446 | unsigned long flags, qplockflags; | ||
447 | unsigned long nexttimeout = jiffies + NES_LONG_TIME; | ||
448 | struct iw_cm_id *cm_id; | ||
449 | struct nes_cm_node *cm_node; | ||
450 | struct nes_timer_entry *send_entry, *recv_entry; | ||
451 | struct list_head *list_core, *list_core_temp; | ||
452 | struct list_head *list_node, *list_node_temp; | ||
453 | struct nes_cm_core *cm_core = g_cm_core; | ||
454 | struct nes_qp *nesqp; | ||
455 | struct sk_buff *skb; | ||
456 | u32 settimer = 0; | ||
457 | int ret = NETDEV_TX_OK; | ||
458 | int node_done; | ||
459 | |||
460 | spin_lock_irqsave(&cm_core->ht_lock, flags); | ||
461 | |||
462 | list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) { | ||
463 | cm_node = container_of(list_node, struct nes_cm_node, list); | ||
464 | add_ref_cm_node(cm_node); | ||
465 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | ||
466 | spin_lock_irqsave(&cm_node->recv_list_lock, flags); | ||
467 | list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) { | ||
468 | recv_entry = container_of(list_core, struct nes_timer_entry, list); | ||
469 | if ((time_after(recv_entry->timetosend, jiffies)) && | ||
470 | (recv_entry->type == NES_TIMER_TYPE_CLOSE)) { | ||
471 | if (nexttimeout > recv_entry->timetosend || !settimer) { | ||
472 | nexttimeout = recv_entry->timetosend; | ||
473 | settimer = 1; | ||
474 | } | ||
475 | continue; | ||
476 | } | ||
477 | list_del(&recv_entry->list); | ||
478 | cm_id = cm_node->cm_id; | ||
479 | spin_unlock_irqrestore(&cm_node->recv_list_lock, flags); | ||
480 | if (recv_entry->type == NES_TIMER_TYPE_CLOSE) { | ||
481 | nesqp = (struct nes_qp *)recv_entry->skb; | ||
482 | spin_lock_irqsave(&nesqp->lock, qplockflags); | ||
483 | if (nesqp->cm_id) { | ||
484 | nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d: " | ||
485 | "****** HIT A NES_TIMER_TYPE_CLOSE" | ||
486 | " with something to do!!! ******\n", | ||
487 | nesqp->hwqp.qp_id, cm_id, | ||
488 | atomic_read(&nesqp->refcount)); | ||
489 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; | ||
490 | nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT; | ||
491 | nesqp->ibqp_state = IB_QPS_ERR; | ||
492 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
493 | nes_cm_disconn(nesqp); | ||
494 | } else { | ||
495 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
496 | nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d:" | ||
497 | " ****** HIT A NES_TIMER_TYPE_CLOSE" | ||
498 | " with nothing to do!!! ******\n", | ||
499 | nesqp->hwqp.qp_id, cm_id, | ||
500 | atomic_read(&nesqp->refcount)); | ||
501 | nes_rem_ref(&nesqp->ibqp); | ||
502 | } | ||
503 | if (cm_id) | ||
504 | cm_id->rem_ref(cm_id); | ||
505 | } | ||
506 | kfree(recv_entry); | ||
507 | spin_lock_irqsave(&cm_node->recv_list_lock, flags); | ||
508 | } | ||
509 | spin_unlock_irqrestore(&cm_node->recv_list_lock, flags); | ||
510 | |||
511 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | ||
512 | node_done = 0; | ||
513 | list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) { | ||
514 | if (node_done) { | ||
515 | break; | ||
516 | } | ||
517 | send_entry = container_of(list_core, struct nes_timer_entry, list); | ||
518 | if (time_after(send_entry->timetosend, jiffies)) { | ||
519 | if (cm_node->state != NES_CM_STATE_TSA) { | ||
520 | if ((nexttimeout > send_entry->timetosend) || !settimer) { | ||
521 | nexttimeout = send_entry->timetosend; | ||
522 | settimer = 1; | ||
523 | } | ||
524 | node_done = 1; | ||
525 | continue; | ||
526 | } else { | ||
527 | list_del(&send_entry->list); | ||
528 | skb = send_entry->skb; | ||
529 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); | ||
530 | dev_kfree_skb_any(skb); | ||
531 | kfree(send_entry); | ||
532 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | ||
533 | continue; | ||
534 | } | ||
535 | } | ||
536 | if (send_entry->type == NES_TIMER_NODE_CLEANUP) { | ||
537 | list_del(&send_entry->list); | ||
538 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); | ||
539 | kfree(send_entry); | ||
540 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | ||
541 | continue; | ||
542 | } | ||
543 | if ((send_entry->seq_num < cm_node->tcp_cntxt.rem_ack_num) || | ||
544 | (cm_node->state == NES_CM_STATE_TSA) || | ||
545 | (cm_node->state == NES_CM_STATE_CLOSED)) { | ||
546 | skb = send_entry->skb; | ||
547 | list_del(&send_entry->list); | ||
548 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); | ||
549 | kfree(send_entry); | ||
550 | dev_kfree_skb_any(skb); | ||
551 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | ||
552 | continue; | ||
553 | } | ||
554 | |||
555 | if (!send_entry->retranscount || !send_entry->retrycount) { | ||
556 | cm_packets_dropped++; | ||
557 | skb = send_entry->skb; | ||
558 | list_del(&send_entry->list); | ||
559 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); | ||
560 | dev_kfree_skb_any(skb); | ||
561 | kfree(send_entry); | ||
562 | if (cm_node->state == NES_CM_STATE_SYN_RCVD) { | ||
563 | /* this node never even generated an indication up to the cm */ | ||
564 | rem_ref_cm_node(cm_core, cm_node); | ||
565 | } else { | ||
566 | cm_node->state = NES_CM_STATE_CLOSED; | ||
567 | create_event(cm_node, NES_CM_EVENT_ABORTED); | ||
568 | } | ||
569 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | ||
570 | continue; | ||
571 | } | ||
572 | /* this seems like the correct place, but leave send entry unprotected */ | ||
573 | // spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); | ||
574 | atomic_inc(&send_entry->skb->users); | ||
575 | cm_packets_retrans++; | ||
576 | nes_debug(NES_DBG_CM, "Retransmitting send_entry %p for node %p," | ||
577 | " jiffies = %lu, time to send = %lu, retranscount = %u, " | ||
578 | "send_entry->seq_num = 0x%08X, cm_node->tcp_cntxt.rem_ack_num = 0x%08X\n", | ||
579 | send_entry, cm_node, jiffies, send_entry->timetosend, send_entry->retranscount, | ||
580 | send_entry->seq_num, cm_node->tcp_cntxt.rem_ack_num); | ||
581 | |||
582 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); | ||
583 | ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev); | ||
584 | if (ret != NETDEV_TX_OK) { | ||
585 | cm_packets_bounced++; | ||
586 | atomic_dec(&send_entry->skb->users); | ||
587 | send_entry->retrycount--; | ||
588 | nexttimeout = jiffies + NES_SHORT_TIME; | ||
589 | settimer = 1; | ||
590 | node_done = 1; | ||
591 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | ||
592 | continue; | ||
593 | } else { | ||
594 | cm_packets_sent++; | ||
595 | } | ||
596 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | ||
597 | list_del(&send_entry->list); | ||
598 | nes_debug(NES_DBG_CM, "Packet Sent: retrans count = %u, retry count = %u.\n", | ||
599 | send_entry->retranscount, send_entry->retrycount); | ||
600 | if (send_entry->send_retrans) { | ||
601 | send_entry->retranscount--; | ||
602 | send_entry->timetosend = jiffies + NES_RETRY_TIMEOUT; | ||
603 | if (nexttimeout > send_entry->timetosend || !settimer) { | ||
604 | nexttimeout = send_entry->timetosend; | ||
605 | settimer = 1; | ||
606 | } | ||
607 | list_add(&send_entry->list, &cm_node->retrans_list); | ||
608 | continue; | ||
609 | } else { | ||
610 | int close_when_complete; | ||
611 | skb = send_entry->skb; | ||
612 | close_when_complete = send_entry->close_when_complete; | ||
613 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); | ||
614 | if (close_when_complete) { | ||
615 | BUG_ON(atomic_read(&cm_node->ref_count) == 1); | ||
616 | rem_ref_cm_node(cm_core, cm_node); | ||
617 | } | ||
618 | dev_kfree_skb_any(skb); | ||
619 | kfree(send_entry); | ||
620 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | ||
621 | continue; | ||
622 | } | ||
623 | } | ||
624 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); | ||
625 | |||
626 | rem_ref_cm_node(cm_core, cm_node); | ||
627 | |||
628 | spin_lock_irqsave(&cm_core->ht_lock, flags); | ||
629 | if (ret != NETDEV_TX_OK) | ||
630 | break; | ||
631 | } | ||
632 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | ||
633 | |||
634 | if (settimer) { | ||
635 | if (!timer_pending(&cm_core->tcp_timer)) { | ||
636 | cm_core->tcp_timer.expires = nexttimeout; | ||
637 | add_timer(&cm_core->tcp_timer); | ||
638 | } | ||
639 | } | ||
640 | } | ||
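One point worth noting in the walk above: the tick handler cannot hold ht_lock while it transmits or frees skbs, so for each node it takes a reference with add_ref_cm_node(), drops ht_lock, works through recv_list and retrans_list under their own locks (releasing them around the dev_kfree_skb_any() and nes_nic_cm_xmit() calls), then drops its reference and re-takes ht_lock before moving on; a failed transmit breaks out of the pass early.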
641 | |||
642 | |||
643 | /** | ||
644 | * send_syn | ||
645 | */ | ||
646 | int send_syn(struct nes_cm_node *cm_node, u32 sendack) | ||
647 | { | ||
648 | int ret; | ||
649 | int flags = SET_SYN; | ||
650 | struct sk_buff *skb; | ||
651 | char optionsbuffer[sizeof(struct option_mss) + | ||
652 | sizeof(struct option_windowscale) + | ||
653 | sizeof(struct option_base) + 3]; /* room for the padding and end-of-options bytes */ | ||
654 | |||
655 | int optionssize = 0; | ||
656 | /* Sending MSS option */ | ||
657 | union all_known_options *options; | ||
658 | |||
659 | if (!cm_node) | ||
660 | return -EINVAL; | ||
661 | |||
662 | options = (union all_known_options *)&optionsbuffer[optionssize]; | ||
663 | options->as_mss.optionnum = OPTION_NUMBER_MSS; | ||
664 | options->as_mss.length = sizeof(struct option_mss); | ||
665 | options->as_mss.mss = htons(cm_node->tcp_cntxt.mss); | ||
666 | optionssize += sizeof(struct option_mss); | ||
667 | |||
668 | options = (union all_known_options *)&optionsbuffer[optionssize]; | ||
669 | options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE; | ||
670 | options->as_windowscale.length = sizeof(struct option_windowscale); | ||
671 | options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale; | ||
672 | optionssize += sizeof(struct option_windowscale); | ||
673 | |||
674 | if (sendack && | ||
675 | !(NES_DRV_OPT_SUPRESS_OPTION_BC & nes_drv_opt)) { | ||
676 | options = (union all_known_options *)&optionsbuffer[optionssize]; | ||
677 | options->as_base.optionnum = OPTION_NUMBER_WRITE0; | ||
678 | options->as_base.length = sizeof(struct option_base); | ||
679 | optionssize += sizeof(struct option_base); | ||
680 | /* we need the size to be a multiple of 4 */ | ||
681 | options = (union all_known_options *)&optionsbuffer[optionssize]; | ||
682 | options->as_end = 1; | ||
683 | optionssize += 1; | ||
684 | options = (union all_known_options *)&optionsbuffer[optionssize]; | ||
685 | options->as_end = 1; | ||
686 | optionssize += 1; | ||
687 | } | ||
688 | |||
689 | options = (union all_known_options *)&optionsbuffer[optionssize]; | ||
690 | options->as_end = OPTION_NUMBER_END; | ||
691 | optionssize += 1; | ||
692 | |||
693 | skb = get_free_pkt(cm_node); | ||
694 | if (!skb) { | ||
695 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); | ||
696 | return -1; | ||
697 | } | ||
698 | |||
699 | if (sendack) | ||
700 | flags |= SET_ACK; | ||
701 | |||
702 | form_cm_frame(skb, cm_node, optionsbuffer, optionssize, NULL, 0, flags); | ||
703 | ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); | ||
704 | |||
705 | return ret; | ||
706 | } | ||
707 | |||
708 | |||
709 | /** | ||
710 | * send_reset | ||
711 | */ | ||
712 | int send_reset(struct nes_cm_node *cm_node) | ||
713 | { | ||
714 | int ret; | ||
715 | struct sk_buff *skb = get_free_pkt(cm_node); | ||
716 | int flags = SET_RST | SET_ACK; | ||
717 | |||
718 | if (!skb) { | ||
719 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); | ||
720 | return -1; | ||
721 | } | ||
722 | |||
723 | add_ref_cm_node(cm_node); | ||
724 | form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags); | ||
725 | ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 1); | ||
726 | |||
727 | return ret; | ||
728 | } | ||
729 | |||
730 | |||
731 | /** | ||
732 | * send_ack | ||
733 | */ | ||
734 | int send_ack(struct nes_cm_node *cm_node) | ||
735 | { | ||
736 | int ret; | ||
737 | struct sk_buff *skb = get_free_pkt(cm_node); | ||
738 | |||
739 | if (!skb) { | ||
740 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); | ||
741 | return -1; | ||
742 | } | ||
743 | |||
744 | form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK); | ||
745 | ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 0); | ||
746 | |||
747 | return ret; | ||
748 | } | ||
749 | |||
750 | |||
751 | /** | ||
752 | * send_fin | ||
753 | */ | ||
754 | int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb) | ||
755 | { | ||
756 | int ret; | ||
757 | |||
758 | /* if we didn't get a frame get one */ | ||
759 | if (!skb) | ||
760 | skb = get_free_pkt(cm_node); | ||
761 | |||
762 | if (!skb) { | ||
763 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); | ||
764 | return -1; | ||
765 | } | ||
766 | |||
767 | form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK | SET_FIN); | ||
768 | ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); | ||
769 | |||
770 | return ret; | ||
771 | } | ||
772 | |||
773 | |||
774 | /** | ||
775 | * get_free_pkt | ||
776 | */ | ||
777 | struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node) | ||
778 | { | ||
779 | struct sk_buff *skb, *new_skb; | ||
780 | |||
781 | /* check to see if we need to repopulate the free tx pkt queue */ | ||
782 | if (skb_queue_len(&cm_node->cm_core->tx_free_list) < NES_CM_FREE_PKT_LO_WATERMARK) { | ||
783 | while (skb_queue_len(&cm_node->cm_core->tx_free_list) < | ||
784 | cm_node->cm_core->free_tx_pkt_max) { | ||
785 | /* replace the frame we took, we won't get it back */ | ||
786 | new_skb = dev_alloc_skb(cm_node->cm_core->mtu); | ||
787 | BUG_ON(!new_skb); | ||
788 | /* add a replacement frame to the free tx list head */ | ||
789 | skb_queue_head(&cm_node->cm_core->tx_free_list, new_skb); | ||
790 | } | ||
791 | } | ||
792 | |||
793 | skb = skb_dequeue(&cm_node->cm_core->tx_free_list); | ||
794 | |||
795 | return skb; | ||
796 | } | ||
797 | |||
798 | |||
799 | /** | ||
800 | * make_hashkey - generate hash key from node tuple | ||
801 | */ | ||
802 | static inline int make_hashkey(u16 loc_port, nes_addr_t loc_addr, u16 rem_port, | ||
803 | nes_addr_t rem_addr) | ||
804 | { | ||
805 | u32 hashkey = 0; | ||
806 | |||
807 | hashkey = loc_addr + rem_addr + loc_port + rem_port; | ||
808 | hashkey = (hashkey % NES_CM_HASHTABLE_SIZE); | ||
809 | |||
810 | return hashkey; | ||
811 | } | ||
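make_hashkey() simply folds the four-tuple into a bucket index; collisions are expected and harmless because find_node() below still compares the full address/port quad, and in this version both find_node() and add_hte_node() chain every node on the single connected_nodes list head in any case. A worked example, assuming purely for illustration that NES_CM_HASHTABLE_SIZE is 1024:

	loc = 10.0.0.1:4000 (0x0A000001, 0x0FA0), rem = 10.0.0.2:80 (0x0A000002, 0x0050)
	hashkey = (0x0A000001 + 0x0A000002 + 0x0FA0 + 0x0050) % 1024
	        = 0x14000FF3 % 1024 = 0x3F3 = 1011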
812 | |||
813 | |||
814 | /** | ||
815 | * find_node - find a cm node that matches the reference cm node | ||
816 | */ | ||
817 | static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, | ||
818 | u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr) | ||
819 | { | ||
820 | unsigned long flags; | ||
821 | u32 hashkey; | ||
822 | struct list_head *list_pos; | ||
823 | struct list_head *hte; | ||
824 | struct nes_cm_node *cm_node; | ||
825 | |||
826 | /* make a hash index key for this packet */ | ||
827 | hashkey = make_hashkey(loc_port, loc_addr, rem_port, rem_addr); | ||
828 | |||
829 | /* get a handle on the hte */ | ||
830 | hte = &cm_core->connected_nodes; | ||
831 | |||
832 | nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n", | ||
833 | loc_addr, loc_port, cm_core, hte); | ||
834 | |||
835 | /* walk list and find cm_node associated with this session ID */ | ||
836 | spin_lock_irqsave(&cm_core->ht_lock, flags); | ||
837 | list_for_each(list_pos, hte) { | ||
838 | cm_node = container_of(list_pos, struct nes_cm_node, list); | ||
839 | /* compare quad, return node handle if a match */ | ||
840 | nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n", | ||
841 | cm_node->loc_addr, cm_node->loc_port, | ||
842 | loc_addr, loc_port, | ||
843 | cm_node->rem_addr, cm_node->rem_port, | ||
844 | rem_addr, rem_port); | ||
845 | if ((cm_node->loc_addr == loc_addr) && (cm_node->loc_port == loc_port) && | ||
846 | (cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) { | ||
847 | add_ref_cm_node(cm_node); | ||
848 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | ||
849 | return cm_node; | ||
850 | } | ||
851 | } | ||
852 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | ||
853 | |||
854 | /* no owner node */ | ||
855 | return NULL; | ||
856 | } | ||
857 | |||
858 | |||
859 | /** | ||
860 | * find_listener - find a cm node listening on this addr-port pair | ||
861 | */ | ||
862 | static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, | ||
863 | nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state) | ||
864 | { | ||
865 | unsigned long flags; | ||
866 | struct list_head *listen_list; | ||
867 | struct nes_cm_listener *listen_node; | ||
868 | |||
869 | /* walk list and find cm_node associated with this session ID */ | ||
870 | spin_lock_irqsave(&cm_core->listen_list_lock, flags); | ||
871 | list_for_each(listen_list, &cm_core->listen_list.list) { | ||
872 | listen_node = container_of(listen_list, struct nes_cm_listener, list); | ||
873 | /* compare node pair, return node handle if a match */ | ||
874 | if (((listen_node->loc_addr == dst_addr) || | ||
875 | listen_node->loc_addr == 0x00000000) && | ||
876 | (listen_node->loc_port == dst_port) && | ||
877 | (listener_state & listen_node->listener_state)) { | ||
878 | atomic_inc(&listen_node->ref_count); | ||
879 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | ||
880 | return listen_node; | ||
881 | } | ||
882 | } | ||
883 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | ||
884 | |||
885 | nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n", | ||
886 | dst_addr, dst_port); | ||
887 | |||
888 | /* no listener */ | ||
889 | return NULL; | ||
890 | } | ||
891 | |||
892 | |||
893 | /** | ||
894 | * add_hte_node - add a cm node to the hash table | ||
895 | */ | ||
896 | static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node) | ||
897 | { | ||
898 | unsigned long flags; | ||
899 | u32 hashkey; | ||
900 | struct list_head *hte; | ||
901 | |||
902 | if (!cm_node || !cm_core) | ||
903 | return -EINVAL; | ||
904 | |||
905 | nes_debug(NES_DBG_CM, "Adding Node to Active Connection HT\n"); | ||
906 | |||
907 | /* first, make an index into our hash table */ | ||
908 | hashkey = make_hashkey(cm_node->loc_port, cm_node->loc_addr, | ||
909 | cm_node->rem_port, cm_node->rem_addr); | ||
910 | cm_node->hashkey = hashkey; | ||
911 | |||
912 | spin_lock_irqsave(&cm_core->ht_lock, flags); | ||
913 | |||
914 | /* get a handle on the hash table element (list head for this slot) */ | ||
915 | hte = &cm_core->connected_nodes; | ||
916 | list_add_tail(&cm_node->list, hte); | ||
917 | atomic_inc(&cm_core->ht_node_cnt); | ||
918 | |||
919 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | ||
920 | |||
921 | return 0; | ||
922 | } | ||
923 | |||
924 | |||
925 | /** | ||
926 | * mini_cm_dec_refcnt_listen | ||
927 | */ | ||
928 | static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | ||
929 | struct nes_cm_listener *listener, int free_hanging_nodes) | ||
930 | { | ||
931 | int ret = 1; | ||
932 | unsigned long flags; | ||
933 | spin_lock_irqsave(&cm_core->listen_list_lock, flags); | ||
934 | if (!atomic_dec_return(&listener->ref_count)) { | ||
935 | list_del(&listener->list); | ||
936 | |||
937 | /* decrement our listen node count */ | ||
938 | atomic_dec(&cm_core->listen_node_cnt); | ||
939 | |||
940 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | ||
941 | |||
942 | if (listener->nesvnic) { | ||
943 | nes_manage_apbvt(listener->nesvnic, listener->loc_port, | ||
944 | PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL); | ||
945 | } | ||
946 | |||
947 | if (atomic_read(&listener->pend_accepts_cnt) > 0) | ||
948 | nes_debug(NES_DBG_CM, "destroying listener (%p)" | ||
949 | " with non-zero pending accepts=%u\n", | ||
950 | listener, atomic_read(&listener->pend_accepts_cnt)); | ||
951 | |||
952 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); | ||
953 | |||
954 | kfree(listener); | ||
955 | ret = 0; | ||
956 | cm_listens_destroyed++; | ||
957 | } else { | ||
958 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | ||
959 | } | ||
960 | |||
961 | |||
962 | return ret; | ||
963 | } | ||
964 | |||
965 | |||
966 | /** | ||
967 | * mini_cm_del_listen | ||
968 | */ | ||
969 | static int mini_cm_del_listen(struct nes_cm_core *cm_core, | ||
970 | struct nes_cm_listener *listener) | ||
971 | { | ||
972 | listener->listener_state = NES_CM_LISTENER_PASSIVE_STATE; | ||
973 | listener->cm_id = NULL; /* going to be destroyed pretty soon */ | ||
974 | return mini_cm_dec_refcnt_listen(cm_core, listener, 1); | ||
975 | } | ||
976 | |||
977 | |||
978 | /** | ||
979 | * mini_cm_accelerated | ||
980 | */ | ||
981 | static inline int mini_cm_accelerated(struct nes_cm_core *cm_core, | ||
982 | struct nes_cm_node *cm_node) | ||
983 | { | ||
984 | u32 was_timer_set; | ||
985 | cm_node->accelerated = 1; | ||
986 | |||
987 | if (cm_node->accept_pend) { | ||
988 | BUG_ON(!cm_node->listener); | ||
989 | atomic_dec(&cm_node->listener->pend_accepts_cnt); | ||
990 | BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0); | ||
991 | } | ||
992 | |||
993 | was_timer_set = timer_pending(&cm_core->tcp_timer); | ||
994 | if (!was_timer_set) { | ||
995 | cm_core->tcp_timer.expires = jiffies + NES_SHORT_TIME; | ||
996 | add_timer(&cm_core->tcp_timer); | ||
997 | } | ||
998 | |||
999 | return 0; | ||
1000 | } | ||
1001 | |||
1002 | |||
1003 | /** | ||
1004 | * nes_addr_send_arp | ||
1005 | */ | ||
1006 | static void nes_addr_send_arp(u32 dst_ip) | ||
1007 | { | ||
1008 | struct rtable *rt; | ||
1009 | struct flowi fl; | ||
1010 | |||
1011 | memset(&fl, 0, sizeof fl); | ||
1012 | fl.nl_u.ip4_u.daddr = htonl(dst_ip); | ||
1013 | if (ip_route_output_key(&init_net, &rt, &fl)) { | ||
1014 | printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", | ||
1015 | __FUNCTION__, dst_ip); | ||
1016 | return; | ||
1017 | } | ||
1018 | |||
1019 | neigh_event_send(rt->u.dst.neighbour, NULL); | ||
1020 | ip_rt_put(rt); | ||
1021 | } | ||
1022 | |||
1023 | |||
1024 | /** | ||
1025 | * make_cm_node - create a new instance of a cm node | ||
1026 | */ | ||
1027 | static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | ||
1028 | struct nes_vnic *nesvnic, struct nes_cm_info *cm_info, | ||
1029 | struct nes_cm_listener *listener) | ||
1030 | { | ||
1031 | struct nes_cm_node *cm_node; | ||
1032 | struct timespec ts; | ||
1033 | int arpindex = 0; | ||
1034 | struct nes_device *nesdev; | ||
1035 | struct nes_adapter *nesadapter; | ||
1036 | |||
1037 | /* create an hte and cm_node for this instance */ | ||
1038 | cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC); | ||
1039 | if (!cm_node) | ||
1040 | return NULL; | ||
1041 | |||
1042 | /* set our node specific transport info */ | ||
1043 | cm_node->loc_addr = cm_info->loc_addr; | ||
1044 | cm_node->rem_addr = cm_info->rem_addr; | ||
1045 | cm_node->loc_port = cm_info->loc_port; | ||
1046 | cm_node->rem_port = cm_info->rem_port; | ||
1047 | cm_node->send_write0 = send_first; | ||
1048 | nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n", | ||
1049 | cm_node->loc_addr, cm_node->loc_port, cm_node->rem_addr, cm_node->rem_port); | ||
1050 | cm_node->listener = listener; | ||
1051 | cm_node->netdev = nesvnic->netdev; | ||
1052 | cm_node->cm_id = cm_info->cm_id; | ||
1053 | memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN); | ||
1054 | |||
1055 | nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n", | ||
1056 | cm_node->listener, cm_node->cm_id); | ||
1057 | |||
1058 | INIT_LIST_HEAD(&cm_node->retrans_list); | ||
1059 | spin_lock_init(&cm_node->retrans_list_lock); | ||
1060 | INIT_LIST_HEAD(&cm_node->recv_list); | ||
1061 | spin_lock_init(&cm_node->recv_list_lock); | ||
1062 | |||
1063 | cm_node->loopbackpartner = NULL; | ||
1064 | atomic_set(&cm_node->ref_count, 1); | ||
1065 | /* associate our parent CM core */ | ||
1066 | cm_node->cm_core = cm_core; | ||
1067 | cm_node->tcp_cntxt.loc_id = NES_CM_DEF_LOCAL_ID; | ||
1068 | cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; | ||
1069 | cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >> | ||
1070 | NES_CM_DEFAULT_RCV_WND_SCALE; | ||
1071 | ts = current_kernel_time(); | ||
1072 | cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec); | ||
1073 | cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) - | ||
1074 | sizeof(struct tcphdr) - ETH_HLEN; | ||
1075 | cm_node->tcp_cntxt.rcv_nxt = 0; | ||
1076 | /* get a unique session ID; add thread_id to an upcounter to handle races */ | ||
1077 | atomic_inc(&cm_core->node_cnt); | ||
1078 | atomic_inc(&cm_core->session_id); | ||
1079 | cm_node->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid); | ||
1080 | cm_node->conn_type = cm_info->conn_type; | ||
1081 | cm_node->apbvt_set = 0; | ||
1082 | cm_node->accept_pend = 0; | ||
1083 | |||
1084 | cm_node->nesvnic = nesvnic; | ||
1085 | /* get some device handles, for arp lookup */ | ||
1086 | nesdev = nesvnic->nesdev; | ||
1087 | nesadapter = nesdev->nesadapter; | ||
1088 | |||
1089 | cm_node->loopbackpartner = NULL; | ||
1090 | /* get the mac addr for the remote node */ | ||
1091 | arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE); | ||
1092 | if (arpindex < 0) { | ||
1093 | kfree(cm_node); | ||
1094 | nes_addr_send_arp(cm_info->rem_addr); | ||
1095 | return NULL; | ||
1096 | } | ||
1097 | |||
1098 | /* copy the mac addr to node context */ | ||
1099 | memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN); | ||
1100 | nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x," | ||
1101 | " %02x, %02x, %02x, %02x, %02x\n", | ||
1102 | cm_node->rem_mac[0], cm_node->rem_mac[1], | ||
1103 | cm_node->rem_mac[2], cm_node->rem_mac[3], | ||
1104 | cm_node->rem_mac[4], cm_node->rem_mac[5]); | ||
1105 | |||
1106 | add_hte_node(cm_core, cm_node); | ||
1107 | atomic_inc(&cm_nodes_created); | ||
1108 | |||
1109 | return cm_node; | ||
1110 | } | ||
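A note on the failure path above: if the remote address is not yet in the driver's ARP table, make_cm_node() frees the half-built node, calls nes_addr_send_arp() to kick off neighbour resolution, and returns NULL; presumably the connection attempt is then failed or retried by the caller once the neighbour entry exists, rather than blocking here for the ARP reply.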
1111 | |||
1112 | |||
1113 | /** | ||
1114 | * add_ref_cm_node - take a reference on a cm node | ||
1115 | */ | ||
1116 | static int add_ref_cm_node(struct nes_cm_node *cm_node) | ||
1117 | { | ||
1118 | atomic_inc(&cm_node->ref_count); | ||
1119 | return 0; | ||
1120 | } | ||
1121 | |||
1122 | |||
1123 | /** | ||
1124 | * rem_ref_cm_node - drop a reference; destroy the cm node when it hits zero | ||
1125 | */ | ||
1126 | static int rem_ref_cm_node(struct nes_cm_core *cm_core, | ||
1127 | struct nes_cm_node *cm_node) | ||
1128 | { | ||
1129 | unsigned long flags, qplockflags; | ||
1130 | struct nes_timer_entry *send_entry; | ||
1131 | struct nes_timer_entry *recv_entry; | ||
1132 | struct iw_cm_id *cm_id; | ||
1133 | struct list_head *list_core, *list_node_temp; | ||
1134 | struct nes_qp *nesqp; | ||
1135 | |||
1136 | if (!cm_node) | ||
1137 | return -EINVAL; | ||
1138 | |||
1139 | spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags); | ||
1140 | if (atomic_dec_return(&cm_node->ref_count)) { | ||
1141 | spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags); | ||
1142 | return 0; | ||
1143 | } | ||
1144 | list_del(&cm_node->list); | ||
1145 | atomic_dec(&cm_core->ht_node_cnt); | ||
1146 | spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags); | ||
1147 | |||
1148 | /* if the node is destroyed before connection was accelerated */ | ||
1149 | if (!cm_node->accelerated && cm_node->accept_pend) { | ||
1150 | BUG_ON(!cm_node->listener); | ||
1151 | atomic_dec(&cm_node->listener->pend_accepts_cnt); | ||
1152 | BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0); | ||
1153 | } | ||
1154 | |||
1155 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | ||
1156 | list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) { | ||
1157 | send_entry = container_of(list_core, struct nes_timer_entry, list); | ||
1158 | list_del(&send_entry->list); | ||
1159 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); | ||
1160 | dev_kfree_skb_any(send_entry->skb); | ||
1161 | kfree(send_entry); | ||
1162 | spin_lock_irqsave(&cm_node->retrans_list_lock, flags); | ||
1163 | continue; | ||
1164 | } | ||
1165 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); | ||
1166 | |||
1167 | spin_lock_irqsave(&cm_node->recv_list_lock, flags); | ||
1168 | list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) { | ||
1169 | recv_entry = container_of(list_core, struct nes_timer_entry, list); | ||
1170 | list_del(&recv_entry->list); | ||
1171 | cm_id = cm_node->cm_id; | ||
1172 | spin_unlock_irqrestore(&cm_node->recv_list_lock, flags); | ||
1173 | if (recv_entry->type == NES_TIMER_TYPE_CLOSE) { | ||
1174 | nesqp = (struct nes_qp *)recv_entry->skb; | ||
1175 | spin_lock_irqsave(&nesqp->lock, qplockflags); | ||
1176 | if (nesqp->cm_id) { | ||
1177 | nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE" | ||
1178 | " with something to do!!! ******\n", | ||
1179 | nesqp->hwqp.qp_id, cm_id); | ||
1180 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; | ||
1181 | nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT; | ||
1182 | nesqp->ibqp_state = IB_QPS_ERR; | ||
1183 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
1184 | nes_cm_disconn(nesqp); | ||
1185 | } else { | ||
1186 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
1187 | nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE" | ||
1188 | " with nothing to do!!! ******\n", | ||
1189 | nesqp->hwqp.qp_id, cm_id); | ||
1190 | nes_rem_ref(&nesqp->ibqp); | ||
1191 | } | ||
1192 | cm_id->rem_ref(cm_id); | ||
1193 | } else if (recv_entry->type == NES_TIMER_TYPE_RECV) { | ||
1194 | dev_kfree_skb_any(recv_entry->skb); | ||
1195 | } | ||
1196 | kfree(recv_entry); | ||
1197 | spin_lock_irqsave(&cm_node->recv_list_lock, flags); | ||
1198 | } | ||
1199 | spin_unlock_irqrestore(&cm_node->recv_list_lock, flags); | ||
1200 | |||
1201 | if (cm_node->listener) { | ||
1202 | mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0); | ||
1203 | } else { | ||
1204 | if (cm_node->apbvt_set && cm_node->nesvnic) { | ||
1205 | nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port, | ||
1206 | PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn), | ||
1207 | NES_MANAGE_APBVT_DEL); | ||
1208 | } | ||
1209 | } | ||
1210 | |||
1211 | kfree(cm_node); | ||
1212 | atomic_dec(&cm_core->node_cnt); | ||
1213 | atomic_inc(&cm_nodes_destroyed); | ||
1214 | |||
1215 | return 0; | ||
1216 | } | ||
1217 | |||
1218 | |||
1219 | /** | ||
1220 | * process_options | ||
1221 | */ | ||
1222 | static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 optionsize, u32 syn_packet) | ||
1223 | { | ||
1224 | u32 tmp; | ||
1225 | u32 offset = 0; | ||
1226 | union all_known_options *all_options; | ||
1227 | char got_mss_option = 0; | ||
1228 | |||
1229 | while (offset < optionsize) { | ||
1230 | all_options = (union all_known_options *)(optionsloc + offset); | ||
1231 | switch (all_options->as_base.optionnum) { | ||
1232 | case OPTION_NUMBER_END: | ||
1233 | offset = optionsize; | ||
1234 | break; | ||
1235 | case OPTION_NUMBER_NONE: | ||
1236 | offset += 1; | ||
1237 | continue; | ||
1238 | case OPTION_NUMBER_MSS: | ||
1239 | nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d Size: %d\n", | ||
1240 | __FUNCTION__, | ||
1241 | all_options->as_mss.length, offset, optionsize); | ||
1242 | got_mss_option = 1; | ||
1243 | if (all_options->as_mss.length != 4) { | ||
1244 | return 1; | ||
1245 | } else { | ||
1246 | tmp = ntohs(all_options->as_mss.mss); | ||
1247 | if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss) | ||
1248 | cm_node->tcp_cntxt.mss = tmp; | ||
1249 | } | ||
1250 | break; | ||
1251 | case OPTION_NUMBER_WINDOW_SCALE: | ||
1252 | cm_node->tcp_cntxt.snd_wscale = all_options->as_windowscale.shiftcount; | ||
1253 | break; | ||
1254 | case OPTION_NUMBER_WRITE0: | ||
1255 | cm_node->send_write0 = 1; | ||
1256 | break; | ||
1257 | default: | ||
1258 | nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n", | ||
1259 | all_options->as_base.optionnum); | ||
1260 | break; | ||
1261 | } | ||
1262 | offset += all_options->as_base.length; | ||
1263 | } | ||
1264 | if ((!got_mss_option) && (syn_packet)) | ||
1265 | cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS; | ||
1266 | return 0; | ||
1267 | } | ||
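Only four TCP options matter to this CM: end-of-list terminates the scan, the no-op option is skipped a byte at a time, MSS is clamped down to the peer's value when the peer advertises something smaller (an MSS option with the wrong length makes the caller send a reset), and the window-scale shift count is recorded; the driver-specific write0 option just sets send_write0. Any other option is logged and skipped by its length field, so a malformed option carrying a zero length would keep the loop from advancing. A SYN that carries no MSS option falls back to NES_CM_DEFAULT_MSS.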
1268 | |||
1269 | |||
1270 | /** | ||
1271 | * process_packet | ||
1272 | */ | ||
1273 | int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb, | ||
1274 | struct nes_cm_core *cm_core) | ||
1275 | { | ||
1276 | int optionsize; | ||
1277 | int datasize; | ||
1278 | int ret = 0; | ||
1279 | struct tcphdr *tcph = tcp_hdr(skb); | ||
1280 | u32 inc_sequence; | ||
1281 | if (cm_node->state == NES_CM_STATE_SYN_SENT && tcph->syn) { | ||
1282 | inc_sequence = ntohl(tcph->seq); | ||
1283 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence; | ||
1284 | } | ||
1285 | |||
1286 | if ((!tcph) || (cm_node->state == NES_CM_STATE_TSA)) { | ||
1287 | BUG_ON(!tcph); | ||
1288 | atomic_inc(&cm_accel_dropped_pkts); | ||
1289 | return -1; | ||
1290 | } | ||
1291 | |||
1292 | if (tcph->rst) { | ||
1293 | atomic_inc(&cm_resets_recvd); | ||
1294 | nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u. refcnt=%d\n", | ||
1295 | cm_node, cm_node->state, atomic_read(&cm_node->ref_count)); | ||
1296 | switch (cm_node->state) { | ||
1297 | case NES_CM_STATE_LISTENING: | ||
1298 | rem_ref_cm_node(cm_core, cm_node); | ||
1299 | break; | ||
1300 | case NES_CM_STATE_TSA: | ||
1301 | case NES_CM_STATE_CLOSED: | ||
1302 | break; | ||
1303 | case NES_CM_STATE_SYN_RCVD: | ||
1304 | nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X," | ||
1305 | " remote 0x%08X:%04X, node state = %u\n", | ||
1306 | cm_node->loc_addr, cm_node->loc_port, | ||
1307 | cm_node->rem_addr, cm_node->rem_port, | ||
1308 | cm_node->state); | ||
1309 | rem_ref_cm_node(cm_core, cm_node); | ||
1310 | break; | ||
1311 | case NES_CM_STATE_ONE_SIDE_ESTABLISHED: | ||
1312 | case NES_CM_STATE_ESTABLISHED: | ||
1313 | case NES_CM_STATE_MPAREQ_SENT: | ||
1314 | default: | ||
1315 | nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X," | ||
1316 | " remote 0x%08X:%04X, node state = %u refcnt=%d\n", | ||
1317 | cm_node->loc_addr, cm_node->loc_port, | ||
1318 | cm_node->rem_addr, cm_node->rem_port, | ||
1319 | cm_node->state, atomic_read(&cm_node->ref_count)); | ||
1320 | /* create event */ | ||
1321 | cm_node->state = NES_CM_STATE_CLOSED; | ||
1322 | |||
1323 | create_event(cm_node, NES_CM_EVENT_ABORTED); | ||
1324 | break; | ||
1325 | |||
1326 | } | ||
1327 | return -1; | ||
1328 | } | ||
1329 | |||
1330 | optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); | ||
1331 | |||
1332 | skb_pull(skb, ip_hdr(skb)->ihl << 2); | ||
1333 | skb_pull(skb, tcph->doff << 2); | ||
1334 | |||
1335 | datasize = skb->len; | ||
1336 | inc_sequence = ntohl(tcph->seq); | ||
1337 | nes_debug(NES_DBG_CM, "datasize = %u, sequence = 0x%08X, ack_seq = 0x%08X," | ||
1338 | " rcv_nxt = 0x%08X Flags: %s %s.\n", | ||
1339 | datasize, inc_sequence, ntohl(tcph->ack_seq), | ||
1340 | cm_node->tcp_cntxt.rcv_nxt, (tcph->syn ? "SYN":""), | ||
1341 | (tcph->ack ? "ACK":"")); | ||
1342 | |||
1343 | if (!tcph->syn && | ||
1344 | (inc_sequence != cm_node->tcp_cntxt.rcv_nxt)) { | ||
1345 | nes_debug(NES_DBG_CM, "dropping packet, datasize = %u, sequence = 0x%08X," | ||
1346 | " ack_seq = 0x%08X, rcv_nxt = 0x%08X Flags: %s.\n", | ||
1347 | datasize, inc_sequence, ntohl(tcph->ack_seq), | ||
1348 | cm_node->tcp_cntxt.rcv_nxt, (tcph->ack ? "ACK":"")); | ||
1349 | if (cm_node->state == NES_CM_STATE_LISTENING) { | ||
1350 | rem_ref_cm_node(cm_core, cm_node); | ||
1351 | } | ||
1352 | return -1; | ||
1353 | } | ||
1354 | |||
1355 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; | ||
1356 | |||
1357 | |||
1358 | if (optionsize) { | ||
1359 | u8 *optionsloc = (u8 *)&tcph[1]; | ||
1360 | if (process_options(cm_node, optionsloc, optionsize, (u32)tcph->syn)) { | ||
1361 | nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __FUNCTION__, cm_node); | ||
1362 | send_reset(cm_node); | ||
1363 | if (cm_node->state != NES_CM_STATE_SYN_SENT) | ||
1364 | rem_ref_cm_node(cm_core, cm_node); | ||
1365 | return 0; | ||
1366 | } | ||
1367 | } else if (tcph->syn) | ||
1368 | cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS; | ||
1369 | |||
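| /* the peer advertises its receive window in the TCP header; with the | ||
| * window-scale option (RFC 1323) the usable send window is that value | ||
| * shifted left by the scale factor learned during the handshake. */ | ||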
1370 | cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) << | ||
1371 | cm_node->tcp_cntxt.snd_wscale; | ||
1372 | |||
1373 | if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) { | ||
1374 | cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd; | ||
1375 | } | ||
1376 | |||
1377 | if (tcph->ack) { | ||
1378 | cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); | ||
1379 | switch (cm_node->state) { | ||
1380 | case NES_CM_STATE_SYN_RCVD: | ||
1381 | case NES_CM_STATE_SYN_SENT: | ||
1382 | /* read and stash current sequence number */ | ||
1383 | if (cm_node->tcp_cntxt.rem_ack_num != cm_node->tcp_cntxt.loc_seq_num) { | ||
1384 | nes_debug(NES_DBG_CM, "ERROR - cm_node->tcp_cntxt.rem_ack_num !=" | ||
1385 | " cm_node->tcp_cntxt.loc_seq_num\n"); | ||
1386 | send_reset(cm_node); | ||
1387 | return 0; | ||
1388 | } | ||
1389 | if (cm_node->state == NES_CM_STATE_SYN_SENT) | ||
1390 | cm_node->state = NES_CM_STATE_ONE_SIDE_ESTABLISHED; | ||
1391 | else { | ||
1392 | cm_node->state = NES_CM_STATE_ESTABLISHED; | ||
1393 | } | ||
1394 | break; | ||
1395 | case NES_CM_STATE_LAST_ACK: | ||
1396 | cm_node->state = NES_CM_STATE_CLOSED; | ||
1397 | break; | ||
1398 | case NES_CM_STATE_FIN_WAIT1: | ||
1399 | cm_node->state = NES_CM_STATE_FIN_WAIT2; | ||
1400 | break; | ||
1401 | case NES_CM_STATE_CLOSING: | ||
1402 | cm_node->state = NES_CM_STATE_TIME_WAIT; | ||
1403 | /* TODO: hold TIME_WAIT for the 2MSL period before closing; close immediately for now */ | ||
1404 | cm_node->state = NES_CM_STATE_CLOSED; | ||
1405 | break; | ||
1406 | case NES_CM_STATE_ONE_SIDE_ESTABLISHED: | ||
1407 | case NES_CM_STATE_ESTABLISHED: | ||
1408 | case NES_CM_STATE_MPAREQ_SENT: | ||
1409 | case NES_CM_STATE_CLOSE_WAIT: | ||
1410 | case NES_CM_STATE_TIME_WAIT: | ||
1411 | case NES_CM_STATE_CLOSED: | ||
1412 | break; | ||
1413 | case NES_CM_STATE_LISTENING: | ||
1414 | nes_debug(NES_DBG_CM, "Received an ACK on a listening port (SYN %d)\n", tcph->syn); | ||
1415 | cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); | ||
1416 | send_reset(cm_node); | ||
1417 | /* send_reset bumps refcount, this should have been a new node */ | ||
1418 | rem_ref_cm_node(cm_core, cm_node); | ||
1419 | return -1; | ||
1420 | break; | ||
1421 | case NES_CM_STATE_TSA: | ||
1422 | nes_debug(NES_DBG_CM, "Received a packet with the ack bit set while in TSA state\n"); | ||
1423 | break; | ||
1424 | case NES_CM_STATE_UNKNOWN: | ||
1425 | case NES_CM_STATE_INITED: | ||
1426 | case NES_CM_STATE_ACCEPTING: | ||
1427 | case NES_CM_STATE_FIN_WAIT2: | ||
1428 | default: | ||
1429 | nes_debug(NES_DBG_CM, "Received ack from unknown state: %x\n", | ||
1430 | cm_node->state); | ||
1431 | send_reset(cm_node); | ||
1432 | break; | ||
1433 | } | ||
1434 | } | ||
1435 | |||
1436 | if (tcph->syn) { | ||
1437 | if (cm_node->state == NES_CM_STATE_LISTENING) { | ||
1438 | /* do not exceed backlog */ | ||
1439 | atomic_inc(&cm_node->listener->pend_accepts_cnt); | ||
1440 | if (atomic_read(&cm_node->listener->pend_accepts_cnt) > | ||
1441 | cm_node->listener->backlog) { | ||
1442 | nes_debug(NES_DBG_CM, "drop syn due to backlog pressure \n"); | ||
1443 | cm_backlog_drops++; | ||
1444 | atomic_dec(&cm_node->listener->pend_accepts_cnt); | ||
1445 | rem_ref_cm_node(cm_core, cm_node); | ||
1446 | return 0; | ||
1447 | } | ||
1448 | cm_node->accept_pend = 1; | ||
1449 | |||
1450 | } | ||
1451 | if (datasize == 0) | ||
1452 | cm_node->tcp_cntxt.rcv_nxt++; | ||
1453 | |||
1454 | if (cm_node->state == NES_CM_STATE_LISTENING) { | ||
1455 | cm_node->state = NES_CM_STATE_SYN_RCVD; | ||
1456 | send_syn(cm_node, 1); | ||
1457 | } | ||
1458 | if (cm_node->state == NES_CM_STATE_ONE_SIDE_ESTABLISHED) { | ||
1459 | cm_node->state = NES_CM_STATE_ESTABLISHED; | ||
1460 | /* send final handshake ACK */ | ||
1461 | ret = send_ack(cm_node); | ||
1462 | if (ret < 0) | ||
1463 | return ret; | ||
1464 | |||
1465 | cm_node->state = NES_CM_STATE_MPAREQ_SENT; | ||
1466 | ret = send_mpa_request(cm_node); | ||
1467 | if (ret < 0) | ||
1468 | return ret; | ||
1469 | } | ||
1470 | } | ||
1471 | |||
1472 | if (tcph->fin) { | ||
1473 | cm_node->tcp_cntxt.rcv_nxt++; | ||
1474 | switch (cm_node->state) { | ||
1475 | case NES_CM_STATE_SYN_RCVD: | ||
1476 | case NES_CM_STATE_SYN_SENT: | ||
1477 | case NES_CM_STATE_ONE_SIDE_ESTABLISHED: | ||
1478 | case NES_CM_STATE_ESTABLISHED: | ||
1479 | case NES_CM_STATE_ACCEPTING: | ||
1480 | case NES_CM_STATE_MPAREQ_SENT: | ||
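| /* the peer closed first (passive close): instead of lingering in | ||
| * CLOSE_WAIT, send our own FIN right away and wait in LAST_ACK for | ||
| * the final ACK. */ | ||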
1481 | cm_node->state = NES_CM_STATE_CLOSE_WAIT; | ||
1482 | cm_node->state = NES_CM_STATE_LAST_ACK; | ||
1483 | ret = send_fin(cm_node, NULL); | ||
1484 | break; | ||
1485 | case NES_CM_STATE_FIN_WAIT1: | ||
1486 | cm_node->state = NES_CM_STATE_CLOSING; | ||
1487 | ret = send_ack(cm_node); | ||
1488 | break; | ||
1489 | case NES_CM_STATE_FIN_WAIT2: | ||
1490 | cm_node->state = NES_CM_STATE_TIME_WAIT; | ||
1491 | cm_node->tcp_cntxt.loc_seq_num++; | ||
1492 | ret = send_ack(cm_node); | ||
1493 | /* TODO: hold TIME_WAIT for the 2MSL period before closing; close immediately for now */ | ||
1494 | cm_node->state = NES_CM_STATE_CLOSED; | ||
1495 | break; | ||
1496 | case NES_CM_STATE_CLOSE_WAIT: | ||
1497 | case NES_CM_STATE_LAST_ACK: | ||
1498 | case NES_CM_STATE_CLOSING: | ||
1499 | case NES_CM_STATE_TSA: | ||
1500 | default: | ||
1501 | nes_debug(NES_DBG_CM, "Received a fin while in %x state\n", | ||
1502 | cm_node->state); | ||
1503 | ret = -EINVAL; | ||
1504 | break; | ||
1505 | } | ||
1506 | } | ||
1507 | |||
1508 | if (datasize) { | ||
1509 | u8 *dataloc = skb->data; | ||
1510 | /* figure out what state we are in and handle transition to next state */ | ||
1511 | switch (cm_node->state) { | ||
1512 | case NES_CM_STATE_LISTENING: | ||
1513 | case NES_CM_STATE_SYN_RCVD: | ||
1514 | case NES_CM_STATE_SYN_SENT: | ||
1515 | case NES_CM_STATE_FIN_WAIT1: | ||
1516 | case NES_CM_STATE_FIN_WAIT2: | ||
1517 | case NES_CM_STATE_CLOSE_WAIT: | ||
1518 | case NES_CM_STATE_LAST_ACK: | ||
1519 | case NES_CM_STATE_CLOSING: | ||
1520 | break; | ||
1521 | case NES_CM_STATE_MPAREQ_SENT: | ||
1522 | /* recv the mpa res frame, ret=frame len (incl priv data) */ | ||
1523 | ret = parse_mpa(cm_node, dataloc, datasize); | ||
1524 | if (ret < 0) | ||
1525 | break; | ||
1526 | /* MPA reply received and parsed successfully */ | ||
1527 | /* we are done handling this state, set node to a TSA state */ | ||
1528 | cm_node->state = NES_CM_STATE_TSA; | ||
1529 | send_ack(cm_node); | ||
1530 | create_event(cm_node, NES_CM_EVENT_CONNECTED); | ||
1531 | break; | ||
1532 | |||
1533 | case NES_CM_STATE_ESTABLISHED: | ||
1534 | /* we are expecting an MPA req frame */ | ||
1535 | ret = parse_mpa(cm_node, dataloc, datasize); | ||
1536 | if (ret < 0) { | ||
1537 | break; | ||
1538 | } | ||
1539 | cm_node->state = NES_CM_STATE_TSA; | ||
1540 | send_ack(cm_node); | ||
1541 | /* we got a valid MPA request, create an event */ | ||
1542 | create_event(cm_node, NES_CM_EVENT_MPA_REQ); | ||
1543 | break; | ||
1544 | case NES_CM_STATE_TSA: | ||
1545 | handle_exception_pkt(cm_node, skb); | ||
1546 | break; | ||
1547 | case NES_CM_STATE_UNKNOWN: | ||
1548 | case NES_CM_STATE_INITED: | ||
1549 | default: | ||
1550 | ret = -1; | ||
1551 | } | ||
1552 | } | ||
1553 | |||
1554 | return ret; | ||
1555 | } | ||
1556 | |||
1557 | |||
1558 | /** | ||
1559 | * mini_cm_listen - create a listen node with params | ||
1560 | */ | ||
1561 | static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, | ||
1562 | struct nes_vnic *nesvnic, struct nes_cm_info *cm_info) | ||
1563 | { | ||
1564 | struct nes_cm_listener *listener; | ||
1565 | unsigned long flags; | ||
1566 | |||
1567 | nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n", | ||
1568 | cm_info->loc_addr, cm_info->loc_port); | ||
1569 | |||
1570 | /* cannot have multiple matching listeners */ | ||
1571 | listener = find_listener(cm_core, htonl(cm_info->loc_addr), | ||
1572 | htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE); | ||
1573 | if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) { | ||
1574 | /* find_listener() took a reference on the existing listener; drop it */ | ||
1575 | atomic_dec(&listener->ref_count); | ||
1576 | nes_debug(NES_DBG_CM, "Not creating listener since it already exists\n"); | ||
1577 | return NULL; | ||
1578 | } | ||
1579 | |||
1580 | if (!listener) { | ||
1581 | /* create a CM listen node (1/2 node to compare incoming traffic to) */ | ||
1582 | listener = kzalloc(sizeof(*listener), GFP_ATOMIC); | ||
1583 | if (!listener) { | ||
1584 | nes_debug(NES_DBG_CM, "Not creating listener memory allocation failed\n"); | ||
1585 | return NULL; | ||
1586 | } | ||
1587 | |||
1588 | memset(listener, 0, sizeof(struct nes_cm_listener)); | ||
1589 | listener->loc_addr = htonl(cm_info->loc_addr); | ||
1590 | listener->loc_port = htons(cm_info->loc_port); | ||
1591 | listener->reused_node = 0; | ||
1592 | |||
1593 | atomic_set(&listener->ref_count, 1); | ||
1594 | } | ||
1595 | /* passive case */ | ||
1596 | /* find_listener() already inc'ed the ref count */ | ||
1597 | else { | ||
1598 | listener->reused_node = 1; | ||
1599 | } | ||
1600 | |||
1601 | listener->cm_id = cm_info->cm_id; | ||
1602 | atomic_set(&listener->pend_accepts_cnt, 0); | ||
1603 | listener->cm_core = cm_core; | ||
1604 | listener->nesvnic = nesvnic; | ||
1605 | atomic_inc(&cm_core->node_cnt); | ||
1606 | atomic_inc(&cm_core->session_id); | ||
1607 | |||
1608 | listener->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid); | ||
1609 | listener->conn_type = cm_info->conn_type; | ||
1610 | listener->backlog = cm_info->backlog; | ||
1611 | listener->listener_state = NES_CM_LISTENER_ACTIVE_STATE; | ||
1612 | |||
1613 | if (!listener->reused_node) { | ||
1614 | spin_lock_irqsave(&cm_core->listen_list_lock, flags); | ||
1615 | list_add(&listener->list, &cm_core->listen_list.list); | ||
1616 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | ||
1617 | atomic_inc(&cm_core->listen_node_cnt); | ||
1618 | } | ||
1619 | |||
1620 | nes_debug(NES_DBG_CM, "Api - listen(): addr=0x%08X, port=0x%04x," | ||
1621 | " listener = %p, backlog = %d, cm_id = %p.\n", | ||
1622 | cm_info->loc_addr, cm_info->loc_port, | ||
1623 | listener, listener->backlog, listener->cm_id); | ||
1624 | |||
1625 | return listener; | ||
1626 | } | ||
1627 | |||
1628 | |||
1629 | /** | ||
1630 | * mini_cm_connect - make a connection node with params | ||
1631 | */ | ||
1632 | struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | ||
1633 | struct nes_vnic *nesvnic, struct ietf_mpa_frame *mpa_frame, | ||
1634 | struct nes_cm_info *cm_info) | ||
1635 | { | ||
1636 | int ret = 0; | ||
1637 | struct nes_cm_node *cm_node; | ||
1638 | struct nes_cm_listener *loopbackremotelistener; | ||
1639 | struct nes_cm_node *loopbackremotenode; | ||
1640 | struct nes_cm_info loopback_cm_info; | ||
1641 | |||
1642 | u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) + | ||
1643 | ntohs(mpa_frame->priv_data_len); | ||
1644 | |||
1645 | cm_info->loc_addr = htonl(cm_info->loc_addr); | ||
1646 | cm_info->rem_addr = htonl(cm_info->rem_addr); | ||
1647 | cm_info->loc_port = htons(cm_info->loc_port); | ||
1648 | cm_info->rem_port = htons(cm_info->rem_port); | ||
1649 | |||
1650 | /* create a CM connection node */ | ||
1651 | cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL); | ||
1652 | if (!cm_node) | ||
1653 | return NULL; | ||
1654 | |||
1655 | /* set our node side to client (active) side */ | ||
1656 | cm_node->tcp_cntxt.client = 1; | ||
1657 | cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; | ||
1658 | |||
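| /* loopback connections (local address == remote address) never touch | ||
| * the wire: the two cm_nodes are cross-wired directly, swapping | ||
| * sequence numbers, windows and scale factors; the active side goes | ||
| * straight to the accelerated (TSA) state and the passive side is | ||
| * handed an MPA request event. */ | ||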
1659 | if (cm_info->loc_addr == cm_info->rem_addr) { | ||
1660 | loopbackremotelistener = find_listener(cm_core, cm_node->rem_addr, | ||
1661 | cm_node->rem_port, NES_CM_LISTENER_ACTIVE_STATE); | ||
1662 | if (loopbackremotelistener == NULL) { | ||
1663 | create_event(cm_node, NES_CM_EVENT_ABORTED); | ||
1664 | } else { | ||
1665 | atomic_inc(&cm_loopbacks); | ||
1666 | loopback_cm_info = *cm_info; | ||
1667 | loopback_cm_info.loc_port = cm_info->rem_port; | ||
1668 | loopback_cm_info.rem_port = cm_info->loc_port; | ||
1669 | loopback_cm_info.cm_id = loopbackremotelistener->cm_id; | ||
1670 | loopbackremotenode = make_cm_node(cm_core, nesvnic, &loopback_cm_info, | ||
1671 | loopbackremotelistener); | ||
1672 | loopbackremotenode->loopbackpartner = cm_node; | ||
1673 | loopbackremotenode->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; | ||
1674 | cm_node->loopbackpartner = loopbackremotenode; | ||
1675 | memcpy(loopbackremotenode->mpa_frame_buf, &mpa_frame->priv_data, | ||
1676 | mpa_frame_size); | ||
1677 | loopbackremotenode->mpa_frame_size = mpa_frame_size - | ||
1678 | sizeof(struct ietf_mpa_frame); | ||
1679 | |||
1680 | /* we are done handling this state, set node to a TSA state */ | ||
1681 | cm_node->state = NES_CM_STATE_TSA; | ||
1682 | cm_node->tcp_cntxt.rcv_nxt = loopbackremotenode->tcp_cntxt.loc_seq_num; | ||
1683 | loopbackremotenode->tcp_cntxt.rcv_nxt = cm_node->tcp_cntxt.loc_seq_num; | ||
1684 | cm_node->tcp_cntxt.max_snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd; | ||
1685 | loopbackremotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd; | ||
1686 | cm_node->tcp_cntxt.snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd; | ||
1687 | loopbackremotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd; | ||
1688 | cm_node->tcp_cntxt.snd_wscale = loopbackremotenode->tcp_cntxt.rcv_wscale; | ||
1689 | loopbackremotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale; | ||
1690 | |||
1691 | create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ); | ||
1692 | } | ||
1693 | return cm_node; | ||
1694 | } | ||
1695 | |||
1696 | /* set our node side to client (active) side */ | ||
1697 | cm_node->tcp_cntxt.client = 1; | ||
1698 | /* init our MPA frame ptr */ | ||
1699 | memcpy(&cm_node->mpa_frame, mpa_frame, mpa_frame_size); | ||
1700 | cm_node->mpa_frame_size = mpa_frame_size; | ||
1701 | |||
1702 | /* send a syn and goto syn sent state */ | ||
1703 | cm_node->state = NES_CM_STATE_SYN_SENT; | ||
1704 | ret = send_syn(cm_node, 0); | ||
1705 | |||
1706 | nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X, port=0x%04x," | ||
1707 | " cm_node=%p, cm_id = %p.\n", | ||
1708 | cm_node->rem_addr, cm_node->rem_port, cm_node, cm_node->cm_id); | ||
1709 | |||
1710 | return cm_node; | ||
1711 | } | ||
1712 | |||
1713 | |||
1714 | /** | ||
1715 | * mini_cm_accept - accept a connection | ||
1716 | * This function is never called | ||
1717 | */ | ||
1718 | int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame, | ||
1719 | struct nes_cm_node *cm_node) | ||
1720 | { | ||
1721 | return 0; | ||
1722 | } | ||
1723 | |||
1724 | |||
1725 | /** | ||
1726 | * mini_cm_reject - reject and teardown a connection | ||
1727 | */ | ||
1728 | int mini_cm_reject(struct nes_cm_core *cm_core, | ||
1729 | struct ietf_mpa_frame *mpa_frame, | ||
1730 | struct nes_cm_node *cm_node) | ||
1731 | { | ||
1732 | int ret = 0; | ||
1733 | struct sk_buff *skb; | ||
1734 | u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) + | ||
1735 | ntohs(mpa_frame->priv_data_len); | ||
1736 | |||
1737 | skb = get_free_pkt(cm_node); | ||
1738 | if (!skb) { | ||
1739 | nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); | ||
1740 | return -1; | ||
1741 | } | ||
1742 | |||
1743 | /* send an MPA Reply (reject) frame */ | ||
1744 | form_cm_frame(skb, cm_node, NULL, 0, mpa_frame, mpa_frame_size, SET_ACK | SET_FIN); | ||
1745 | ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); | ||
1746 | |||
1747 | cm_node->state = NES_CM_STATE_CLOSED; | ||
1748 | ret = send_fin(cm_node, NULL); | ||
1749 | |||
1750 | if (ret < 0) { | ||
1751 | printk(KERN_INFO PFX "failed to send MPA Reply (reject)\n"); | ||
1752 | return ret; | ||
1753 | } | ||
1754 | |||
1755 | return ret; | ||
1756 | } | ||
1757 | |||
1758 | |||
1759 | /** | ||
1760 | * mini_cm_close - close a connection node; sends a FIN or drops the node reference depending on state | ||
1761 | */ | ||
1762 | int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node) | ||
1763 | { | ||
1764 | int ret = 0; | ||
1765 | |||
1766 | if (!cm_core || !cm_node) | ||
1767 | return -EINVAL; | ||
1768 | |||
1769 | switch (cm_node->state) { | ||
1770 | /* connection is still being set up or is established: */ | ||
1771 | /* start an active close by sending our FIN */ | ||
1772 | case NES_CM_STATE_SYN_RCVD: | ||
1773 | case NES_CM_STATE_SYN_SENT: | ||
1774 | case NES_CM_STATE_ONE_SIDE_ESTABLISHED: | ||
1775 | case NES_CM_STATE_ESTABLISHED: | ||
1776 | case NES_CM_STATE_ACCEPTING: | ||
1777 | case NES_CM_STATE_MPAREQ_SENT: | ||
1778 | cm_node->state = NES_CM_STATE_FIN_WAIT1; | ||
1779 | send_fin(cm_node, NULL); | ||
1780 | break; | ||
1781 | case NES_CM_STATE_CLOSE_WAIT: | ||
1782 | cm_node->state = NES_CM_STATE_LAST_ACK; | ||
1783 | send_fin(cm_node, NULL); | ||
1784 | break; | ||
1785 | case NES_CM_STATE_FIN_WAIT1: | ||
1786 | case NES_CM_STATE_FIN_WAIT2: | ||
1787 | case NES_CM_STATE_LAST_ACK: | ||
1788 | case NES_CM_STATE_TIME_WAIT: | ||
1789 | case NES_CM_STATE_CLOSING: | ||
1790 | ret = -1; | ||
1791 | break; | ||
1792 | case NES_CM_STATE_LISTENING: | ||
1793 | case NES_CM_STATE_UNKNOWN: | ||
1794 | case NES_CM_STATE_INITED: | ||
1795 | case NES_CM_STATE_CLOSED: | ||
1796 | case NES_CM_STATE_TSA: | ||
1797 | ret = rem_ref_cm_node(cm_core, cm_node); | ||
1798 | break; | ||
1799 | } | ||
1800 | cm_node->cm_id = NULL; | ||
1801 | return ret; | ||
1802 | } | ||
1803 | |||
1804 | |||
1805 | /** | ||
1806 | * mini_cm_recv_pkt - receive an Ethernet packet and process it | ||
1807 | * through the CM node state machine | ||
1808 | */ | ||
1809 | int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic, | ||
1810 | struct sk_buff *skb) | ||
1811 | { | ||
1812 | struct nes_cm_node *cm_node = NULL; | ||
1813 | struct nes_cm_listener *listener = NULL; | ||
1814 | struct iphdr *iph; | ||
1815 | struct tcphdr *tcph; | ||
1816 | struct nes_cm_info nfo; | ||
1817 | int ret = 0; | ||
1818 | |||
1819 | if (!skb || skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) { | ||
1820 | ret = -EINVAL; | ||
1821 | goto out; | ||
1822 | } | ||
1823 | |||
1824 | iph = (struct iphdr *)skb->data; | ||
1825 | tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr)); | ||
1826 | skb_reset_network_header(skb); | ||
1827 | skb_set_transport_header(skb, sizeof(*tcph)); | ||
1828 | skb->len = ntohs(iph->tot_len); | ||
1829 | |||
1830 | nfo.loc_addr = ntohl(iph->daddr); | ||
1831 | nfo.loc_port = ntohs(tcph->dest); | ||
1832 | nfo.rem_addr = ntohl(iph->saddr); | ||
1833 | nfo.rem_port = ntohs(tcph->source); | ||
1834 | |||
1835 | nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n", | ||
1836 | iph->daddr, tcph->dest, iph->saddr, tcph->source); | ||
1837 | |||
1838 | /* note: this call is going to increment cm_node ref count */ | ||
1839 | cm_node = find_node(cm_core, | ||
1840 | nfo.rem_port, nfo.rem_addr, | ||
1841 | nfo.loc_port, nfo.loc_addr); | ||
1842 | |||
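| /* no established node matched the 4-tuple: fall back to a listener on | ||
| * the local address/port and create a fresh node in LISTENING state; | ||
| * if there is no listener either, answer with a RST and drop. */ | ||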
1843 | if (!cm_node) { | ||
1844 | listener = find_listener(cm_core, nfo.loc_addr, nfo.loc_port, | ||
1845 | NES_CM_LISTENER_ACTIVE_STATE); | ||
1846 | if (listener) { | ||
1847 | nfo.cm_id = listener->cm_id; | ||
1848 | nfo.conn_type = listener->conn_type; | ||
1849 | } else { | ||
1850 | nfo.cm_id = NULL; | ||
1851 | nfo.conn_type = 0; | ||
1852 | } | ||
1853 | |||
1854 | cm_node = make_cm_node(cm_core, nesvnic, &nfo, listener); | ||
1855 | if (!cm_node) { | ||
1856 | nes_debug(NES_DBG_CM, "Unable to allocate node\n"); | ||
1857 | if (listener) { | ||
1858 | nes_debug(NES_DBG_CM, "unable to allocate node and decrementing listener refcount\n"); | ||
1859 | atomic_dec(&listener->ref_count); | ||
1860 | } | ||
1861 | ret = -1; | ||
1862 | goto out; | ||
1863 | } | ||
1864 | if (!listener) { | ||
1865 | nes_debug(NES_DBG_CM, "Packet found for unknown port %x refcnt=%d\n", | ||
1866 | nfo.loc_port, atomic_read(&cm_node->ref_count)); | ||
1867 | if (!tcph->rst) { | ||
1868 | nes_debug(NES_DBG_CM, "Packet found for unknown port=%d" | ||
1869 | " rem_port=%d refcnt=%d\n", | ||
1870 | nfo.loc_port, nfo.rem_port, atomic_read(&cm_node->ref_count)); | ||
1871 | |||
1872 | cm_node->tcp_cntxt.rcv_nxt = ntohl(tcph->seq); | ||
1873 | cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); | ||
1874 | send_reset(cm_node); | ||
1875 | } | ||
1876 | rem_ref_cm_node(cm_core, cm_node); | ||
1877 | ret = -1; | ||
1878 | goto out; | ||
1879 | } | ||
1880 | add_ref_cm_node(cm_node); | ||
1881 | cm_node->state = NES_CM_STATE_LISTENING; | ||
1882 | } | ||
1883 | |||
1884 | nes_debug(NES_DBG_CM, "Processing Packet for node %p, data = (%p):\n", | ||
1885 | cm_node, skb->data); | ||
1886 | process_packet(cm_node, skb, cm_core); | ||
1887 | |||
1888 | rem_ref_cm_node(cm_core, cm_node); | ||
1889 | out: | ||
1890 | if (skb) | ||
1891 | dev_kfree_skb_any(skb); | ||
1892 | return ret; | ||
1893 | } | ||
1894 | |||
1895 | |||
1896 | /** | ||
1897 | * nes_cm_alloc_core - allocate a top level instance of a cm core | ||
1898 | */ | ||
1899 | struct nes_cm_core *nes_cm_alloc_core(void) | ||
1900 | { | ||
1901 | int i; | ||
1902 | |||
1903 | struct nes_cm_core *cm_core; | ||
1904 | struct sk_buff *skb = NULL; | ||
1905 | |||
1906 | /* setup the CM core */ | ||
1907 | /* alloc top level core control structure */ | ||
1908 | cm_core = kzalloc(sizeof(*cm_core), GFP_KERNEL); | ||
1909 | if (!cm_core) | ||
1910 | return NULL; | ||
1911 | |||
1912 | INIT_LIST_HEAD(&cm_core->connected_nodes); | ||
1913 | init_timer(&cm_core->tcp_timer); | ||
1914 | cm_core->tcp_timer.function = nes_cm_timer_tick; | ||
1915 | |||
1916 | cm_core->mtu = NES_CM_DEFAULT_MTU; | ||
1917 | cm_core->state = NES_CM_STATE_INITED; | ||
1918 | cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS; | ||
1919 | |||
1920 | atomic_set(&cm_core->session_id, 0); | ||
1921 | atomic_set(&cm_core->events_posted, 0); | ||
1922 | |||
1923 | /* init the packet lists */ | ||
1924 | skb_queue_head_init(&cm_core->tx_free_list); | ||
1925 | |||
1926 | for (i = 0; i < NES_CM_DEFAULT_FRAME_CNT; i++) { | ||
1927 | skb = dev_alloc_skb(cm_core->mtu); | ||
1928 | if (!skb) { | ||
1929 | kfree(cm_core); | ||
1930 | return NULL; | ||
1931 | } | ||
1932 | /* add 'raw' skb to free frame list */ | ||
1933 | skb_queue_head(&cm_core->tx_free_list, skb); | ||
1934 | } | ||
1935 | |||
1936 | cm_core->api = &nes_cm_api; | ||
1937 | |||
1938 | spin_lock_init(&cm_core->ht_lock); | ||
1939 | spin_lock_init(&cm_core->listen_list_lock); | ||
1940 | |||
1941 | INIT_LIST_HEAD(&cm_core->listen_list.list); | ||
1942 | |||
1943 | nes_debug(NES_DBG_CM, "Init CM Core completed -- cm_core=%p\n", cm_core); | ||
1944 | |||
1945 | nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n"); | ||
1946 | cm_core->event_wq = create_singlethread_workqueue("nesewq"); | ||
1947 | cm_core->post_event = nes_cm_post_event; | ||
1948 | nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n"); | ||
1949 | cm_core->disconn_wq = create_singlethread_workqueue("nesdwq"); | ||
1950 | |||
1951 | print_core(cm_core); | ||
1952 | return cm_core; | ||
1953 | } | ||
1954 | |||
1955 | |||
1956 | /** | ||
1957 | * mini_cm_dealloc_core - deallocate a top level instance of a cm core | ||
1958 | */ | ||
1959 | int mini_cm_dealloc_core(struct nes_cm_core *cm_core) | ||
1960 | { | ||
1961 | nes_debug(NES_DBG_CM, "De-Alloc CM Core (%p)\n", cm_core); | ||
1962 | |||
1963 | if (!cm_core) | ||
1964 | return -EINVAL; | ||
1965 | |||
1966 | barrier(); | ||
1967 | |||
1968 | if (timer_pending(&cm_core->tcp_timer)) { | ||
1969 | del_timer(&cm_core->tcp_timer); | ||
1970 | } | ||
1971 | |||
1972 | destroy_workqueue(cm_core->event_wq); | ||
1973 | destroy_workqueue(cm_core->disconn_wq); | ||
1974 | nes_debug(NES_DBG_CM, "\n"); | ||
1975 | kfree(cm_core); | ||
1976 | |||
1977 | return 0; | ||
1978 | } | ||
1979 | |||
1980 | |||
1981 | /** | ||
1982 | * mini_cm_get | ||
1983 | */ | ||
1984 | int mini_cm_get(struct nes_cm_core *cm_core) | ||
1985 | { | ||
1986 | return cm_core->state; | ||
1987 | } | ||
1988 | |||
1989 | |||
1990 | /** | ||
1991 | * mini_cm_set | ||
1992 | */ | ||
1993 | int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value) | ||
1994 | { | ||
1995 | int ret = 0; | ||
1996 | |||
1997 | switch (type) { | ||
1998 | case NES_CM_SET_PKT_SIZE: | ||
1999 | cm_core->mtu = value; | ||
2000 | break; | ||
2001 | case NES_CM_SET_FREE_PKT_Q_SIZE: | ||
2002 | cm_core->free_tx_pkt_max = value; | ||
2003 | break; | ||
2004 | default: | ||
2005 | /* unknown set option */ | ||
2006 | ret = -EINVAL; | ||
2007 | } | ||
2008 | |||
2009 | return ret; | ||
2010 | } | ||
2011 | |||
2012 | |||
2013 | /** | ||
2014 | * nes_cm_init_tsa_conn - set up the hardware QP context; the MPA | ||
2015 | * exchange must have completed successfully before this is called | ||
2016 | */ | ||
2017 | static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_node) | ||
2018 | { | ||
2019 | int ret = 0; | ||
2020 | |||
2021 | if (!nesqp) | ||
2022 | return -EINVAL; | ||
2023 | |||
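| /* copy the TCP state accumulated in software during the handshake | ||
| * (sequence numbers, windows, scale factors, MSS) into the QP context | ||
| * so the hardware can take over the accelerated connection. */ | ||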
2024 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_IPV4 | | ||
2025 | NES_QPCONTEXT_MISC_NO_NAGLE | NES_QPCONTEXT_MISC_DO_NOT_FRAG | | ||
2026 | NES_QPCONTEXT_MISC_DROS); | ||
2027 | |||
2028 | if (cm_node->tcp_cntxt.snd_wscale || cm_node->tcp_cntxt.rcv_wscale) | ||
2029 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WSCALE); | ||
2030 | |||
2031 | nesqp->nesqp_context->misc2 |= cpu_to_le32(64 << NES_QPCONTEXT_MISC2_TTL_SHIFT); | ||
2032 | |||
2033 | nesqp->nesqp_context->mss |= cpu_to_le32(((u32)cm_node->tcp_cntxt.mss) << 16); | ||
2034 | |||
2035 | nesqp->nesqp_context->tcp_state_flow_label |= cpu_to_le32( | ||
2036 | (u32)NES_QPCONTEXT_TCPSTATE_EST << NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT); | ||
2037 | |||
2038 | nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32( | ||
2039 | (cm_node->tcp_cntxt.snd_wscale << NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT) & | ||
2040 | NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK); | ||
2041 | |||
2042 | nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32( | ||
2043 | (cm_node->tcp_cntxt.rcv_wscale << NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT) & | ||
2044 | NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK); | ||
2045 | |||
2046 | nesqp->nesqp_context->keepalive = cpu_to_le32(0x80); | ||
2047 | nesqp->nesqp_context->ts_recent = 0; | ||
2048 | nesqp->nesqp_context->ts_age = 0; | ||
2049 | nesqp->nesqp_context->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); | ||
2050 | nesqp->nesqp_context->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd); | ||
2051 | nesqp->nesqp_context->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); | ||
2052 | nesqp->nesqp_context->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd << | ||
2053 | cm_node->tcp_cntxt.rcv_wscale); | ||
2054 | nesqp->nesqp_context->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); | ||
2055 | nesqp->nesqp_context->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); | ||
2056 | nesqp->nesqp_context->srtt = 0; | ||
2057 | nesqp->nesqp_context->rttvar = cpu_to_le32(0x6); | ||
2058 | nesqp->nesqp_context->ssthresh = cpu_to_le32(0x3FFFC000); | ||
2059 | nesqp->nesqp_context->cwnd = cpu_to_le32(2*cm_node->tcp_cntxt.mss); | ||
2060 | nesqp->nesqp_context->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); | ||
2061 | nesqp->nesqp_context->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); | ||
2062 | nesqp->nesqp_context->max_snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd); | ||
2063 | |||
2064 | nes_debug(NES_DBG_CM, "QP%u: rcv_nxt = 0x%08X, snd_nxt = 0x%08X," | ||
2065 | " Setting MSS to %u, PDWscale = 0x%08X, rcv_wnd = %u, context misc = 0x%08X.\n", | ||
2066 | nesqp->hwqp.qp_id, le32_to_cpu(nesqp->nesqp_context->rcv_nxt), | ||
2067 | le32_to_cpu(nesqp->nesqp_context->snd_nxt), | ||
2068 | cm_node->tcp_cntxt.mss, le32_to_cpu(nesqp->nesqp_context->pd_index_wscale), | ||
2069 | le32_to_cpu(nesqp->nesqp_context->rcv_wnd), | ||
2070 | le32_to_cpu(nesqp->nesqp_context->misc)); | ||
2071 | nes_debug(NES_DBG_CM, " snd_wnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->snd_wnd)); | ||
2072 | nes_debug(NES_DBG_CM, " snd_cwnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->cwnd)); | ||
2073 | nes_debug(NES_DBG_CM, " max_swnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->max_snd_wnd)); | ||
2074 | |||
2075 | nes_debug(NES_DBG_CM, "Change cm_node state to TSA\n"); | ||
2076 | cm_node->state = NES_CM_STATE_TSA; | ||
2077 | |||
2078 | return ret; | ||
2079 | } | ||
2080 | |||
2081 | |||
2082 | /** | ||
2083 | * nes_cm_disconn | ||
2084 | */ | ||
2085 | int nes_cm_disconn(struct nes_qp *nesqp) | ||
2086 | { | ||
2087 | unsigned long flags; | ||
2088 | |||
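| /* disconn_pending makes sure the disconnect work item is queued at | ||
| * most once per QP; if it is already pending, just drop the extra QP | ||
| * reference. */ | ||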
2089 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2090 | if (nesqp->disconn_pending == 0) { | ||
2091 | nesqp->disconn_pending++; | ||
2092 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2093 | /* nes_add_ref(&nesqp->ibqp); */ | ||
2094 | /* init our disconnect work element and queue it */ | ||
2095 | INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker); | ||
2096 | |||
2097 | queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work); | ||
2098 | } else { | ||
2099 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2100 | nes_rem_ref(&nesqp->ibqp); | ||
2101 | } | ||
2102 | |||
2103 | return 0; | ||
2104 | } | ||
2105 | |||
2106 | |||
2107 | /** | ||
2108 | * nes_disconnect_worker | ||
2109 | */ | ||
2110 | void nes_disconnect_worker(struct work_struct *work) | ||
2111 | { | ||
2112 | struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work); | ||
2113 | |||
2114 | nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n", | ||
2115 | nesqp->last_aeq, nesqp->hwqp.qp_id); | ||
2116 | nes_cm_disconn_true(nesqp); | ||
2117 | } | ||
2118 | |||
2119 | |||
2120 | /** | ||
2121 | * nes_cm_disconn_true - generate disconnect/close events and tear the connection down | ||
2122 | */ | ||
2123 | int nes_cm_disconn_true(struct nes_qp *nesqp) | ||
2124 | { | ||
2125 | unsigned long flags; | ||
2126 | int ret = 0; | ||
2127 | struct iw_cm_id *cm_id; | ||
2128 | struct iw_cm_event cm_event; | ||
2129 | struct nes_vnic *nesvnic; | ||
2130 | u16 last_ae; | ||
2131 | u8 original_hw_tcp_state; | ||
2132 | u8 original_ibqp_state; | ||
2133 | u8 issued_disconnect_reset = 0; | ||
2134 | |||
2135 | if (!nesqp) { | ||
2136 | nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n"); | ||
2137 | return -1; | ||
2138 | } | ||
2139 | |||
2140 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2141 | cm_id = nesqp->cm_id; | ||
2142 | /* make sure we haven't already closed this connection */ | ||
2143 | if (!cm_id) { | ||
2144 | nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n", | ||
2145 | nesqp->hwqp.qp_id); | ||
2146 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2147 | nes_rem_ref(&nesqp->ibqp); | ||
2148 | return -1; | ||
2149 | } | ||
2150 | |||
2151 | nesvnic = to_nesvnic(nesqp->ibqp.device); | ||
2152 | nes_debug(NES_DBG_CM, "Disconnecting QP%u\n", nesqp->hwqp.qp_id); | ||
2153 | |||
2154 | original_hw_tcp_state = nesqp->hw_tcp_state; | ||
2155 | original_ibqp_state = nesqp->ibqp_state; | ||
2156 | last_ae = nesqp->last_aeq; | ||
2157 | |||
2158 | |||
2159 | nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state); | ||
2160 | |||
2161 | if ((nesqp->cm_id) && (cm_id->event_handler)) { | ||
2162 | if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || | ||
2163 | ((original_ibqp_state == IB_QPS_RTS) && | ||
2164 | (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { | ||
2165 | atomic_inc(&cm_disconnects); | ||
2166 | cm_event.event = IW_CM_EVENT_DISCONNECT; | ||
2167 | if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) { | ||
2168 | issued_disconnect_reset = 1; | ||
2169 | cm_event.status = IW_CM_EVENT_STATUS_RESET; | ||
2170 | nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event (status reset) for " | ||
2171 | " QP%u, cm_id = %p. \n", | ||
2172 | nesqp->hwqp.qp_id, cm_id); | ||
2173 | } else { | ||
2174 | cm_event.status = IW_CM_EVENT_STATUS_OK; | ||
2175 | } | ||
2176 | |||
2177 | cm_event.local_addr = cm_id->local_addr; | ||
2178 | cm_event.remote_addr = cm_id->remote_addr; | ||
2179 | cm_event.private_data = NULL; | ||
2180 | cm_event.private_data_len = 0; | ||
2181 | |||
2182 | nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event for " | ||
2183 | " QP%u, SQ Head = %u, SQ Tail = %u. cm_id = %p, refcount = %u.\n", | ||
2184 | nesqp->hwqp.qp_id, | ||
2185 | nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail, cm_id, | ||
2186 | atomic_read(&nesqp->refcount)); | ||
2187 | |||
2188 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2189 | ret = cm_id->event_handler(cm_id, &cm_event); | ||
2190 | if (ret) | ||
2191 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); | ||
2192 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2193 | } | ||
2194 | |||
2195 | nesqp->disconn_pending = 0; | ||
2196 | /* There might have been another AE while the lock was released */ | ||
2197 | original_hw_tcp_state = nesqp->hw_tcp_state; | ||
2198 | original_ibqp_state = nesqp->ibqp_state; | ||
2199 | last_ae = nesqp->last_aeq; | ||
2200 | |||
2201 | if ((issued_disconnect_reset == 0) && (nesqp->cm_id) && | ||
2202 | ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) || | ||
2203 | (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) || | ||
2204 | (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) || | ||
2205 | (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { | ||
2206 | atomic_inc(&cm_closes); | ||
2207 | nesqp->cm_id = NULL; | ||
2208 | nesqp->in_disconnect = 0; | ||
2209 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2210 | nes_disconnect(nesqp, 1); | ||
2211 | |||
2212 | cm_id->provider_data = nesqp; | ||
2213 | /* Send up the close complete event */ | ||
2214 | cm_event.event = IW_CM_EVENT_CLOSE; | ||
2215 | cm_event.status = IW_CM_EVENT_STATUS_OK; | ||
2216 | cm_event.provider_data = cm_id->provider_data; | ||
2217 | cm_event.local_addr = cm_id->local_addr; | ||
2218 | cm_event.remote_addr = cm_id->remote_addr; | ||
2219 | cm_event.private_data = NULL; | ||
2220 | cm_event.private_data_len = 0; | ||
2221 | |||
2222 | ret = cm_id->event_handler(cm_id, &cm_event); | ||
2223 | if (ret) { | ||
2224 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); | ||
2225 | } | ||
2226 | |||
2227 | cm_id->rem_ref(cm_id); | ||
2228 | |||
2229 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2230 | if (nesqp->flush_issued == 0) { | ||
2231 | nesqp->flush_issued = 1; | ||
2232 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2233 | flush_wqes(nesvnic->nesdev, nesqp, NES_CQP_FLUSH_RQ, 1); | ||
2234 | } else { | ||
2235 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2236 | } | ||
2237 | |||
2238 | /* This reference is from either ModifyQP or the AE processing; | ||
2239 |  * there is still a race here with ModifyQP */ | ||
2240 | nes_rem_ref(&nesqp->ibqp); | ||
2241 | |||
2242 | } else { | ||
2243 | cm_id = nesqp->cm_id; | ||
2244 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2245 | /* check to see if the inbound reset beat the outbound reset */ | ||
2246 | if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) { | ||
2247 | nes_debug(NES_DBG_CM, "QP%u: Decing refcount due to inbound reset" | ||
2248 | " beating the outbound reset.\n", | ||
2249 | nesqp->hwqp.qp_id); | ||
2250 | nes_rem_ref(&nesqp->ibqp); | ||
2251 | } | ||
2252 | } | ||
2253 | } else { | ||
2254 | nesqp->disconn_pending = 0; | ||
2255 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2256 | } | ||
2257 | nes_rem_ref(&nesqp->ibqp); | ||
2258 | |||
2259 | return 0; | ||
2260 | } | ||
2261 | |||
2262 | |||
2263 | /** | ||
2264 | * nes_disconnect - release connection resources and close the CM node for a QP | ||
2265 | */ | ||
2266 | int nes_disconnect(struct nes_qp *nesqp, int abrupt) | ||
2267 | { | ||
2268 | int ret = 0; | ||
2269 | struct nes_vnic *nesvnic; | ||
2270 | struct nes_device *nesdev; | ||
2271 | |||
2272 | nesvnic = to_nesvnic(nesqp->ibqp.device); | ||
2273 | if (!nesvnic) | ||
2274 | return -EINVAL; | ||
2275 | |||
2276 | nesdev = nesvnic->nesdev; | ||
2277 | |||
2278 | nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", | ||
2279 | atomic_read(&nesvnic->netdev->refcnt)); | ||
2280 | |||
2281 | if (nesqp->active_conn) { | ||
2282 | |||
2283 | /* indicate this connection is NOT active */ | ||
2284 | nesqp->active_conn = 0; | ||
2285 | } else { | ||
2286 | /* Need to free the Last Streaming Mode Message */ | ||
2287 | if (nesqp->ietf_frame) { | ||
2288 | pci_free_consistent(nesdev->pcidev, | ||
2289 | nesqp->private_data_len+sizeof(struct ietf_mpa_frame), | ||
2290 | nesqp->ietf_frame, nesqp->ietf_frame_pbase); | ||
2291 | } | ||
2292 | } | ||
2293 | |||
2294 | /* close the CM node down if it is still active */ | ||
2295 | if (nesqp->cm_node) { | ||
2296 | nes_debug(NES_DBG_CM, "Call close API\n"); | ||
2297 | |||
2298 | g_cm_core->api->close(g_cm_core, nesqp->cm_node); | ||
2299 | nesqp->cm_node = NULL; | ||
2300 | } | ||
2301 | |||
2302 | return ret; | ||
2303 | } | ||
2304 | |||
2305 | |||
2306 | /** | ||
2307 | * nes_accept - accept an incoming MPA connection request on the given QP | ||
2308 | */ | ||
2309 | int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | ||
2310 | { | ||
2311 | u64 u64temp; | ||
2312 | struct ib_qp *ibqp; | ||
2313 | struct nes_qp *nesqp; | ||
2314 | struct nes_vnic *nesvnic; | ||
2315 | struct nes_device *nesdev; | ||
2316 | struct nes_cm_node *cm_node; | ||
2317 | struct nes_adapter *adapter; | ||
2318 | struct ib_qp_attr attr; | ||
2319 | struct iw_cm_event cm_event; | ||
2320 | struct nes_hw_qp_wqe *wqe; | ||
2321 | struct nes_v4_quad nes_quad; | ||
2322 | int ret; | ||
2323 | |||
2324 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); | ||
2325 | if (!ibqp) | ||
2326 | return -EINVAL; | ||
2327 | |||
2328 | /* get all our handles */ | ||
2329 | nesqp = to_nesqp(ibqp); | ||
2330 | nesvnic = to_nesvnic(nesqp->ibqp.device); | ||
2331 | nesdev = nesvnic->nesdev; | ||
2332 | adapter = nesdev->nesadapter; | ||
2333 | |||
2334 | nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n", | ||
2335 | nesvnic, nesvnic->netdev, nesvnic->netdev->name); | ||
2336 | |||
2337 | /* since this is from a listen, we were able to put node handle into cm_id */ | ||
2338 | cm_node = (struct nes_cm_node *)cm_id->provider_data; | ||
2339 | |||
2340 | /* associate the node with the QP */ | ||
2341 | nesqp->cm_node = (void *)cm_node; | ||
2342 | |||
2343 | nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu\n", | ||
2344 | nesqp->hwqp.qp_id, cm_node, jiffies); | ||
2345 | atomic_inc(&cm_accepts); | ||
2346 | |||
2347 | nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", | ||
2348 | atomic_read(&nesvnic->netdev->refcnt)); | ||
2349 | |||
2350 | /* allocate the ietf frame and space for private data */ | ||
2351 | nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev, | ||
2352 | sizeof(struct ietf_mpa_frame) + conn_param->private_data_len, | ||
2353 | &nesqp->ietf_frame_pbase); | ||
2354 | |||
2355 | if (!nesqp->ietf_frame) { | ||
2356 | nes_debug(NES_DBG_CM, "Unable to allocate memory for private data\n"); | ||
2357 | return -ENOMEM; | ||
2358 | } | ||
2359 | |||
2360 | |||
2361 | /* setup the MPA frame */ | ||
2362 | nesqp->private_data_len = conn_param->private_data_len; | ||
2363 | memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE); | ||
2364 | |||
2365 | memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data, | ||
2366 | conn_param->private_data_len); | ||
2367 | |||
2368 | nesqp->ietf_frame->priv_data_len = cpu_to_be16(conn_param->private_data_len); | ||
2369 | nesqp->ietf_frame->rev = mpa_version; | ||
2370 | nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC; | ||
2371 | |||
2372 | /* setup our first outgoing iWarp send WQE (the IETF frame response) */ | ||
2373 | wqe = &nesqp->hwqp.sq_vbase[0]; | ||
2374 | |||
2375 | if (cm_id->remote_addr.sin_addr.s_addr != cm_id->local_addr.sin_addr.s_addr) { | ||
2376 | u64temp = (unsigned long)nesqp; | ||
2377 | u64temp |= NES_SW_CONTEXT_ALIGN>>1; | ||
2378 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, | ||
2379 | u64temp); | ||
2380 | wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = | ||
2381 | cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING | NES_IWARP_SQ_WQE_WRPDU); | ||
2382 | wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = | ||
2383 | cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame)); | ||
2384 | wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = | ||
2385 | cpu_to_le32((u32)nesqp->ietf_frame_pbase); | ||
2386 | wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = | ||
2387 | cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32)); | ||
2388 | wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = | ||
2389 | cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame)); | ||
2390 | wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; | ||
2391 | |||
2392 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32( | ||
2393 | NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | NES_QPCONTEXT_ORDIRD_WRPDU); | ||
2394 | } else { | ||
2395 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | | ||
2396 | NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM)); | ||
2397 | } | ||
2398 | nesqp->skip_lsmm = 1; | ||
2399 | |||
2400 | |||
2401 | /* Cache the cm_id in the qp */ | ||
2402 | nesqp->cm_id = cm_id; | ||
2403 | cm_node->cm_id = cm_id; | ||
2404 | |||
2405 | /* nesqp->cm_node = (void *)cm_id->provider_data; */ | ||
2406 | cm_id->provider_data = nesqp; | ||
2407 | nesqp->active_conn = 0; | ||
2408 | |||
2409 | nes_cm_init_tsa_conn(nesqp, cm_node); | ||
2410 | |||
2411 | nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port)); | ||
2412 | nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port)); | ||
2413 | nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr)); | ||
2414 | |||
2415 | nesqp->nesqp_context->misc2 |= cpu_to_le32( | ||
2416 | (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT); | ||
2417 | |||
2418 | nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32( | ||
2419 | nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0), NULL, | ||
2420 | NES_ARP_RESOLVE) << 16); | ||
2421 | |||
2422 | nesqp->nesqp_context->ts_val_delta = cpu_to_le32( | ||
2423 | jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW)); | ||
2424 | |||
2425 | nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id); | ||
2426 | |||
2427 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32( | ||
2428 | ((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT)); | ||
2429 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord); | ||
2430 | |||
2431 | memset(&nes_quad, 0, sizeof(nes_quad)); | ||
2432 | nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24); | ||
2433 | nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr; | ||
2434 | nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port; | ||
2435 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; | ||
2436 | |||
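| /* the connection's hash table (HTE) index is the CRC32C of the IP/port | ||
| * quad, inverted and masked to the adapter's table size; the same value | ||
| * is written into the QP context below. */ | ||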
2437 | /* Produce hash key */ | ||
2438 | nesqp->hte_index = cpu_to_be32( | ||
2439 | crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); | ||
2440 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", | ||
2441 | nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); | ||
2442 | |||
2443 | nesqp->hte_index &= adapter->hte_index_mask; | ||
2444 | nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); | ||
2445 | |||
2446 | cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node); | ||
2447 | |||
2448 | nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X," | ||
2449 | " rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + private data length=%zu.\n", | ||
2450 | nesqp->hwqp.qp_id, | ||
2451 | ntohl(cm_id->remote_addr.sin_addr.s_addr), | ||
2452 | ntohs(cm_id->remote_addr.sin_port), | ||
2453 | ntohl(cm_id->local_addr.sin_addr.s_addr), | ||
2454 | ntohs(cm_id->local_addr.sin_port), | ||
2455 | le32_to_cpu(nesqp->nesqp_context->rcv_nxt), | ||
2456 | le32_to_cpu(nesqp->nesqp_context->snd_nxt), | ||
2457 | conn_param->private_data_len+sizeof(struct ietf_mpa_frame)); | ||
2458 | |||
2459 | attr.qp_state = IB_QPS_RTS; | ||
2460 | nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); | ||
2461 | |||
2462 | /* notify OF layer that the accept was successful */ | ||
2463 | cm_id->add_ref(cm_id); | ||
2464 | |||
2465 | cm_event.event = IW_CM_EVENT_ESTABLISHED; | ||
2466 | cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; | ||
2467 | cm_event.provider_data = (void *)nesqp; | ||
2468 | cm_event.local_addr = cm_id->local_addr; | ||
2469 | cm_event.remote_addr = cm_id->remote_addr; | ||
2470 | cm_event.private_data = NULL; | ||
2471 | cm_event.private_data_len = 0; | ||
2472 | ret = cm_id->event_handler(cm_id, &cm_event); | ||
2473 | if (cm_node->loopbackpartner) { | ||
2474 | cm_node->loopbackpartner->mpa_frame_size = nesqp->private_data_len; | ||
2475 | /* copy entire MPA frame to our cm_node's frame */ | ||
2476 | memcpy(cm_node->loopbackpartner->mpa_frame_buf, nesqp->ietf_frame->priv_data, | ||
2477 | nesqp->private_data_len); | ||
2478 | create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED); | ||
2479 | } | ||
2480 | if (ret) | ||
2481 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", | ||
2482 | __FUNCTION__, __LINE__, ret); | ||
2483 | |||
2484 | return 0; | ||
2485 | } | ||
2486 | |||
2487 | |||
2488 | /** | ||
2490 | * nes_reject - reject an incoming connection request, sending an MPA reject frame | ||
2490 | */ | ||
2491 | int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) | ||
2492 | { | ||
2493 | struct nes_cm_node *cm_node; | ||
2494 | struct nes_cm_core *cm_core; | ||
2495 | |||
2496 | atomic_inc(&cm_rejects); | ||
2497 | cm_node = (struct nes_cm_node *) cm_id->provider_data; | ||
2498 | cm_core = cm_node->cm_core; | ||
2499 | cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len; | ||
2500 | |||
2501 | strcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP); | ||
2502 | memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len); | ||
2503 | |||
2504 | cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len); | ||
2505 | cm_node->mpa_frame.rev = mpa_version; | ||
2506 | cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT; | ||
2507 | |||
2508 | cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node); | ||
2509 | |||
2510 | return 0; | ||
2511 | } | ||
2512 | |||
2513 | |||
2514 | /** | ||
2515 | * nes_connect | ||
2516 | * setup and launch cm connect node | ||
2517 | */ | ||
2518 | int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | ||
2519 | { | ||
2520 | struct ib_qp *ibqp; | ||
2521 | struct nes_qp *nesqp; | ||
2522 | struct nes_vnic *nesvnic; | ||
2523 | struct nes_device *nesdev; | ||
2524 | struct nes_cm_node *cm_node; | ||
2525 | struct nes_cm_info cm_info; | ||
2526 | |||
2527 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); | ||
2528 | if (!ibqp) | ||
2529 | return -EINVAL; | ||
2530 | nesqp = to_nesqp(ibqp); | ||
2531 | if (!nesqp) | ||
2532 | return -EINVAL; | ||
2533 | nesvnic = to_nesvnic(nesqp->ibqp.device); | ||
2534 | if (!nesvnic) | ||
2535 | return -EINVAL; | ||
2536 | nesdev = nesvnic->nesdev; | ||
2537 | if (!nesdev) | ||
2538 | return -EINVAL; | ||
2539 | |||
2540 | atomic_inc(&cm_connects); | ||
2541 | |||
2542 | nesqp->ietf_frame = kzalloc(sizeof(struct ietf_mpa_frame) + | ||
2543 | conn_param->private_data_len, GFP_KERNEL); | ||
2544 | if (!nesqp->ietf_frame) | ||
2545 | return -ENOMEM; | ||
2546 | |||
2547 | /* set qp as having an active connection */ | ||
2548 | nesqp->active_conn = 1; | ||
2549 | |||
2550 | nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", | ||
2551 | nesqp->hwqp.qp_id, | ||
2552 | ntohl(cm_id->remote_addr.sin_addr.s_addr), | ||
2553 | ntohs(cm_id->remote_addr.sin_port), | ||
2554 | ntohl(cm_id->local_addr.sin_addr.s_addr), | ||
2555 | ntohs(cm_id->local_addr.sin_port)); | ||
2556 | |||
2557 | /* cache the cm_id in the qp */ | ||
2558 | nesqp->cm_id = cm_id; | ||
2559 | |||
2560 | cm_id->provider_data = nesqp; | ||
2561 | |||
2562 | /* copy the private data */ | ||
2563 | if (conn_param->private_data_len) { | ||
2564 | memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data, | ||
2565 | conn_param->private_data_len); | ||
2566 | } | ||
2567 | |||
2568 | nesqp->private_data_len = conn_param->private_data_len; | ||
2569 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord); | ||
2570 | nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord); | ||
2571 | nes_debug(NES_DBG_CM, "mpa private data len =%u\n", conn_param->private_data_len); | ||
2572 | |||
2573 | strcpy(&nesqp->ietf_frame->key[0], IEFT_MPA_KEY_REQ); | ||
2574 | nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC; | ||
2575 | nesqp->ietf_frame->rev = IETF_MPA_VERSION; | ||
2576 | nesqp->ietf_frame->priv_data_len = htons(conn_param->private_data_len); | ||
2577 | |||
2578 | if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr) | ||
2579 | nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), | ||
2580 | PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD); | ||
2581 | |||
2582 | /* set up the connection params for the node */ | ||
2583 | cm_info.loc_addr = (cm_id->local_addr.sin_addr.s_addr); | ||
2584 | cm_info.loc_port = (cm_id->local_addr.sin_port); | ||
2585 | cm_info.rem_addr = (cm_id->remote_addr.sin_addr.s_addr); | ||
2586 | cm_info.rem_port = (cm_id->remote_addr.sin_port); | ||
2587 | cm_info.cm_id = cm_id; | ||
2588 | cm_info.conn_type = NES_CM_IWARP_CONN_TYPE; | ||
2589 | |||
2590 | cm_id->add_ref(cm_id); | ||
2591 | nes_add_ref(&nesqp->ibqp); | ||
2592 | |||
2593 | /* create a connect CM node connection */ | ||
2594 | cm_node = g_cm_core->api->connect(g_cm_core, nesvnic, nesqp->ietf_frame, &cm_info); | ||
2595 | if (!cm_node) { | ||
2596 | if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr) | ||
2597 | nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), | ||
2598 | PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL); | ||
2599 | nes_rem_ref(&nesqp->ibqp); | ||
2600 | kfree(nesqp->ietf_frame); | ||
2601 | nesqp->ietf_frame = NULL; | ||
2602 | cm_id->rem_ref(cm_id); | ||
2603 | return -ENOMEM; | ||
2604 | } | ||
2605 | |||
2606 | cm_node->apbvt_set = 1; | ||
2607 | nesqp->cm_node = cm_node; | ||
2608 | |||
2609 | return 0; | ||
2610 | } | ||
2611 | |||
2612 | |||
2613 | /** | ||
2614 | * nes_create_listen - set up a listener for the given cm_id and backlog | ||
2615 | */ | ||
2616 | int nes_create_listen(struct iw_cm_id *cm_id, int backlog) | ||
2617 | { | ||
2618 | struct nes_vnic *nesvnic; | ||
2619 | struct nes_cm_listener *cm_node; | ||
2620 | struct nes_cm_info cm_info; | ||
2621 | struct nes_adapter *adapter; | ||
2622 | int err; | ||
2623 | |||
2624 | |||
2625 | nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n", | ||
2626 | cm_id, ntohs(cm_id->local_addr.sin_port)); | ||
2627 | |||
2628 | nesvnic = to_nesvnic(cm_id->device); | ||
2629 | if (!nesvnic) | ||
2630 | return -EINVAL; | ||
2631 | adapter = nesvnic->nesdev->nesadapter; | ||
2632 | nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n", | ||
2633 | nesvnic, nesvnic->netdev, nesvnic->netdev->name); | ||
2634 | |||
2635 | nes_debug(NES_DBG_CM, "nesvnic->local_ipaddr=0x%08x, sin_addr.s_addr=0x%08x\n", | ||
2636 | nesvnic->local_ipaddr, cm_id->local_addr.sin_addr.s_addr); | ||
2637 | |||
2638 | /* setup listen params in our api call struct */ | ||
2639 | cm_info.loc_addr = nesvnic->local_ipaddr; | ||
2640 | cm_info.loc_port = cm_id->local_addr.sin_port; | ||
2641 | cm_info.backlog = backlog; | ||
2642 | cm_info.cm_id = cm_id; | ||
2643 | |||
2644 | cm_info.conn_type = NES_CM_IWARP_CONN_TYPE; | ||
2645 | |||
2646 | |||
2647 | cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info); | ||
2648 | if (!cm_node) { | ||
2649 | printk(KERN_ERR "%s[%u] Error returned from listen API call\n", | ||
2650 | __FUNCTION__, __LINE__); | ||
2651 | return -ENOMEM; | ||
2652 | } | ||
2653 | |||
2654 | cm_id->provider_data = cm_node; | ||
2655 | |||
2656 | if (!cm_node->reused_node) { | ||
2657 | err = nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), | ||
2658 | PCI_FUNC(nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD); | ||
2659 | if (err) { | ||
2660 | printk("nes_manage_apbvt call returned %d.\n", err); | ||
2661 | g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); | ||
2662 | return err; | ||
2663 | } | ||
2664 | cm_listens_created++; | ||
2665 | } | ||
2666 | |||
2667 | cm_id->add_ref(cm_id); | ||
2668 | cm_id->provider_data = (void *)cm_node; | ||
2669 | |||
2670 | |||
2671 | return 0; | ||
2672 | } | ||
2673 | |||
2674 | |||
2675 | /** | ||
2676 | * nes_destroy_listen | ||
2677 | */ | ||
2678 | int nes_destroy_listen(struct iw_cm_id *cm_id) | ||
2679 | { | ||
2680 | if (cm_id->provider_data) | ||
2681 | g_cm_core->api->stop_listener(g_cm_core, cm_id->provider_data); | ||
2682 | else | ||
2683 | nes_debug(NES_DBG_CM, "cm_id->provider_data was NULL\n"); | ||
2684 | |||
2685 | cm_id->rem_ref(cm_id); | ||
2686 | |||
2687 | return 0; | ||
2688 | } | ||
2689 | |||
2690 | |||
2691 | /** | ||
2693 | * nes_cm_recv - entry point for packets handed to the CM from the netdev | ||
2693 | */ | ||
2694 | int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice) | ||
2695 | { | ||
2696 | cm_packets_received++; | ||
2697 | if ((g_cm_core) && (g_cm_core->api)) { | ||
2698 | g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb); | ||
2699 | } else { | ||
2700 | nes_debug(NES_DBG_CM, "Unable to process packet for CM," | ||
2701 | " cm is not setup properly.\n"); | ||
2702 | } | ||
2703 | |||
2704 | return 0; | ||
2705 | } | ||
2706 | |||
2707 | |||
2708 | /** | ||
2709 | * nes_cm_start | ||
2710 | * Start and init a cm core module | ||
2711 | */ | ||
2712 | int nes_cm_start(void) | ||
2713 | { | ||
2714 | nes_debug(NES_DBG_CM, "\n"); | ||
2715 | /* create the primary CM core, pass this handle to subsequent core inits */ | ||
2716 | g_cm_core = nes_cm_alloc_core(); | ||
2717 | if (g_cm_core) { | ||
2718 | return 0; | ||
2719 | } else { | ||
2720 | return -ENOMEM; | ||
2721 | } | ||
2722 | } | ||
2723 | |||
2724 | |||
2725 | /** | ||
2726 | * nes_cm_stop | ||
2727 | * stop and dealloc all cm core instances | ||
2728 | */ | ||
2729 | int nes_cm_stop(void) | ||
2730 | { | ||
2731 | g_cm_core->api->destroy_cm_core(g_cm_core); | ||
2732 | return 0; | ||
2733 | } | ||
2734 | |||
2735 | |||
2736 | /** | ||
2737 | * cm_event_connected | ||
2739 | * handle a connected event: set up the QP and HW | ||
2739 | */ | ||
2740 | void cm_event_connected(struct nes_cm_event *event) | ||
2741 | { | ||
2742 | u64 u64temp; | ||
2743 | struct nes_qp *nesqp; | ||
2744 | struct nes_vnic *nesvnic; | ||
2745 | struct nes_device *nesdev; | ||
2746 | struct nes_cm_node *cm_node; | ||
2747 | struct nes_adapter *nesadapter; | ||
2748 | struct ib_qp_attr attr; | ||
2749 | struct iw_cm_id *cm_id; | ||
2750 | struct iw_cm_event cm_event; | ||
2751 | struct nes_hw_qp_wqe *wqe; | ||
2752 | struct nes_v4_quad nes_quad; | ||
2753 | int ret; | ||
2754 | |||
2755 | /* get all our handles */ | ||
2756 | cm_node = event->cm_node; | ||
2757 | cm_id = cm_node->cm_id; | ||
2758 | nes_debug(NES_DBG_CM, "cm_event_connected - %p - cm_id = %p\n", cm_node, cm_id); | ||
2759 | nesqp = (struct nes_qp *)cm_id->provider_data; | ||
2760 | nesvnic = to_nesvnic(nesqp->ibqp.device); | ||
2761 | nesdev = nesvnic->nesdev; | ||
2762 | nesadapter = nesdev->nesadapter; | ||
2763 | |||
2764 | if (nesqp->destroyed) { | ||
2765 | return; | ||
2766 | } | ||
2767 | atomic_inc(&cm_connecteds); | ||
2768 | nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on" | ||
2769 | " local port 0x%04X. jiffies = %lu.\n", | ||
2770 | nesqp->hwqp.qp_id, | ||
2771 | ntohl(cm_id->remote_addr.sin_addr.s_addr), | ||
2772 | ntohs(cm_id->remote_addr.sin_port), | ||
2773 | ntohs(cm_id->local_addr.sin_port), | ||
2774 | jiffies); | ||
2775 | |||
2776 | nes_cm_init_tsa_conn(nesqp, cm_node); | ||
2777 | |||
2778 | /* set the QP tsa context */ | ||
2779 | nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port)); | ||
2780 | nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port)); | ||
2781 | nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr)); | ||
2782 | |||
2783 | nesqp->nesqp_context->misc2 |= cpu_to_le32( | ||
2784 | (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT); | ||
2785 | nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32( | ||
2786 | nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0), | ||
2787 | NULL, NES_ARP_RESOLVE) << 16); | ||
2788 | nesqp->nesqp_context->ts_val_delta = cpu_to_le32( | ||
2789 | jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW)); | ||
2790 | nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id); | ||
2791 | nesqp->nesqp_context->ird_ord_sizes |= | ||
2792 | cpu_to_le32((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT); | ||
2793 | |||
2794 | /* Adjust tail for not having a LSMM */ | ||
2795 | nesqp->hwqp.sq_tail = 1; | ||
2796 | |||
2797 | #if defined(NES_SEND_FIRST_WRITE) | ||
2798 | if (cm_node->send_write0) { | ||
2799 | nes_debug(NES_DBG_CM, "Sending first write.\n"); | ||
2800 | wqe = &nesqp->hwqp.sq_vbase[0]; | ||
2801 | u64temp = (unsigned long)nesqp; | ||
2802 | u64temp |= NES_SW_CONTEXT_ALIGN>>1; | ||
2803 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, | ||
2804 | u64temp); | ||
2805 | wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(NES_IWARP_SQ_OP_RDMAW); | ||
2806 | wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0; | ||
2807 | wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0; | ||
2808 | wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0; | ||
2809 | wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0; | ||
2810 | wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; | ||
2811 | |||
2812 | /* use the reserved spot on the WQ for the extra first WQE */ | ||
2813 | nesqp->nesqp_context->ird_ord_sizes &= cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | | ||
2814 | NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM)); | ||
2815 | nesqp->skip_lsmm = 1; | ||
2816 | nesqp->hwqp.sq_tail = 0; | ||
2817 | nes_write32(nesdev->regs + NES_WQE_ALLOC, | ||
2818 | (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id); | ||
2819 | } | ||
2820 | #endif | ||
2821 | |||
2822 | memset(&nes_quad, 0, sizeof(nes_quad)); | ||
2823 | |||
2824 | nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24); | ||
2825 | nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr; | ||
2826 | nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port; | ||
2827 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; | ||
2828 | |||
2829 | /* Produce hash key */ | ||
2830 | nesqp->hte_index = cpu_to_be32( | ||
2831 | crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); | ||
2832 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", | ||
2833 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); | ||
2834 | |||
2835 | nesqp->hte_index &= nesadapter->hte_index_mask; | ||
2836 | nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); | ||
2837 | |||
2838 | nesqp->ietf_frame = &cm_node->mpa_frame; | ||
2839 | nesqp->private_data_len = (u8) cm_node->mpa_frame_size; | ||
2840 | cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node); | ||
2841 | |||
2842 | /* modify QP state to rts */ | ||
2843 | attr.qp_state = IB_QPS_RTS; | ||
2844 | nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); | ||
2845 | |||
2846 | /* notify OF layer we successfully created the requested connection */ | ||
2847 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; | ||
2848 | cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; | ||
2849 | cm_event.provider_data = cm_id->provider_data; | ||
2850 | cm_event.local_addr.sin_family = AF_INET; | ||
2851 | cm_event.local_addr.sin_port = cm_id->local_addr.sin_port; | ||
2852 | cm_event.remote_addr = cm_id->remote_addr; | ||
2853 | |||
2854 | cm_event.private_data = (void *)event->cm_node->mpa_frame_buf; | ||
2855 | cm_event.private_data_len = (u8) event->cm_node->mpa_frame_size; | ||
2856 | |||
2857 | cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr; | ||
2858 | ret = cm_id->event_handler(cm_id, &cm_event); | ||
2859 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); | ||
2860 | |||
2861 | if (ret) | ||
2862 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", | ||
2863 | __FUNCTION__, __LINE__, ret); | ||
2864 | nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = %lu\n", | ||
2865 | nesqp->hwqp.qp_id, jiffies ); | ||
2866 | |||
2867 | nes_rem_ref(&nesqp->ibqp); | ||
2868 | |||
2869 | return; | ||
2870 | } | ||
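Editorial note: the hash-table index computed above is simply the CRC32C of the connection quad (the PCI-function index, the peer's address and the two TCP ports packed into struct nes_v4_quad), bit-inverted and masked down to the width of the adapter's hash table. A minimal sketch of the same computation, assuming the kernel's crc32c() helper and the struct layout from nes_cm.h, and leaving out the cpu_to_be32() the driver applies before masking:

    #include <linux/crc32c.h>

    /* Sketch only: mirrors the HTE index computation in cm_event_connected(). */
    static u32 nes_sketch_hte_index(const struct nes_v4_quad *quad, u32 hte_index_mask)
    {
            /* seed with ~0, then invert the result (same effect as the ^ 0xffffffff above) */
            u32 key = crc32c(~0, quad, sizeof(*quad)) ^ 0xffffffff;

            /* only the low bits select a bucket in the adapter's hash table */
            return key & hte_index_mask;
    }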
2871 | |||
2872 | |||
2873 | /** | ||
2874 | * cm_event_connect_error - handle a failed connection attempt and notify the OF layer | ||
2875 | */ | ||
2876 | void cm_event_connect_error(struct nes_cm_event *event) | ||
2877 | { | ||
2878 | struct nes_qp *nesqp; | ||
2879 | struct iw_cm_id *cm_id; | ||
2880 | struct iw_cm_event cm_event; | ||
2881 | /* struct nes_cm_info cm_info; */ | ||
2882 | int ret; | ||
2883 | |||
2884 | if (!event->cm_node) | ||
2885 | return; | ||
2886 | |||
2887 | cm_id = event->cm_node->cm_id; | ||
2888 | if (!cm_id) { | ||
2889 | return; | ||
2890 | } | ||
2891 | |||
2892 | nes_debug(NES_DBG_CM, "cm_node=%p, cm_id=%p\n", event->cm_node, cm_id); | ||
2893 | nesqp = cm_id->provider_data; | ||
2894 | |||
2895 | if (!nesqp) { | ||
2896 | return; | ||
2897 | } | ||
2898 | |||
2899 | /* notify OF layer about this connection error event */ | ||
2900 | /* cm_id->rem_ref(cm_id); */ | ||
2901 | nesqp->cm_id = NULL; | ||
2902 | cm_id->provider_data = NULL; | ||
2903 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; | ||
2904 | cm_event.status = IW_CM_EVENT_STATUS_REJECTED; | ||
2905 | cm_event.provider_data = cm_id->provider_data; | ||
2906 | cm_event.local_addr = cm_id->local_addr; | ||
2907 | cm_event.remote_addr = cm_id->remote_addr; | ||
2908 | cm_event.private_data = NULL; | ||
2909 | cm_event.private_data_len = 0; | ||
2910 | |||
2911 | nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, remote_addr=%08x\n", | ||
2912 | cm_event.local_addr.sin_addr.s_addr, cm_event.remote_addr.sin_addr.s_addr); | ||
2913 | |||
2914 | ret = cm_id->event_handler(cm_id, &cm_event); | ||
2915 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); | ||
2916 | if (ret) | ||
2917 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", | ||
2918 | __FUNCTION__, __LINE__, ret); | ||
2919 | nes_rem_ref(&nesqp->ibqp); | ||
2920 | cm_id->rem_ref(cm_id); | ||
2921 | |||
2922 | return; | ||
2923 | } | ||
2924 | |||
2925 | |||
2926 | /** | ||
2927 | * cm_event_reset - handle a connection reset and post a disconnect event to the OF layer | ||
2928 | */ | ||
2929 | void cm_event_reset(struct nes_cm_event *event) | ||
2930 | { | ||
2931 | struct nes_qp *nesqp; | ||
2932 | struct iw_cm_id *cm_id; | ||
2933 | struct iw_cm_event cm_event; | ||
2934 | /* struct nes_cm_info cm_info; */ | ||
2935 | int ret; | ||
2936 | |||
2937 | if (!event->cm_node) | ||
2938 | return; | ||
2939 | |||
2940 | if (!event->cm_node->cm_id) | ||
2941 | return; | ||
2942 | |||
2943 | cm_id = event->cm_node->cm_id; | ||
2944 | |||
2945 | nes_debug(NES_DBG_CM, "%p - cm_id = %p\n", event->cm_node, cm_id); | ||
2946 | nesqp = cm_id->provider_data; | ||
2947 | |||
2948 | nesqp->cm_id = NULL; | ||
2949 | /* cm_id->provider_data = NULL; */ | ||
2950 | cm_event.event = IW_CM_EVENT_DISCONNECT; | ||
2951 | cm_event.status = IW_CM_EVENT_STATUS_RESET; | ||
2952 | cm_event.provider_data = cm_id->provider_data; | ||
2953 | cm_event.local_addr = cm_id->local_addr; | ||
2954 | cm_event.remote_addr = cm_id->remote_addr; | ||
2955 | cm_event.private_data = NULL; | ||
2956 | cm_event.private_data_len = 0; | ||
2957 | |||
2958 | ret = cm_id->event_handler(cm_id, &cm_event); | ||
2959 | nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); | ||
2960 | |||
2961 | |||
2962 | /* the OF layer has been notified; drop the cm_id reference */ | ||
2963 | cm_id->rem_ref(cm_id); | ||
2964 | |||
2965 | return; | ||
2966 | } | ||
2967 | |||
2968 | |||
2969 | /** | ||
2970 | * cm_event_mpa_req - pass an incoming MPA connection request up to the OF layer | ||
2971 | */ | ||
2972 | void cm_event_mpa_req(struct nes_cm_event *event) | ||
2973 | { | ||
2974 | struct iw_cm_id *cm_id; | ||
2975 | struct iw_cm_event cm_event; | ||
2976 | int ret; | ||
2977 | struct nes_cm_node *cm_node; | ||
2978 | |||
2979 | cm_node = event->cm_node; | ||
2980 | if (!cm_node) | ||
2981 | return; | ||
2982 | cm_id = cm_node->cm_id; | ||
2983 | |||
2984 | atomic_inc(&cm_connect_reqs); | ||
2985 | nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", | ||
2986 | cm_node, cm_id, jiffies); | ||
2987 | |||
2988 | cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; | ||
2989 | cm_event.status = IW_CM_EVENT_STATUS_OK; | ||
2990 | cm_event.provider_data = (void *)cm_node; | ||
2991 | |||
2992 | cm_event.local_addr.sin_family = AF_INET; | ||
2993 | cm_event.local_addr.sin_port = htons(event->cm_info.loc_port); | ||
2994 | cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr); | ||
2995 | |||
2996 | cm_event.remote_addr.sin_family = AF_INET; | ||
2997 | cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port); | ||
2998 | cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); | ||
2999 | |||
3000 | cm_event.private_data = cm_node->mpa_frame_buf; | ||
3001 | cm_event.private_data_len = (u8) cm_node->mpa_frame_size; | ||
3002 | |||
3003 | ret = cm_id->event_handler(cm_id, &cm_event); | ||
3004 | if (ret) | ||
3005 | printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", | ||
3006 | __FUNCTION__, __LINE__, ret); | ||
3007 | |||
3008 | return; | ||
3009 | } | ||
3010 | |||
3011 | |||
3012 | static void nes_cm_event_handler(struct work_struct *); | ||
3013 | |||
3014 | /** | ||
3015 | * nes_cm_post_event | ||
3016 | * post an event to the cm event handler | ||
3017 | */ | ||
3018 | int nes_cm_post_event(struct nes_cm_event *event) | ||
3019 | { | ||
3020 | atomic_inc(&event->cm_node->cm_core->events_posted); | ||
3021 | add_ref_cm_node(event->cm_node); | ||
3022 | event->cm_info.cm_id->add_ref(event->cm_info.cm_id); | ||
3023 | INIT_WORK(&event->event_work, nes_cm_event_handler); | ||
3024 | nes_debug(NES_DBG_CM, "queue_work, event=%p\n", event); | ||
3025 | |||
3026 | queue_work(event->cm_node->cm_core->event_wq, &event->event_work); | ||
3027 | |||
3028 | nes_debug(NES_DBG_CM, "Exit\n"); | ||
3029 | return 0; | ||
3030 | } | ||
3031 | |||
3032 | |||
3033 | /** | ||
3034 | * nes_cm_event_handler | ||
3035 | * worker function to handle cm events | ||
3036 | * will free instance of nes_cm_event | ||
3037 | */ | ||
3038 | static void nes_cm_event_handler(struct work_struct *work) | ||
3039 | { | ||
3040 | struct nes_cm_event *event = container_of(work, struct nes_cm_event, event_work); | ||
3041 | struct nes_cm_core *cm_core; | ||
3042 | |||
3043 | if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core)) { | ||
3044 | return; | ||
3045 | } | ||
3046 | cm_core = event->cm_node->cm_core; | ||
3047 | nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n", | ||
3048 | event, event->type, atomic_read(&cm_core->events_posted)); | ||
3049 | |||
3050 | switch (event->type) { | ||
3051 | case NES_CM_EVENT_MPA_REQ: | ||
3052 | cm_event_mpa_req(event); | ||
3053 | nes_debug(NES_DBG_CM, "CM Event: MPA REQUEST\n"); | ||
3054 | break; | ||
3055 | case NES_CM_EVENT_RESET: | ||
3056 | nes_debug(NES_DBG_CM, "CM Event: RESET\n"); | ||
3057 | cm_event_reset(event); | ||
3058 | break; | ||
3059 | case NES_CM_EVENT_CONNECTED: | ||
3060 | if ((!event->cm_node->cm_id) || | ||
3061 | (event->cm_node->state != NES_CM_STATE_TSA)) { | ||
3062 | break; | ||
3063 | } | ||
3064 | cm_event_connected(event); | ||
3065 | nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n"); | ||
3066 | break; | ||
3067 | case NES_CM_EVENT_ABORTED: | ||
3068 | if ((!event->cm_node->cm_id) || (event->cm_node->state == NES_CM_STATE_TSA)) { | ||
3069 | break; | ||
3070 | } | ||
3071 | cm_event_connect_error(event); | ||
3072 | nes_debug(NES_DBG_CM, "CM Event: ABORTED\n"); | ||
3073 | break; | ||
3074 | case NES_CM_EVENT_DROPPED_PKT: | ||
3075 | nes_debug(NES_DBG_CM, "CM Event: DROPPED PKT\n"); | ||
3076 | break; | ||
3077 | default: | ||
3078 | nes_debug(NES_DBG_CM, "CM Event: UNKNOWN EVENT TYPE\n"); | ||
3079 | break; | ||
3080 | } | ||
3081 | |||
3082 | atomic_dec(&cm_core->events_posted); | ||
3083 | event->cm_info.cm_id->rem_ref(event->cm_info.cm_id); | ||
3084 | rem_ref_cm_node(cm_core, event->cm_node); | ||
3085 | kfree(event); | ||
3086 | |||
3087 | return; | ||
3088 | } | ||
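Editorial note: nes_cm_post_event() and nes_cm_event_handler() above form a small deferred-delivery pair: the poster takes a reference on both the cm_node and its iw_cm_id and queues the event on cm_core->event_wq, and the worker delivers it, drops both references and kfree()s the event. A minimal sketch of how a producer inside the driver might build and post such an event; this is a hypothetical helper, assuming the event is kzalloc()'d and that cm_node->cm_core and cm_node->cm_id are both valid, since nes_cm_post_event() dereferences them:

    /* Hypothetical helper: build a CONNECTED event and hand it to the worker. */
    static int nes_sketch_post_connected(struct nes_cm_node *cm_node)
    {
            struct nes_cm_event *event;

            event = kzalloc(sizeof(*event), GFP_ATOMIC);
            if (!event)
                    return -ENOMEM;

            event->type = NES_CM_EVENT_CONNECTED;
            event->cm_node = cm_node;
            event->cm_info.cm_id = cm_node->cm_id;

            /* takes its own node/cm_id references and queues event_work */
            return nes_cm_post_event(event);
    }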
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h new file mode 100644 index 000000000000..a59f0a7fb278 --- /dev/null +++ b/drivers/infiniband/hw/nes/nes_cm.h | |||
@@ -0,0 +1,433 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #ifndef NES_CM_H | ||
35 | #define NES_CM_H | ||
36 | |||
37 | #define QUEUE_EVENTS | ||
38 | |||
39 | #define NES_MANAGE_APBVT_DEL 0 | ||
40 | #define NES_MANAGE_APBVT_ADD 1 | ||
41 | |||
42 | /* IETF MPA -- defines, enums, structs */ | ||
43 | #define IEFT_MPA_KEY_REQ "MPA ID Req Frame" | ||
44 | #define IEFT_MPA_KEY_REP "MPA ID Rep Frame" | ||
45 | #define IETF_MPA_KEY_SIZE 16 | ||
46 | #define IETF_MPA_VERSION 1 | ||
47 | |||
48 | enum ietf_mpa_flags { | ||
49 | IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ | ||
50 | IETF_MPA_FLAGS_CRC = 0x40, /* CRC in use */ | ||
51 | IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */ | ||
52 | }; | ||
53 | |||
54 | struct ietf_mpa_frame { | ||
55 | u8 key[IETF_MPA_KEY_SIZE]; | ||
56 | u8 flags; | ||
57 | u8 rev; | ||
58 | __be16 priv_data_len; | ||
59 | u8 priv_data[0]; | ||
60 | }; | ||
61 | |||
62 | #define ietf_mpa_req_resp_frame ietf_mpa_frame | ||
63 | |||
64 | struct nes_v4_quad { | ||
65 | u32 rsvd0; | ||
66 | __le32 DstIpAdrIndex; /* Only most significant 5 bits are valid */ | ||
67 | __be32 SrcIpadr; | ||
68 | __be16 TcpPorts[2]; /* src is low, dest is high */ | ||
69 | }; | ||
70 | |||
71 | struct nes_cm_node; | ||
72 | enum nes_timer_type { | ||
73 | NES_TIMER_TYPE_SEND, | ||
74 | NES_TIMER_TYPE_RECV, | ||
75 | NES_TIMER_NODE_CLEANUP, | ||
76 | NES_TIMER_TYPE_CLOSE, | ||
77 | }; | ||
78 | |||
79 | #define MAX_NES_IFS 4 | ||
80 | |||
81 | #define SET_ACK 1 | ||
82 | #define SET_SYN 2 | ||
83 | #define SET_FIN 4 | ||
84 | #define SET_RST 8 | ||
85 | |||
86 | struct option_base { | ||
87 | u8 optionnum; | ||
88 | u8 length; | ||
89 | }; | ||
90 | |||
91 | enum option_numbers { | ||
92 | OPTION_NUMBER_END, | ||
93 | OPTION_NUMBER_NONE, | ||
94 | OPTION_NUMBER_MSS, | ||
95 | OPTION_NUMBER_WINDOW_SCALE, | ||
96 | OPTION_NUMBER_SACK_PERM, | ||
97 | OPTION_NUMBER_SACK, | ||
98 | OPTION_NUMBER_WRITE0 = 0xbc | ||
99 | }; | ||
100 | |||
101 | struct option_mss { | ||
102 | u8 optionnum; | ||
103 | u8 length; | ||
104 | __be16 mss; | ||
105 | }; | ||
106 | |||
107 | struct option_windowscale { | ||
108 | u8 optionnum; | ||
109 | u8 length; | ||
110 | u8 shiftcount; | ||
111 | }; | ||
112 | |||
113 | union all_known_options { | ||
114 | char as_end; | ||
115 | struct option_base as_base; | ||
116 | struct option_mss as_mss; | ||
117 | struct option_windowscale as_windowscale; | ||
118 | }; | ||
119 | |||
120 | struct nes_timer_entry { | ||
121 | struct list_head list; | ||
122 | unsigned long timetosend; /* jiffies */ | ||
123 | struct sk_buff *skb; | ||
124 | u32 type; | ||
125 | u32 retrycount; | ||
126 | u32 retranscount; | ||
127 | u32 context; | ||
128 | u32 seq_num; | ||
129 | u32 send_retrans; | ||
130 | int close_when_complete; | ||
131 | struct net_device *netdev; | ||
132 | }; | ||
133 | |||
134 | #define NES_DEFAULT_RETRYS 64 | ||
135 | #define NES_DEFAULT_RETRANS 8 | ||
136 | #ifdef CONFIG_INFINIBAND_NES_DEBUG | ||
137 | #define NES_RETRY_TIMEOUT (1000*HZ/1000) | ||
138 | #else | ||
139 | #define NES_RETRY_TIMEOUT (3000*HZ/1000) | ||
140 | #endif | ||
141 | #define NES_SHORT_TIME (10) | ||
142 | #define NES_LONG_TIME (2000*HZ/1000) | ||
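Editorial note: these timeouts are expressed in jiffies (milliseconds times HZ divided by 1000), so NES_RETRY_TIMEOUT is three seconds on a non-debug build and one second with CONFIG_INFINIBAND_NES_DEBUG, NES_LONG_TIME is two seconds, and NES_SHORT_TIME is a bare count of 10 jiffies. A one-line sketch of the equivalent using the kernel's conversion helper, offered only as an illustration:

    /* Equivalent to (3000*HZ/1000) without the open-coded arithmetic. */
    #define NES_SKETCH_RETRY_TIMEOUT msecs_to_jiffies(3000)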
143 | |||
144 | #define NES_CM_HASHTABLE_SIZE 1024 | ||
145 | #define NES_CM_TCP_TIMER_INTERVAL 3000 | ||
146 | #define NES_CM_DEFAULT_MTU 1540 | ||
147 | #define NES_CM_DEFAULT_FRAME_CNT 10 | ||
148 | #define NES_CM_THREAD_STACK_SIZE 256 | ||
149 | #define NES_CM_DEFAULT_RCV_WND 64240 /* before we know that window scaling is allowed */ | ||
150 | #define NES_CM_DEFAULT_RCV_WND_SCALED 256960 /* after we know that window scaling is allowed */ | ||
151 | #define NES_CM_DEFAULT_RCV_WND_SCALE 2 | ||
152 | #define NES_CM_DEFAULT_FREE_PKTS 0x000A | ||
153 | #define NES_CM_FREE_PKT_LO_WATERMARK 2 | ||
154 | |||
155 | #define NES_CM_DEFAULT_MSS 536 | ||
156 | |||
157 | #define NES_CM_DEF_SEQ 0x159bf75f | ||
158 | #define NES_CM_DEF_LOCAL_ID 0x3b47 | ||
159 | |||
160 | #define NES_CM_DEF_SEQ2 0x18ed5740 | ||
161 | #define NES_CM_DEF_LOCAL_ID2 0xb807 | ||
162 | |||
163 | typedef u32 nes_addr_t; | ||
164 | |||
165 | #define nes_cm_tsa_context nes_qp_context | ||
166 | |||
167 | struct nes_qp; | ||
168 | |||
169 | /* cm node transition states */ | ||
170 | enum nes_cm_node_state { | ||
171 | NES_CM_STATE_UNKNOWN, | ||
172 | NES_CM_STATE_INITED, | ||
173 | NES_CM_STATE_LISTENING, | ||
174 | NES_CM_STATE_SYN_RCVD, | ||
175 | NES_CM_STATE_SYN_SENT, | ||
176 | NES_CM_STATE_ONE_SIDE_ESTABLISHED, | ||
177 | NES_CM_STATE_ESTABLISHED, | ||
178 | NES_CM_STATE_ACCEPTING, | ||
179 | NES_CM_STATE_MPAREQ_SENT, | ||
180 | NES_CM_STATE_TSA, | ||
181 | NES_CM_STATE_FIN_WAIT1, | ||
182 | NES_CM_STATE_FIN_WAIT2, | ||
183 | NES_CM_STATE_CLOSE_WAIT, | ||
184 | NES_CM_STATE_TIME_WAIT, | ||
185 | NES_CM_STATE_LAST_ACK, | ||
186 | NES_CM_STATE_CLOSING, | ||
187 | NES_CM_STATE_CLOSED | ||
188 | }; | ||
189 | |||
190 | /* type of nes connection */ | ||
191 | enum nes_cm_conn_type { | ||
192 | NES_CM_IWARP_CONN_TYPE, | ||
193 | }; | ||
194 | |||
195 | /* CM context params */ | ||
196 | struct nes_cm_tcp_context { | ||
197 | u8 client; | ||
198 | |||
199 | u32 loc_seq_num; | ||
200 | u32 loc_ack_num; | ||
201 | u32 rem_ack_num; | ||
202 | u32 rcv_nxt; | ||
203 | |||
204 | u32 loc_id; | ||
205 | u32 rem_id; | ||
206 | |||
207 | u32 snd_wnd; | ||
208 | u32 max_snd_wnd; | ||
209 | |||
210 | u32 rcv_wnd; | ||
211 | u32 mss; | ||
212 | u8 snd_wscale; | ||
213 | u8 rcv_wscale; | ||
214 | |||
215 | struct nes_cm_tsa_context tsa_cntxt; | ||
216 | struct timeval sent_ts; | ||
217 | }; | ||
218 | |||
219 | |||
220 | enum nes_cm_listener_state { | ||
221 | NES_CM_LISTENER_PASSIVE_STATE=1, | ||
222 | NES_CM_LISTENER_ACTIVE_STATE=2, | ||
223 | NES_CM_LISTENER_EITHER_STATE=3 | ||
224 | }; | ||
225 | |||
226 | struct nes_cm_listener { | ||
227 | struct list_head list; | ||
228 | u64 session_id; | ||
229 | struct nes_cm_core *cm_core; | ||
230 | u8 loc_mac[ETH_ALEN]; | ||
231 | nes_addr_t loc_addr; | ||
232 | u16 loc_port; | ||
233 | struct iw_cm_id *cm_id; | ||
234 | enum nes_cm_conn_type conn_type; | ||
235 | atomic_t ref_count; | ||
236 | struct nes_vnic *nesvnic; | ||
237 | atomic_t pend_accepts_cnt; | ||
238 | int backlog; | ||
239 | enum nes_cm_listener_state listener_state; | ||
240 | u32 reused_node; | ||
241 | }; | ||
242 | |||
243 | /* per connection node and node state information */ | ||
244 | struct nes_cm_node { | ||
245 | u64 session_id; | ||
246 | u32 hashkey; | ||
247 | |||
248 | nes_addr_t loc_addr, rem_addr; | ||
249 | u16 loc_port, rem_port; | ||
250 | |||
251 | u8 loc_mac[ETH_ALEN]; | ||
252 | u8 rem_mac[ETH_ALEN]; | ||
253 | |||
254 | enum nes_cm_node_state state; | ||
255 | struct nes_cm_tcp_context tcp_cntxt; | ||
256 | struct nes_cm_core *cm_core; | ||
257 | struct sk_buff_head resend_list; | ||
258 | atomic_t ref_count; | ||
259 | struct net_device *netdev; | ||
260 | |||
261 | struct nes_cm_node *loopbackpartner; | ||
262 | struct list_head retrans_list; | ||
263 | spinlock_t retrans_list_lock; | ||
264 | struct list_head recv_list; | ||
265 | spinlock_t recv_list_lock; | ||
266 | |||
267 | int send_write0; | ||
268 | union { | ||
269 | struct ietf_mpa_frame mpa_frame; | ||
270 | u8 mpa_frame_buf[NES_CM_DEFAULT_MTU]; | ||
271 | }; | ||
272 | u16 mpa_frame_size; | ||
273 | struct iw_cm_id *cm_id; | ||
274 | struct list_head list; | ||
275 | int accelerated; | ||
276 | struct nes_cm_listener *listener; | ||
277 | enum nes_cm_conn_type conn_type; | ||
278 | struct nes_vnic *nesvnic; | ||
279 | int apbvt_set; | ||
280 | int accept_pend; | ||
281 | }; | ||
282 | |||
283 | /* structure for client or CM to fill when making CM api calls. */ | ||
284 | /* - only need to set relevant data, based on op. */ | ||
285 | struct nes_cm_info { | ||
286 | union { | ||
287 | struct iw_cm_id *cm_id; | ||
288 | struct net_device *netdev; | ||
289 | }; | ||
290 | |||
291 | u16 loc_port; | ||
292 | u16 rem_port; | ||
293 | nes_addr_t loc_addr; | ||
294 | nes_addr_t rem_addr; | ||
295 | |||
296 | enum nes_cm_conn_type conn_type; | ||
297 | int backlog; | ||
298 | }; | ||
299 | |||
300 | /* CM event codes */ | ||
301 | enum nes_cm_event_type { | ||
302 | NES_CM_EVENT_UNKNOWN, | ||
303 | NES_CM_EVENT_ESTABLISHED, | ||
304 | NES_CM_EVENT_MPA_REQ, | ||
305 | NES_CM_EVENT_MPA_CONNECT, | ||
306 | NES_CM_EVENT_MPA_ACCEPT, | ||
307 | NES_CM_EVENT_MPA_ESTABLISHED, | ||
308 | NES_CM_EVENT_CONNECTED, | ||
309 | NES_CM_EVENT_CLOSED, | ||
310 | NES_CM_EVENT_RESET, | ||
311 | NES_CM_EVENT_DROPPED_PKT, | ||
312 | NES_CM_EVENT_CLOSE_IMMED, | ||
313 | NES_CM_EVENT_CLOSE_HARD, | ||
314 | NES_CM_EVENT_CLOSE_CLEAN, | ||
315 | NES_CM_EVENT_ABORTED, | ||
316 | NES_CM_EVENT_SEND_FIRST | ||
317 | }; | ||
318 | |||
319 | /* event to post to CM event handler */ | ||
320 | struct nes_cm_event { | ||
321 | enum nes_cm_event_type type; | ||
322 | |||
323 | struct nes_cm_info cm_info; | ||
324 | struct work_struct event_work; | ||
325 | struct nes_cm_node *cm_node; | ||
326 | }; | ||
327 | |||
328 | struct nes_cm_core { | ||
329 | enum nes_cm_node_state state; | ||
330 | atomic_t session_id; | ||
331 | |||
332 | atomic_t listen_node_cnt; | ||
333 | struct nes_cm_node listen_list; | ||
334 | spinlock_t listen_list_lock; | ||
335 | |||
336 | u32 mtu; | ||
337 | u32 free_tx_pkt_max; | ||
338 | u32 rx_pkt_posted; | ||
339 | struct sk_buff_head tx_free_list; | ||
340 | atomic_t ht_node_cnt; | ||
341 | struct list_head connected_nodes; | ||
342 | /* struct list_head hashtable[NES_CM_HASHTABLE_SIZE]; */ | ||
343 | spinlock_t ht_lock; | ||
344 | |||
345 | struct timer_list tcp_timer; | ||
346 | |||
347 | struct nes_cm_ops *api; | ||
348 | |||
349 | int (*post_event)(struct nes_cm_event *event); | ||
350 | atomic_t events_posted; | ||
351 | struct workqueue_struct *event_wq; | ||
352 | struct workqueue_struct *disconn_wq; | ||
353 | |||
354 | atomic_t node_cnt; | ||
355 | u64 aborted_connects; | ||
356 | u32 options; | ||
357 | |||
358 | struct nes_cm_node *current_listen_node; | ||
359 | }; | ||
360 | |||
361 | |||
362 | #define NES_CM_SET_PKT_SIZE (1 << 1) | ||
363 | #define NES_CM_SET_FREE_PKT_Q_SIZE (1 << 2) | ||
364 | |||
365 | /* CM ops/API for client interface */ | ||
366 | struct nes_cm_ops { | ||
367 | int (*accelerated)(struct nes_cm_core *, struct nes_cm_node *); | ||
368 | struct nes_cm_listener * (*listen)(struct nes_cm_core *, struct nes_vnic *, | ||
369 | struct nes_cm_info *); | ||
370 | int (*stop_listener)(struct nes_cm_core *, struct nes_cm_listener *); | ||
371 | struct nes_cm_node * (*connect)(struct nes_cm_core *, | ||
372 | struct nes_vnic *, struct ietf_mpa_frame *, | ||
373 | struct nes_cm_info *); | ||
374 | int (*close)(struct nes_cm_core *, struct nes_cm_node *); | ||
375 | int (*accept)(struct nes_cm_core *, struct ietf_mpa_frame *, | ||
376 | struct nes_cm_node *); | ||
377 | int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *, | ||
378 | struct nes_cm_node *); | ||
379 | int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *, | ||
380 | struct sk_buff *); | ||
381 | int (*destroy_cm_core)(struct nes_cm_core *); | ||
382 | int (*get)(struct nes_cm_core *); | ||
383 | int (*set)(struct nes_cm_core *, u32, u32); | ||
384 | }; | ||
385 | |||
386 | |||
387 | int send_mpa_request(struct nes_cm_node *); | ||
388 | struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *, | ||
389 | void *, u32, void *, u32, u8); | ||
390 | int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *, | ||
391 | enum nes_timer_type, int, int); | ||
392 | void nes_cm_timer_tick(unsigned long); | ||
393 | int send_syn(struct nes_cm_node *, u32); | ||
394 | int send_reset(struct nes_cm_node *); | ||
395 | int send_ack(struct nes_cm_node *); | ||
396 | int send_fin(struct nes_cm_node *, struct sk_buff *); | ||
397 | struct sk_buff *get_free_pkt(struct nes_cm_node *); | ||
398 | int process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *); | ||
399 | |||
400 | struct nes_cm_node * mini_cm_connect(struct nes_cm_core *, | ||
401 | struct nes_vnic *, struct ietf_mpa_frame *, struct nes_cm_info *); | ||
402 | int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *); | ||
403 | int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *); | ||
404 | int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *); | ||
405 | int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *); | ||
406 | struct nes_cm_core *mini_cm_alloc_core(struct nes_cm_info *); | ||
407 | int mini_cm_dealloc_core(struct nes_cm_core *); | ||
408 | int mini_cm_get(struct nes_cm_core *); | ||
409 | int mini_cm_set(struct nes_cm_core *, u32, u32); | ||
410 | |||
411 | int nes_cm_disconn(struct nes_qp *); | ||
412 | void nes_disconnect_worker(struct work_struct *); | ||
413 | int nes_cm_disconn_true(struct nes_qp *); | ||
414 | int nes_disconnect(struct nes_qp *, int); | ||
415 | |||
416 | int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *); | ||
417 | int nes_reject(struct iw_cm_id *, const void *, u8); | ||
418 | int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *); | ||
419 | int nes_create_listen(struct iw_cm_id *, int); | ||
420 | int nes_destroy_listen(struct iw_cm_id *); | ||
421 | |||
422 | int nes_cm_recv(struct sk_buff *, struct net_device *); | ||
423 | int nes_cm_start(void); | ||
424 | int nes_cm_stop(void); | ||
425 | |||
426 | /* CM event handler functions */ | ||
427 | void cm_event_connected(struct nes_cm_event *); | ||
428 | void cm_event_connect_error(struct nes_cm_event *); | ||
429 | void cm_event_reset(struct nes_cm_event *); | ||
430 | void cm_event_mpa_req(struct nes_cm_event *); | ||
431 | int nes_cm_post_event(struct nes_cm_event *); | ||
432 | |||
433 | #endif /* NES_CM_H */ | ||
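Editorial note: everything outside the CM core reaches it through the struct nes_cm_ops table (the api pointer), as nes_cm_recv() and nes_cm_stop() in nes_cm.c illustrate. A minimal sketch of a dispatch through that table for the listen path, assuming the hypothetical caller can see the g_cm_core handle created by nes_cm_start() and already holds a struct nes_cm_info filled with the local address and port:

    /* Hypothetical caller: ask the CM core to start listening for iWARP connects. */
    static struct nes_cm_listener *nes_sketch_start_listen(struct nes_vnic *nesvnic,
                                                           struct nes_cm_info *cm_info)
    {
            if (!g_cm_core || !g_cm_core->api)
                    return NULL;

            /* the concrete implementation sits behind the ops pointer */
            return g_cm_core->api->listen(g_cm_core, nesvnic, cm_info);
    }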
diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h new file mode 100644 index 000000000000..da9daba8e668 --- /dev/null +++ b/drivers/infiniband/hw/nes/nes_context.h | |||
@@ -0,0 +1,193 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #ifndef NES_CONTEXT_H | ||
34 | #define NES_CONTEXT_H | ||
35 | |||
36 | struct nes_qp_context { | ||
37 | __le32 misc; | ||
38 | __le32 cqs; | ||
39 | __le32 sq_addr_low; | ||
40 | __le32 sq_addr_high; | ||
41 | __le32 rq_addr_low; | ||
42 | __le32 rq_addr_high; | ||
43 | __le32 misc2; | ||
44 | __le16 tcpPorts[2]; | ||
45 | __le32 ip0; | ||
46 | __le32 ip1; | ||
47 | __le32 ip2; | ||
48 | __le32 ip3; | ||
49 | __le32 mss; | ||
50 | __le32 arp_index_vlan; | ||
51 | __le32 tcp_state_flow_label; | ||
52 | __le32 pd_index_wscale; | ||
53 | __le32 keepalive; | ||
54 | u32 ts_recent; | ||
55 | u32 ts_age; | ||
56 | __le32 snd_nxt; | ||
57 | __le32 snd_wnd; | ||
58 | __le32 rcv_nxt; | ||
59 | __le32 rcv_wnd; | ||
60 | __le32 snd_max; | ||
61 | __le32 snd_una; | ||
62 | u32 srtt; | ||
63 | __le32 rttvar; | ||
64 | __le32 ssthresh; | ||
65 | __le32 cwnd; | ||
66 | __le32 snd_wl1; | ||
67 | __le32 snd_wl2; | ||
68 | __le32 max_snd_wnd; | ||
69 | __le32 ts_val_delta; | ||
70 | u32 retransmit; | ||
71 | u32 probe_cnt; | ||
72 | u32 hte_index; | ||
73 | __le32 q2_addr_low; | ||
74 | __le32 q2_addr_high; | ||
75 | __le32 ird_index; | ||
76 | u32 Rsvd3; | ||
77 | __le32 ird_ord_sizes; | ||
78 | u32 mrkr_offset; | ||
79 | __le32 aeq_token_low; | ||
80 | __le32 aeq_token_high; | ||
81 | }; | ||
82 | |||
83 | /* QP Context Misc Field */ | ||
84 | |||
85 | #define NES_QPCONTEXT_MISC_IWARP_VER_MASK 0x00000003 | ||
86 | #define NES_QPCONTEXT_MISC_IWARP_VER_SHIFT 0 | ||
87 | #define NES_QPCONTEXT_MISC_EFB_SIZE_MASK 0x000000C0 | ||
88 | #define NES_QPCONTEXT_MISC_EFB_SIZE_SHIFT 6 | ||
89 | #define NES_QPCONTEXT_MISC_RQ_SIZE_MASK 0x00000300 | ||
90 | #define NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT 8 | ||
91 | #define NES_QPCONTEXT_MISC_SQ_SIZE_MASK 0x00000c00 | ||
92 | #define NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT 10 | ||
93 | #define NES_QPCONTEXT_MISC_PCI_FCN_MASK 0x00007000 | ||
94 | #define NES_QPCONTEXT_MISC_PCI_FCN_SHIFT 12 | ||
95 | #define NES_QPCONTEXT_MISC_DUP_ACKS_MASK 0x00070000 | ||
96 | #define NES_QPCONTEXT_MISC_DUP_ACKS_SHIFT 16 | ||
97 | |||
98 | enum nes_qp_context_misc_bits { | ||
99 | NES_QPCONTEXT_MISC_RX_WQE_SIZE = 0x00000004, | ||
100 | NES_QPCONTEXT_MISC_IPV4 = 0x00000008, | ||
101 | NES_QPCONTEXT_MISC_DO_NOT_FRAG = 0x00000010, | ||
102 | NES_QPCONTEXT_MISC_INSERT_VLAN = 0x00000020, | ||
103 | NES_QPCONTEXT_MISC_DROS = 0x00008000, | ||
104 | NES_QPCONTEXT_MISC_WSCALE = 0x00080000, | ||
105 | NES_QPCONTEXT_MISC_KEEPALIVE = 0x00100000, | ||
106 | NES_QPCONTEXT_MISC_TIMESTAMP = 0x00200000, | ||
107 | NES_QPCONTEXT_MISC_SACK = 0x00400000, | ||
108 | NES_QPCONTEXT_MISC_RDMA_WRITE_EN = 0x00800000, | ||
109 | NES_QPCONTEXT_MISC_RDMA_READ_EN = 0x01000000, | ||
110 | NES_QPCONTEXT_MISC_WBIND_EN = 0x10000000, | ||
111 | NES_QPCONTEXT_MISC_FAST_REGISTER_EN = 0x20000000, | ||
112 | NES_QPCONTEXT_MISC_PRIV_EN = 0x40000000, | ||
113 | NES_QPCONTEXT_MISC_NO_NAGLE = 0x80000000 | ||
114 | }; | ||
115 | |||
116 | enum nes_qp_acc_wq_sizes { | ||
117 | HCONTEXT_TSA_WQ_SIZE_4 = 0, | ||
118 | HCONTEXT_TSA_WQ_SIZE_32 = 1, | ||
119 | HCONTEXT_TSA_WQ_SIZE_128 = 2, | ||
120 | HCONTEXT_TSA_WQ_SIZE_512 = 3 | ||
121 | }; | ||
122 | |||
123 | /* QP Context Misc2 Fields */ | ||
124 | #define NES_QPCONTEXT_MISC2_TTL_MASK 0x000000ff | ||
125 | #define NES_QPCONTEXT_MISC2_TTL_SHIFT 0 | ||
126 | #define NES_QPCONTEXT_MISC2_HOP_LIMIT_MASK 0x000000ff | ||
127 | #define NES_QPCONTEXT_MISC2_HOP_LIMIT_SHIFT 0 | ||
128 | #define NES_QPCONTEXT_MISC2_LIMIT_MASK 0x00000300 | ||
129 | #define NES_QPCONTEXT_MISC2_LIMIT_SHIFT 8 | ||
130 | #define NES_QPCONTEXT_MISC2_NIC_INDEX_MASK 0x0000fc00 | ||
131 | #define NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT 10 | ||
132 | #define NES_QPCONTEXT_MISC2_SRC_IP_MASK 0x001f0000 | ||
133 | #define NES_QPCONTEXT_MISC2_SRC_IP_SHIFT 16 | ||
134 | #define NES_QPCONTEXT_MISC2_TOS_MASK 0xff000000 | ||
135 | #define NES_QPCONTEXT_MISC2_TOS_SHIFT 24 | ||
136 | #define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_MASK 0xff000000 | ||
137 | #define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_SHIFT 24 | ||
138 | |||
139 | /* QP Context Tcp State/Flow Label Fields */ | ||
140 | #define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_MASK 0x000fffff | ||
141 | #define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_SHIFT 0 | ||
142 | #define NES_QPCONTEXT_TCPFLOW_TCP_STATE_MASK 0xf0000000 | ||
143 | #define NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT 28 | ||
144 | |||
145 | enum nes_qp_tcp_state { | ||
146 | NES_QPCONTEXT_TCPSTATE_CLOSED = 1, | ||
147 | NES_QPCONTEXT_TCPSTATE_EST = 5, | ||
148 | NES_QPCONTEXT_TCPSTATE_TIME_WAIT = 11, | ||
149 | }; | ||
150 | |||
151 | /* QP Context PD Index/wscale Fields */ | ||
152 | #define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK 0x0000000f | ||
153 | #define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT 0 | ||
154 | #define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK 0x00000f00 | ||
155 | #define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT 8 | ||
156 | #define NES_QPCONTEXT_PDWSCALE_PDINDEX_MASK 0xffff0000 | ||
157 | #define NES_QPCONTEXT_PDWSCALE_PDINDEX_SHIFT 16 | ||
158 | |||
159 | /* QP Context Keepalive Fields */ | ||
160 | #define NES_QPCONTEXT_KEEPALIVE_DELTA_MASK 0x0000ffff | ||
161 | #define NES_QPCONTEXT_KEEPALIVE_DELTA_SHIFT 0 | ||
162 | #define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_MASK 0x00ff0000 | ||
163 | #define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_SHIFT 16 | ||
164 | #define NES_QPCONTEXT_KEEPALIVE_INTV_MASK 0xff000000 | ||
165 | #define NES_QPCONTEXT_KEEPALIVE_INTV_SHIFT 24 | ||
166 | |||
167 | /* QP Context ORD/IRD Fields */ | ||
168 | #define NES_QPCONTEXT_ORDIRD_ORDSIZE_MASK 0x0000007f | ||
169 | #define NES_QPCONTEXT_ORDIRD_ORDSIZE_SHIFT 0 | ||
170 | #define NES_QPCONTEXT_ORDIRD_IRDSIZE_MASK 0x00030000 | ||
171 | #define NES_QPCONTEXT_ORDIRD_IRDSIZE_SHIFT 16 | ||
172 | #define NES_QPCONTEXT_ORDIRD_IWARP_MODE_MASK 0x30000000 | ||
173 | #define NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT 28 | ||
174 | |||
175 | enum nes_ord_ird_bits { | ||
176 | NES_QPCONTEXT_ORDIRD_WRPDU = 0x02000000, | ||
177 | NES_QPCONTEXT_ORDIRD_LSMM_PRESENT = 0x04000000, | ||
178 | NES_QPCONTEXT_ORDIRD_ALSMM = 0x08000000, | ||
179 | NES_QPCONTEXT_ORDIRD_AAH = 0x40000000, | ||
180 | NES_QPCONTEXT_ORDIRD_RNMC = 0x80000000 | ||
181 | }; | ||
182 | |||
183 | enum nes_iwarp_qp_state { | ||
184 | NES_QPCONTEXT_IWARP_STATE_NONEXIST = 0, | ||
185 | NES_QPCONTEXT_IWARP_STATE_IDLE = 1, | ||
186 | NES_QPCONTEXT_IWARP_STATE_RTS = 2, | ||
187 | NES_QPCONTEXT_IWARP_STATE_CLOSING = 3, | ||
188 | NES_QPCONTEXT_IWARP_STATE_TERMINATE = 5, | ||
189 | NES_QPCONTEXT_IWARP_STATE_ERROR = 6 | ||
190 | }; | ||
191 | |||
192 | |||
193 | #endif /* NES_CONTEXT_H */ | ||
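Editorial note: the *_MASK/*_SHIFT pairs above all follow one pattern: shift the value into place, mask it, and OR it into the little-endian context word, exactly the idiom cm_event_connected() uses for the source-IP index in misc2. A minimal sketch with a hypothetical helper that packs the TTL and NIC index fields:

    /* Hypothetical helper: pack TTL and NIC index into the misc2 context word. */
    static void nes_sketch_set_misc2(struct nes_qp_context *ctx, u8 ttl, u8 nic_index)
    {
            u32 misc2 = le32_to_cpu(ctx->misc2);

            misc2 &= ~(NES_QPCONTEXT_MISC2_TTL_MASK | NES_QPCONTEXT_MISC2_NIC_INDEX_MASK);
            misc2 |= ((u32)ttl << NES_QPCONTEXT_MISC2_TTL_SHIFT) &
                     NES_QPCONTEXT_MISC2_TTL_MASK;
            misc2 |= ((u32)nic_index << NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT) &
                     NES_QPCONTEXT_MISC2_NIC_INDEX_MASK;

            ctx->misc2 = cpu_to_le32(misc2);
    }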
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c new file mode 100644 index 000000000000..7c4c0fbf0abd --- /dev/null +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -0,0 +1,3080 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/module.h> | ||
35 | #include <linux/moduleparam.h> | ||
36 | #include <linux/netdevice.h> | ||
37 | #include <linux/etherdevice.h> | ||
38 | #include <linux/ip.h> | ||
39 | #include <linux/tcp.h> | ||
40 | #include <linux/if_vlan.h> | ||
41 | |||
42 | #include "nes.h" | ||
43 | |||
44 | u32 crit_err_count = 0; | ||
45 | u32 int_mod_timer_init; | ||
46 | u32 int_mod_cq_depth_256; | ||
47 | u32 int_mod_cq_depth_128; | ||
48 | u32 int_mod_cq_depth_32; | ||
49 | u32 int_mod_cq_depth_24; | ||
50 | u32 int_mod_cq_depth_16; | ||
51 | u32 int_mod_cq_depth_4; | ||
52 | u32 int_mod_cq_depth_1; | ||
53 | |||
54 | #include "nes_cm.h" | ||
55 | |||
56 | |||
57 | #ifdef CONFIG_INFINIBAND_NES_DEBUG | ||
58 | static unsigned char *nes_iwarp_state_str[] = { | ||
59 | "Non-Existent", | ||
60 | "Idle", | ||
61 | "RTS", | ||
62 | "Closing", | ||
63 | "RSVD1", | ||
64 | "Terminate", | ||
65 | "Error", | ||
66 | "RSVD2", | ||
67 | }; | ||
68 | |||
69 | static unsigned char *nes_tcp_state_str[] = { | ||
70 | "Non-Existent", | ||
71 | "Closed", | ||
72 | "Listen", | ||
73 | "SYN Sent", | ||
74 | "SYN Rcvd", | ||
75 | "Established", | ||
76 | "Close Wait", | ||
77 | "FIN Wait 1", | ||
78 | "Closing", | ||
79 | "Last Ack", | ||
80 | "FIN Wait 2", | ||
81 | "Time Wait", | ||
82 | "RSVD1", | ||
83 | "RSVD2", | ||
84 | "RSVD3", | ||
85 | "RSVD4", | ||
86 | }; | ||
87 | #endif | ||
88 | |||
89 | |||
90 | /** | ||
91 | * nes_nic_init_timer_defaults | ||
92 | */ | ||
93 | void nes_nic_init_timer_defaults(struct nes_device *nesdev, u8 jumbomode) | ||
94 | { | ||
95 | unsigned long flags; | ||
96 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
97 | struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; | ||
98 | |||
99 | spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); | ||
100 | |||
101 | shared_timer->timer_in_use_min = NES_NIC_FAST_TIMER_LOW; | ||
102 | shared_timer->timer_in_use_max = NES_NIC_FAST_TIMER_HIGH; | ||
103 | if (jumbomode) { | ||
104 | shared_timer->threshold_low = DEFAULT_JUMBO_NES_QL_LOW; | ||
105 | shared_timer->threshold_target = DEFAULT_JUMBO_NES_QL_TARGET; | ||
106 | shared_timer->threshold_high = DEFAULT_JUMBO_NES_QL_HIGH; | ||
107 | } else { | ||
108 | shared_timer->threshold_low = DEFAULT_NES_QL_LOW; | ||
109 | shared_timer->threshold_target = DEFAULT_NES_QL_TARGET; | ||
110 | shared_timer->threshold_high = DEFAULT_NES_QL_HIGH; | ||
111 | } | ||
112 | |||
113 | /* todo use netdev->mtu to set thresholds */ | ||
114 | spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags); | ||
115 | } | ||
116 | |||
117 | |||
118 | /** | ||
119 | * nes_nic_init_timer | ||
120 | */ | ||
121 | static void nes_nic_init_timer(struct nes_device *nesdev) | ||
122 | { | ||
123 | unsigned long flags; | ||
124 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
125 | struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; | ||
126 | |||
127 | spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); | ||
128 | |||
129 | if (shared_timer->timer_in_use_old == 0) { | ||
130 | nesdev->deepcq_count = 0; | ||
131 | shared_timer->timer_direction_upward = 0; | ||
132 | shared_timer->timer_direction_downward = 0; | ||
133 | shared_timer->timer_in_use = NES_NIC_FAST_TIMER; | ||
134 | shared_timer->timer_in_use_old = 0; | ||
135 | |||
136 | } | ||
137 | if (shared_timer->timer_in_use != shared_timer->timer_in_use_old) { | ||
138 | shared_timer->timer_in_use_old = shared_timer->timer_in_use; | ||
139 | nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, | ||
140 | 0x80000000 | ((u32)(shared_timer->timer_in_use*8))); | ||
141 | } | ||
142 | /* todo use netdev->mtu to set thresholds */ | ||
143 | spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags); | ||
144 | } | ||
145 | |||
146 | |||
147 | /** | ||
148 | * nes_nic_tune_timer - adjust the interrupt moderation timer from recent CQ depth | ||
149 | */ | ||
150 | static void nes_nic_tune_timer(struct nes_device *nesdev) | ||
151 | { | ||
152 | unsigned long flags; | ||
153 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
154 | struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; | ||
155 | u16 cq_count = nesdev->currcq_count; | ||
156 | |||
157 | spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); | ||
158 | |||
159 | if (shared_timer->cq_count_old < cq_count) { | ||
160 | if (cq_count > shared_timer->threshold_low) | ||
161 | shared_timer->cq_direction_downward=0; | ||
162 | } | ||
163 | if (shared_timer->cq_count_old >= cq_count) | ||
164 | shared_timer->cq_direction_downward++; | ||
165 | shared_timer->cq_count_old = cq_count; | ||
166 | if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) { | ||
167 | if (cq_count <= shared_timer->threshold_low) { | ||
168 | shared_timer->threshold_low = shared_timer->threshold_low/2; | ||
169 | shared_timer->cq_direction_downward=0; | ||
170 | nesdev->currcq_count = 0; | ||
171 | spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags); | ||
172 | return; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | if (cq_count > 1) { | ||
177 | nesdev->deepcq_count += cq_count; | ||
178 | if (cq_count <= shared_timer->threshold_low) { /* increase timer gently */ | ||
179 | shared_timer->timer_direction_upward++; | ||
180 | shared_timer->timer_direction_downward = 0; | ||
181 | } else if (cq_count <= shared_timer->threshold_target) { /* balanced */ | ||
182 | shared_timer->timer_direction_upward = 0; | ||
183 | shared_timer->timer_direction_downward = 0; | ||
184 | } else if (cq_count <= shared_timer->threshold_high) { /* decrease timer gently */ | ||
185 | shared_timer->timer_direction_downward++; | ||
186 | shared_timer->timer_direction_upward = 0; | ||
187 | } else if (cq_count <= (shared_timer->threshold_high) * 2) { | ||
188 | shared_timer->timer_in_use -= 2; | ||
189 | shared_timer->timer_direction_upward = 0; | ||
190 | shared_timer->timer_direction_downward++; | ||
191 | } else { | ||
192 | shared_timer->timer_in_use -= 4; | ||
193 | shared_timer->timer_direction_upward = 0; | ||
194 | shared_timer->timer_direction_downward++; | ||
195 | } | ||
196 | |||
197 | if (shared_timer->timer_direction_upward > 3 ) { /* using history */ | ||
198 | shared_timer->timer_in_use += 3; | ||
199 | shared_timer->timer_direction_upward = 0; | ||
200 | shared_timer->timer_direction_downward = 0; | ||
201 | } | ||
202 | if (shared_timer->timer_direction_downward > 5) { /* using history */ | ||
203 | shared_timer->timer_in_use -= 4 ; | ||
204 | shared_timer->timer_direction_downward = 0; | ||
205 | shared_timer->timer_direction_upward = 0; | ||
206 | } | ||
207 | } | ||
208 | |||
209 | /* boundary checking */ | ||
210 | if (shared_timer->timer_in_use > NES_NIC_FAST_TIMER_HIGH) | ||
211 | shared_timer->timer_in_use = NES_NIC_FAST_TIMER_HIGH; | ||
212 | else if (shared_timer->timer_in_use < NES_NIC_FAST_TIMER_LOW) { | ||
213 | shared_timer->timer_in_use = NES_NIC_FAST_TIMER_LOW; | ||
214 | } | ||
215 | |||
216 | nesdev->currcq_count = 0; | ||
217 | |||
218 | spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags); | ||
219 | } | ||
220 | |||
221 | |||
222 | /** | ||
223 | * nes_init_adapter - initialize adapter | ||
224 | */ | ||
225 | struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) { | ||
226 | struct nes_adapter *nesadapter = NULL; | ||
227 | unsigned long num_pds; | ||
228 | u32 u32temp; | ||
229 | u32 port_count; | ||
230 | u16 max_rq_wrs; | ||
231 | u16 max_sq_wrs; | ||
232 | u32 max_mr; | ||
233 | u32 max_256pbl; | ||
234 | u32 max_4kpbl; | ||
235 | u32 max_qp; | ||
236 | u32 max_irrq; | ||
237 | u32 max_cq; | ||
238 | u32 hte_index_mask; | ||
239 | u32 adapter_size; | ||
240 | u32 arp_table_size; | ||
241 | u16 vendor_id; | ||
242 | u8 OneG_Mode; | ||
243 | u8 func_index; | ||
244 | |||
245 | /* search the list of existing adapters */ | ||
246 | list_for_each_entry(nesadapter, &nes_adapter_list, list) { | ||
247 | nes_debug(NES_DBG_INIT, "Searching Adapter list for PCI devfn = 0x%X," | ||
248 | " adapter PCI slot/bus = %u/%u, pci device PCI slot/bus = %u/%u.\n", | ||
249 | nesdev->pcidev->devfn, | ||
250 | PCI_SLOT(nesadapter->devfn), | ||
251 | nesadapter->bus_number, | ||
252 | PCI_SLOT(nesdev->pcidev->devfn), | ||
253 | nesdev->pcidev->bus->number ); | ||
254 | if ((PCI_SLOT(nesadapter->devfn) == PCI_SLOT(nesdev->pcidev->devfn)) && | ||
255 | (nesadapter->bus_number == nesdev->pcidev->bus->number)) { | ||
256 | nesadapter->ref_count++; | ||
257 | return nesadapter; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | /* no adapter found */ | ||
262 | num_pds = pci_resource_len(nesdev->pcidev, BAR_1) >> PAGE_SHIFT; | ||
263 | if ((hw_rev != NE020_REV) && (hw_rev != NE020_REV1)) { | ||
264 | nes_debug(NES_DBG_INIT, "NE020 driver detected unknown hardware revision 0x%x\n", | ||
265 | hw_rev); | ||
266 | return NULL; | ||
267 | } | ||
268 | |||
269 | nes_debug(NES_DBG_INIT, "Determine Soft Reset, QP_control=0x%x, CPU0=0x%x, CPU1=0x%x, CPU2=0x%x\n", | ||
270 | nes_read_indexed(nesdev, NES_IDX_QP_CONTROL + PCI_FUNC(nesdev->pcidev->devfn) * 8), | ||
271 | nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS), | ||
272 | nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS + 4), | ||
273 | nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS + 8)); | ||
274 | |||
275 | nes_debug(NES_DBG_INIT, "Reset and init NE020\n"); | ||
276 | |||
277 | |||
278 | if ((port_count = nes_reset_adapter_ne020(nesdev, &OneG_Mode)) == 0) | ||
279 | return NULL; | ||
280 | if (nes_init_serdes(nesdev, hw_rev, port_count, OneG_Mode)) | ||
281 | return NULL; | ||
282 | nes_init_csr_ne020(nesdev, hw_rev, port_count); | ||
283 | |||
284 | max_qp = nes_read_indexed(nesdev, NES_IDX_QP_CTX_SIZE); | ||
285 | nes_debug(NES_DBG_INIT, "QP_CTX_SIZE=%u\n", max_qp); | ||
286 | |||
287 | u32temp = nes_read_indexed(nesdev, NES_IDX_QUAD_HASH_TABLE_SIZE); | ||
288 | if (max_qp > ((u32)1 << (u32temp & 0x001f))) { | ||
289 | nes_debug(NES_DBG_INIT, "Reducing Max QPs to %u due to hash table size = 0x%08X\n", | ||
290 | max_qp, u32temp); | ||
291 | max_qp = (u32)1 << (u32temp & 0x001f); | ||
292 | } | ||
293 | |||
294 | hte_index_mask = ((u32)1 << ((u32temp & 0x001f)+1))-1; | ||
295 | nes_debug(NES_DBG_INIT, "Max QP = %u, hte_index_mask = 0x%08X.\n", | ||
296 | max_qp, hte_index_mask); | ||
297 | |||
298 | u32temp = nes_read_indexed(nesdev, NES_IDX_IRRQ_COUNT); | ||
299 | |||
300 | max_irrq = 1 << (u32temp & 0x001f); | ||
301 | |||
302 | if (max_qp > max_irrq) { | ||
303 | max_qp = max_irrq; | ||
304 | nes_debug(NES_DBG_INIT, "Reducing Max QPs to %u due to Available Q1s.\n", | ||
305 | max_qp); | ||
306 | } | ||
307 | |||
308 | /* there should be no reason to allocate more pds than qps */ | ||
309 | if (num_pds > max_qp) | ||
310 | num_pds = max_qp; | ||
311 | |||
312 | u32temp = nes_read_indexed(nesdev, NES_IDX_MRT_SIZE); | ||
313 | max_mr = (u32)8192 << (u32temp & 0x7); | ||
314 | |||
315 | u32temp = nes_read_indexed(nesdev, NES_IDX_PBL_REGION_SIZE); | ||
316 | max_256pbl = (u32)1 << (u32temp & 0x0000001f); | ||
317 | max_4kpbl = (u32)1 << ((u32temp >> 16) & 0x0000001f); | ||
318 | max_cq = nes_read_indexed(nesdev, NES_IDX_CQ_CTX_SIZE); | ||
319 | |||
320 | u32temp = nes_read_indexed(nesdev, NES_IDX_ARP_CACHE_SIZE); | ||
321 | arp_table_size = 1 << u32temp; | ||
322 | |||
323 | adapter_size = (sizeof(struct nes_adapter) + | ||
324 | (sizeof(unsigned long)-1)) & (~(sizeof(unsigned long)-1)); | ||
325 | adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp); | ||
326 | adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr); | ||
327 | adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq); | ||
328 | adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds); | ||
329 | adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size); | ||
330 | adapter_size += sizeof(struct nes_qp **) * max_qp; | ||
331 | |||
332 | /* allocate a new adapter struct */ | ||
333 | nesadapter = kzalloc(adapter_size, GFP_KERNEL); | ||
334 | if (nesadapter == NULL) { | ||
335 | return NULL; | ||
336 | } | ||
337 | |||
338 | nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n", | ||
339 | nesadapter, (u32)sizeof(struct nes_adapter), adapter_size); | ||
340 | |||
341 | /* populate the new nesadapter */ | ||
342 | nesadapter->devfn = nesdev->pcidev->devfn; | ||
343 | nesadapter->bus_number = nesdev->pcidev->bus->number; | ||
344 | nesadapter->ref_count = 1; | ||
345 | nesadapter->timer_int_req = 0xffff0000; | ||
346 | nesadapter->OneG_Mode = OneG_Mode; | ||
347 | nesadapter->doorbell_start = nesdev->doorbell_region; | ||
348 | |||
349 | /* nesadapter->tick_delta = clk_divisor; */ | ||
350 | nesadapter->hw_rev = hw_rev; | ||
351 | nesadapter->port_count = port_count; | ||
352 | |||
353 | nesadapter->max_qp = max_qp; | ||
354 | nesadapter->hte_index_mask = hte_index_mask; | ||
355 | nesadapter->max_irrq = max_irrq; | ||
356 | nesadapter->max_mr = max_mr; | ||
357 | nesadapter->max_256pbl = max_256pbl - 1; | ||
358 | nesadapter->max_4kpbl = max_4kpbl - 1; | ||
359 | nesadapter->max_cq = max_cq; | ||
360 | nesadapter->free_256pbl = max_256pbl - 1; | ||
361 | nesadapter->free_4kpbl = max_4kpbl - 1; | ||
362 | nesadapter->max_pd = num_pds; | ||
363 | nesadapter->arp_table_size = arp_table_size; | ||
364 | |||
365 | nesadapter->et_pkt_rate_low = NES_TIMER_ENABLE_LIMIT; | ||
366 | if (nes_drv_opt & NES_DRV_OPT_DISABLE_INT_MOD) { | ||
367 | nesadapter->et_use_adaptive_rx_coalesce = 0; | ||
368 | nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT; | ||
369 | nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; | ||
370 | } else { | ||
371 | nesadapter->et_use_adaptive_rx_coalesce = 1; | ||
372 | nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC; | ||
373 | nesadapter->et_rx_coalesce_usecs_irq = 0; | ||
374 | printk(KERN_INFO PFX "%s: Using Adaptive Interrupt Moderation\n", __FUNCTION__); | ||
375 | } | ||
376 | /* Setup and enable the periodic timer */ | ||
377 | if (nesadapter->et_rx_coalesce_usecs_irq) | ||
378 | nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x80000000 | | ||
379 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq * 8))); | ||
380 | else | ||
381 | nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x00000000); | ||
382 | |||
383 | nesadapter->base_pd = 1; | ||
384 | |||
385 | nesadapter->device_cap_flags = | ||
386 | IB_DEVICE_ZERO_STAG | IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW; | ||
387 | |||
388 | nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter) | ||
389 | [(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]); | ||
390 | nesadapter->allocated_cqs = &nesadapter->allocated_qps[BITS_TO_LONGS(max_qp)]; | ||
391 | nesadapter->allocated_mrs = &nesadapter->allocated_cqs[BITS_TO_LONGS(max_cq)]; | ||
392 | nesadapter->allocated_pds = &nesadapter->allocated_mrs[BITS_TO_LONGS(max_mr)]; | ||
393 | nesadapter->allocated_arps = &nesadapter->allocated_pds[BITS_TO_LONGS(num_pds)]; | ||
394 | nesadapter->qp_table = (struct nes_qp **)(&nesadapter->allocated_arps[BITS_TO_LONGS(arp_table_size)]); | ||
395 | |||
396 | |||
397 | /* mark the usual suspect QPs and CQs as in use */ | ||
398 | for (u32temp = 0; u32temp < NES_FIRST_QPN; u32temp++) { | ||
399 | set_bit(u32temp, nesadapter->allocated_qps); | ||
400 | set_bit(u32temp, nesadapter->allocated_cqs); | ||
401 | } | ||
402 | |||
403 | for (u32temp = 0; u32temp < 20; u32temp++) | ||
404 | set_bit(u32temp, nesadapter->allocated_pds); | ||
405 | u32temp = nes_read_indexed(nesdev, NES_IDX_QP_MAX_CFG_SIZES); | ||
406 | |||
407 | max_rq_wrs = ((u32temp >> 8) & 3); | ||
408 | switch (max_rq_wrs) { | ||
409 | case 0: | ||
410 | max_rq_wrs = 4; | ||
411 | break; | ||
412 | case 1: | ||
413 | max_rq_wrs = 16; | ||
414 | break; | ||
415 | case 2: | ||
416 | max_rq_wrs = 32; | ||
417 | break; | ||
418 | case 3: | ||
419 | max_rq_wrs = 512; | ||
420 | break; | ||
421 | } | ||
422 | |||
423 | max_sq_wrs = (u32temp & 3); | ||
424 | switch (max_sq_wrs) { | ||
425 | case 0: | ||
426 | max_sq_wrs = 4; | ||
427 | break; | ||
428 | case 1: | ||
429 | max_sq_wrs = 16; | ||
430 | break; | ||
431 | case 2: | ||
432 | max_sq_wrs = 32; | ||
433 | break; | ||
434 | case 3: | ||
435 | max_sq_wrs = 512; | ||
436 | break; | ||
437 | } | ||
438 | nesadapter->max_qp_wr = min(max_rq_wrs, max_sq_wrs); | ||
439 | nesadapter->max_irrq_wr = (u32temp >> 16) & 3; | ||
440 | |||
441 | nesadapter->max_sge = 4; | ||
442 | nesadapter->max_cqe = 32767; | ||
443 | |||
444 | if (nes_read_eeprom_values(nesdev, nesadapter)) { | ||
445 | printk(KERN_ERR PFX "Unable to read EEPROM data.\n"); | ||
446 | kfree(nesadapter); | ||
447 | return NULL; | ||
448 | } | ||
449 | |||
450 | u32temp = nes_read_indexed(nesdev, NES_IDX_TCP_TIMER_CONFIG); | ||
451 | nes_write_indexed(nesdev, NES_IDX_TCP_TIMER_CONFIG, | ||
452 | (u32temp & 0xff000000) | (nesadapter->tcp_timer_core_clk_divisor & 0x00ffffff)); | ||
453 | |||
454 | /* setup port configuration */ | ||
455 | if (nesadapter->port_count == 1) { | ||
456 | u32temp = 0x00000000; | ||
457 | if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) | ||
458 | nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000002); | ||
459 | else | ||
460 | nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003); | ||
461 | } else { | ||
462 | if (nesadapter->port_count == 2) | ||
463 | u32temp = 0x00000044; | ||
464 | else | ||
465 | u32temp = 0x000000e4; | ||
466 | nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003); | ||
467 | } | ||
468 | |||
469 | nes_write_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT, u32temp); | ||
470 | nes_debug(NES_DBG_INIT, "Probe time, LOG2PHY=%u\n", | ||
471 | nes_read_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT)); | ||
472 | |||
473 | spin_lock_init(&nesadapter->resource_lock); | ||
474 | spin_lock_init(&nesadapter->phy_lock); | ||
475 | spin_lock_init(&nesadapter->pbl_lock); | ||
476 | spin_lock_init(&nesadapter->periodic_timer_lock); | ||
477 | |||
478 | INIT_LIST_HEAD(&nesadapter->nesvnic_list[0]); | ||
479 | INIT_LIST_HEAD(&nesadapter->nesvnic_list[1]); | ||
480 | INIT_LIST_HEAD(&nesadapter->nesvnic_list[2]); | ||
481 | INIT_LIST_HEAD(&nesadapter->nesvnic_list[3]); | ||
482 | |||
483 | if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) { | ||
484 | u32 pcs_control_status0, pcs_control_status1; | ||
485 | u32 reset_value; | ||
486 | u32 i = 0; | ||
487 | u32 int_cnt = 0; | ||
488 | u32 ext_cnt = 0; | ||
489 | unsigned long flags; | ||
490 | u32 j = 0; | ||
491 | |||
492 | pcs_control_status0 = nes_read_indexed(nesdev, | ||
493 | NES_IDX_PHY_PCS_CONTROL_STATUS0); | ||
494 | pcs_control_status1 = nes_read_indexed(nesdev, | ||
495 | NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200); | ||
496 | |||
497 | for (i = 0; i < NES_MAX_LINK_CHECK; i++) { | ||
498 | pcs_control_status0 = nes_read_indexed(nesdev, | ||
499 | NES_IDX_PHY_PCS_CONTROL_STATUS0); | ||
500 | pcs_control_status1 = nes_read_indexed(nesdev, | ||
501 | NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200); | ||
502 | if ((0x0F000100 == (pcs_control_status0 & 0x0F000100)) | ||
503 | || (0x0F000100 == (pcs_control_status1 & 0x0F000100))) | ||
504 | int_cnt++; | ||
505 | msleep(1); | ||
506 | } | ||
507 | if (int_cnt > 1) { | ||
508 | spin_lock_irqsave(&nesadapter->phy_lock, flags); | ||
509 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088); | ||
510 | mh_detected++; | ||
511 | reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); | ||
512 | reset_value |= 0x0000003d; | ||
513 | nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value); | ||
514 | |||
515 | while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) | ||
516 | & 0x00000040) != 0x00000040) && (j++ < 5000)); | ||
517 | spin_unlock_irqrestore(&nesadapter->phy_lock, flags); | ||
518 | |||
519 | pcs_control_status0 = nes_read_indexed(nesdev, | ||
520 | NES_IDX_PHY_PCS_CONTROL_STATUS0); | ||
521 | pcs_control_status1 = nes_read_indexed(nesdev, | ||
522 | NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200); | ||
523 | |||
524 | for (i = 0; i < NES_MAX_LINK_CHECK; i++) { | ||
525 | pcs_control_status0 = nes_read_indexed(nesdev, | ||
526 | NES_IDX_PHY_PCS_CONTROL_STATUS0); | ||
527 | pcs_control_status1 = nes_read_indexed(nesdev, | ||
528 | NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200); | ||
529 | if ((0x0F000100 == (pcs_control_status0 & 0x0F000100)) | ||
530 | || (0x0F000100 == (pcs_control_status1 & 0x0F000100))) { | ||
531 | if (++ext_cnt > int_cnt) { | ||
532 | spin_lock_irqsave(&nesadapter->phy_lock, flags); | ||
533 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, | ||
534 | 0x0000F0C8); | ||
535 | mh_detected++; | ||
536 | reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); | ||
537 | reset_value |= 0x0000003d; | ||
538 | nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value); | ||
539 | |||
540 | while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) | ||
541 | & 0x00000040) != 0x00000040) && (j++ < 5000)); | ||
542 | spin_unlock_irqrestore(&nesadapter->phy_lock, flags); | ||
543 | break; | ||
544 | } | ||
545 | } | ||
546 | msleep(1); | ||
547 | } | ||
548 | } | ||
549 | } | ||
550 | |||
551 | if (nesadapter->hw_rev == NE020_REV) { | ||
552 | init_timer(&nesadapter->mh_timer); | ||
553 | nesadapter->mh_timer.function = nes_mh_fix; | ||
554 | nesadapter->mh_timer.expires = jiffies + (HZ/5); /* 200 ms */ | ||
555 | nesadapter->mh_timer.data = (unsigned long)nesdev; | ||
556 | add_timer(&nesadapter->mh_timer); | ||
557 | } else { | ||
558 | nes_write32(nesdev->regs+NES_INTF_INT_STAT, 0x0f000000); | ||
559 | } | ||
560 | |||
561 | init_timer(&nesadapter->lc_timer); | ||
562 | nesadapter->lc_timer.function = nes_clc; | ||
563 | nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */ | ||
564 | nesadapter->lc_timer.data = (unsigned long)nesdev; | ||
565 | add_timer(&nesadapter->lc_timer); | ||
566 | |||
567 | list_add_tail(&nesadapter->list, &nes_adapter_list); | ||
568 | |||
569 | for (func_index = 0; func_index < 8; func_index++) { | ||
570 | pci_bus_read_config_word(nesdev->pcidev->bus, | ||
571 | PCI_DEVFN(PCI_SLOT(nesdev->pcidev->devfn), | ||
572 | func_index), 0, &vendor_id); | ||
573 | if (vendor_id == 0xffff) | ||
574 | break; | ||
575 | } | ||
576 | nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __FUNCTION__, | ||
577 | func_index, pci_name(nesdev->pcidev)); | ||
578 | nesadapter->adapter_fcn_count = func_index; | ||
579 | |||
580 | return nesadapter; | ||
581 | } | ||
582 | |||
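The two switch statements above decode a 2-bit field from NES_IDX_QP_MAX_CFG_SIZES into a work-request limit (0 -> 4, 1 -> 16, 2 -> 32, 3 -> 512). A minimal sketch of the same decode as a lookup table; the table name is illustrative and not part of this patch:

/* Illustrative only: table form of the 2-bit max-WR decode used above. */
static const u16 nes_max_wr_decode_example[4] = { 4, 16, 32, 512 };

/* equivalent to the switches:
 *	max_rq_wrs = nes_max_wr_decode_example[(u32temp >> 8) & 3];
 *	max_sq_wrs = nes_max_wr_decode_example[u32temp & 3];
 */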
583 | |||
584 | /** | ||
585 | * nes_reset_adapter_ne020 - reset the adapter and return the port count | ||
586 | */ | ||
587 | unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode) | ||
588 | { | ||
589 | u32 port_count; | ||
590 | u32 u32temp; | ||
591 | u32 i; | ||
592 | |||
593 | u32temp = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); | ||
594 | port_count = ((u32temp & 0x00000300) >> 8) + 1; | ||
595 | /* TODO: assuming that both SERDES are set the same for now */ | ||
596 | *OneG_Mode = (u32temp & 0x00003c00) ? 0 : 1; | ||
597 | nes_debug(NES_DBG_INIT, "Initial Software Reset = 0x%08X, port_count=%u\n", | ||
598 | u32temp, port_count); | ||
599 | if (*OneG_Mode) | ||
600 | nes_debug(NES_DBG_INIT, "Running in 1G mode.\n"); | ||
601 | u32temp &= 0xff00ffc0; | ||
602 | switch (port_count) { | ||
603 | case 1: | ||
604 | u32temp |= 0x00ee0000; | ||
605 | break; | ||
606 | case 2: | ||
607 | u32temp |= 0x00cc0000; | ||
608 | break; | ||
609 | case 4: | ||
610 | u32temp |= 0x00000000; | ||
611 | break; | ||
612 | default: | ||
613 | return 0; | ||
614 | break; | ||
615 | } | ||
616 | |||
617 | /* check and do full reset if needed */ | ||
618 | if (nes_read_indexed(nesdev, NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8))) { | ||
619 | nes_debug(NES_DBG_INIT, "Issuing Full Soft reset = 0x%08X\n", u32temp | 0xd); | ||
620 | nes_write32(nesdev->regs+NES_SOFTWARE_RESET, u32temp | 0xd); | ||
621 | |||
622 | i = 0; | ||
623 | while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000) | ||
624 | mdelay(1); | ||
625 | if (i >= 10000) { | ||
626 | nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n"); | ||
627 | return 0; | ||
628 | } | ||
629 | } | ||
630 | |||
631 | /* port reset */ | ||
632 | switch (port_count) { | ||
633 | case 1: | ||
634 | u32temp |= 0x00ee0010; | ||
635 | break; | ||
636 | case 2: | ||
637 | u32temp |= 0x00cc0030; | ||
638 | break; | ||
639 | case 4: | ||
640 | u32temp |= 0x00000030; | ||
641 | break; | ||
642 | } | ||
643 | |||
644 | nes_debug(NES_DBG_INIT, "Issuing Port Soft reset = 0x%08X\n", u32temp | 0xd); | ||
645 | nes_write32(nesdev->regs+NES_SOFTWARE_RESET, u32temp | 0xd); | ||
646 | |||
647 | i = 0; | ||
648 | while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000) | ||
649 | mdelay(1); | ||
650 | if (i >= 10000) { | ||
651 | nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n"); | ||
652 | return 0; | ||
653 | } | ||
654 | |||
655 | /* serdes 0 */ | ||
656 | i = 0; | ||
657 | while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0) | ||
658 | & 0x0000000f)) != 0x0000000f) && i++ < 5000) | ||
659 | mdelay(1); | ||
660 | if (i >= 5000) { | ||
661 | nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp); | ||
662 | return 0; | ||
663 | } | ||
664 | |||
665 | /* serdes 1 */ | ||
666 | if (port_count > 1) { | ||
667 | i = 0; | ||
668 | while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1) | ||
669 | & 0x0000000f)) != 0x0000000f) && i++ < 5000) | ||
670 | mdelay(1); | ||
671 | if (i >= 5000) { | ||
672 | nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp); | ||
673 | return 0; | ||
674 | } | ||
675 | } | ||
676 | |||
677 | |||
678 | |||
679 | i = 0; | ||
680 | while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000) | ||
681 | mdelay(1); | ||
682 | if (i >= 10000) { | ||
683 | printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n", | ||
684 | nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS)); | ||
685 | return 0; | ||
686 | } | ||
687 | |||
688 | return port_count; | ||
689 | } | ||
690 | |||
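nes_reset_adapter_ne020() repeats one idiom several times: read a status register every millisecond until the expected bits come up or an iteration limit is reached. A minimal standalone sketch of that pattern, built on the nes_read32()/mdelay() accessors already used above; the helper name and the -ETIMEDOUT return are illustrative:

/* Illustrative only: poll a 32-bit status register until all bits in
 * `mask` are set, giving up after `max_ms` milliseconds. */
static int nes_poll_bits_example(void __iomem *reg, u32 mask, u32 max_ms)
{
	u32 i;

	for (i = 0; i < max_ms; i++) {
		if ((nes_read32(reg) & mask) == mask)
			return 0;		/* expected bits came up */
		mdelay(1);			/* 1 ms busy-wait, as the loops above do */
	}
	return -ETIMEDOUT;			/* caller treats this as a failed reset */
}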
691 | |||
692 | /** | ||
693 | * nes_init_serdes - initialize the Ethernet SerDes lanes | ||
694 | */ | ||
695 | int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8 OneG_Mode) | ||
696 | { | ||
697 | int i; | ||
698 | u32 u32temp; | ||
699 | |||
700 | if (hw_rev != NE020_REV) { | ||
701 | /* init serdes 0 */ | ||
702 | |||
703 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); | ||
704 | if (!OneG_Mode) | ||
705 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000); | ||
706 | if (port_count > 1) { | ||
707 | /* init serdes 1 */ | ||
708 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF); | ||
709 | if (!OneG_Mode) | ||
710 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000); | ||
711 | } | ||
712 | } else { | ||
713 | /* init serdes 0 */ | ||
714 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008); | ||
715 | i = 0; | ||
716 | while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0) | ||
717 | & 0x0000000f)) != 0x0000000f) && i++ < 5000) | ||
718 | mdelay(1); | ||
719 | if (i >= 5000) { | ||
720 | nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp); | ||
721 | return 1; | ||
722 | } | ||
723 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7); | ||
724 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000); | ||
725 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE0, 0x0ff00000); | ||
726 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET0, 0x00000000); | ||
727 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS0, 0x00000000); | ||
728 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0, 0x00000000); | ||
729 | if (OneG_Mode) | ||
730 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0182222); | ||
731 | else | ||
732 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222); | ||
733 | |||
734 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff); | ||
735 | if (port_count > 1) { | ||
736 | /* init serdes 1 */ | ||
737 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x00000048); | ||
738 | i = 0; | ||
739 | while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1) | ||
740 | & 0x0000000f)) != 0x0000000f) && (i++ < 5000)) | ||
741 | mdelay(1); | ||
742 | if (i >= 5000) { | ||
743 | printk("%s: Init: serdes 1 not ready, status=%x\n", __FUNCTION__, u32temp); | ||
744 | /* return 1; */ | ||
745 | } | ||
746 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x000bdef7); | ||
747 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE1, 0x9ce73000); | ||
748 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE1, 0x0ff00000); | ||
749 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET1, 0x00000000); | ||
750 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS1, 0x00000000); | ||
751 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL1, 0x00000000); | ||
752 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL1, 0xf0002222); | ||
753 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000ff); | ||
754 | } | ||
755 | } | ||
756 | return 0; | ||
757 | } | ||
758 | |||
759 | |||
760 | /** | ||
761 | * nes_init_csr_ne020 | ||
762 | * Initialize registers for ne020 hardware | ||
763 | */ | ||
764 | void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count) | ||
765 | { | ||
766 | u32 u32temp; | ||
767 | |||
768 | nes_debug(NES_DBG_INIT, "port_count=%d\n", port_count); | ||
769 | |||
770 | nes_write_indexed(nesdev, 0x000001E4, 0x00000007); | ||
771 | /* nes_write_indexed(nesdev, 0x000001E8, 0x000208C4); */ | ||
772 | nes_write_indexed(nesdev, 0x000001E8, 0x00020874); | ||
773 | nes_write_indexed(nesdev, 0x000001D8, 0x00048002); | ||
774 | /* nes_write_indexed(nesdev, 0x000001D8, 0x0004B002); */ | ||
775 | nes_write_indexed(nesdev, 0x000001FC, 0x00050005); | ||
776 | nes_write_indexed(nesdev, 0x00000600, 0x55555555); | ||
777 | nes_write_indexed(nesdev, 0x00000604, 0x55555555); | ||
778 | |||
779 | /* TODO: move these MAC register settings to NIC bringup */ | ||
780 | nes_write_indexed(nesdev, 0x00002000, 0x00000001); | ||
781 | nes_write_indexed(nesdev, 0x00002004, 0x00000001); | ||
782 | nes_write_indexed(nesdev, 0x00002008, 0x0000FFFF); | ||
783 | nes_write_indexed(nesdev, 0x0000200C, 0x00000001); | ||
784 | nes_write_indexed(nesdev, 0x00002010, 0x000003c1); | ||
785 | nes_write_indexed(nesdev, 0x0000201C, 0x75345678); | ||
786 | if (port_count > 1) { | ||
787 | nes_write_indexed(nesdev, 0x00002200, 0x00000001); | ||
788 | nes_write_indexed(nesdev, 0x00002204, 0x00000001); | ||
789 | nes_write_indexed(nesdev, 0x00002208, 0x0000FFFF); | ||
790 | nes_write_indexed(nesdev, 0x0000220C, 0x00000001); | ||
791 | nes_write_indexed(nesdev, 0x00002210, 0x000003c1); | ||
792 | nes_write_indexed(nesdev, 0x0000221C, 0x75345678); | ||
793 | nes_write_indexed(nesdev, 0x00000908, 0x20000001); | ||
794 | } | ||
795 | if (port_count > 2) { | ||
796 | nes_write_indexed(nesdev, 0x00002400, 0x00000001); | ||
797 | nes_write_indexed(nesdev, 0x00002404, 0x00000001); | ||
798 | nes_write_indexed(nesdev, 0x00002408, 0x0000FFFF); | ||
799 | nes_write_indexed(nesdev, 0x0000240C, 0x00000001); | ||
800 | nes_write_indexed(nesdev, 0x00002410, 0x000003c1); | ||
801 | nes_write_indexed(nesdev, 0x0000241C, 0x75345678); | ||
802 | nes_write_indexed(nesdev, 0x00000910, 0x20000001); | ||
803 | |||
804 | nes_write_indexed(nesdev, 0x00002600, 0x00000001); | ||
805 | nes_write_indexed(nesdev, 0x00002604, 0x00000001); | ||
806 | nes_write_indexed(nesdev, 0x00002608, 0x0000FFFF); | ||
807 | nes_write_indexed(nesdev, 0x0000260C, 0x00000001); | ||
808 | nes_write_indexed(nesdev, 0x00002610, 0x000003c1); | ||
809 | nes_write_indexed(nesdev, 0x0000261C, 0x75345678); | ||
810 | nes_write_indexed(nesdev, 0x00000918, 0x20000001); | ||
811 | } | ||
812 | |||
813 | nes_write_indexed(nesdev, 0x00005000, 0x00018000); | ||
814 | /* nes_write_indexed(nesdev, 0x00005000, 0x00010000); */ | ||
815 | nes_write_indexed(nesdev, 0x00005004, 0x00020001); | ||
816 | nes_write_indexed(nesdev, 0x00005008, 0x1F1F1F1F); | ||
817 | nes_write_indexed(nesdev, 0x00005010, 0x1F1F1F1F); | ||
818 | nes_write_indexed(nesdev, 0x00005018, 0x1F1F1F1F); | ||
819 | nes_write_indexed(nesdev, 0x00005020, 0x1F1F1F1F); | ||
820 | nes_write_indexed(nesdev, 0x00006090, 0xFFFFFFFF); | ||
821 | |||
822 | /* TODO: move this to code, get from EEPROM */ | ||
823 | nes_write_indexed(nesdev, 0x00000900, 0x20000001); | ||
824 | nes_write_indexed(nesdev, 0x000060C0, 0x0000028e); | ||
825 | nes_write_indexed(nesdev, 0x000060C8, 0x00000020); | ||
826 | // | ||
827 | nes_write_indexed(nesdev, 0x000001EC, 0x7b2625a0); | ||
828 | /* nes_write_indexed(nesdev, 0x000001EC, 0x5f2625a0); */ | ||
829 | |||
830 | if (hw_rev != NE020_REV) { | ||
831 | u32temp = nes_read_indexed(nesdev, 0x000008e8); | ||
832 | u32temp |= 0x80000000; | ||
833 | nes_write_indexed(nesdev, 0x000008e8, u32temp); | ||
834 | u32temp = nes_read_indexed(nesdev, 0x000021f8); | ||
835 | u32temp &= 0x7fffffff; | ||
836 | u32temp |= 0x7fff0010; | ||
837 | nes_write_indexed(nesdev, 0x000021f8, u32temp); | ||
838 | } | ||
839 | } | ||
840 | |||
841 | |||
842 | /** | ||
843 | * nes_destroy_adapter - destroy the adapter structure | ||
844 | */ | ||
845 | void nes_destroy_adapter(struct nes_adapter *nesadapter) | ||
846 | { | ||
847 | struct nes_adapter *tmp_adapter; | ||
848 | |||
849 | list_for_each_entry(tmp_adapter, &nes_adapter_list, list) { | ||
850 | nes_debug(NES_DBG_SHUTDOWN, "Nes Adapter list entry = 0x%p.\n", | ||
851 | tmp_adapter); | ||
852 | } | ||
853 | |||
854 | nesadapter->ref_count--; | ||
855 | if (!nesadapter->ref_count) { | ||
856 | if (nesadapter->hw_rev == NE020_REV) { | ||
857 | del_timer(&nesadapter->mh_timer); | ||
858 | } | ||
859 | del_timer(&nesadapter->lc_timer); | ||
860 | |||
861 | list_del(&nesadapter->list); | ||
862 | kfree(nesadapter); | ||
863 | } | ||
864 | } | ||
865 | |||
866 | |||
867 | /** | ||
868 | * nes_init_cqp - allocate and create the CQP, CCQ, CEQs, and AEQ | ||
869 | */ | ||
870 | int nes_init_cqp(struct nes_device *nesdev) | ||
871 | { | ||
872 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
873 | struct nes_hw_cqp_qp_context *cqp_qp_context; | ||
874 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
875 | struct nes_hw_ceq *ceq; | ||
876 | struct nes_hw_ceq *nic_ceq; | ||
877 | struct nes_hw_aeq *aeq; | ||
878 | void *vmem; | ||
879 | dma_addr_t pmem; | ||
880 | u32 count=0; | ||
881 | u32 cqp_head; | ||
882 | u64 u64temp; | ||
883 | u32 u32temp; | ||
884 | |||
885 | /* allocate CQP memory */ | ||
886 | /* Need to add max_cq to the aeq size once cq overflow checking is added back */ | ||
887 | /* SQ is 512 byte aligned, others are 256 byte aligned */ | ||
888 | nesdev->cqp_mem_size = 512 + | ||
889 | (sizeof(struct nes_hw_cqp_wqe) * NES_CQP_SQ_SIZE) + | ||
890 | (sizeof(struct nes_hw_cqe) * NES_CCQ_SIZE) + | ||
891 | max(((u32)sizeof(struct nes_hw_ceqe) * NES_CCEQ_SIZE), (u32)256) + | ||
892 | max(((u32)sizeof(struct nes_hw_ceqe) * NES_NIC_CEQ_SIZE), (u32)256) + | ||
893 | (sizeof(struct nes_hw_aeqe) * nesadapter->max_qp) + | ||
894 | sizeof(struct nes_hw_cqp_qp_context); | ||
895 | |||
896 | nesdev->cqp_vbase = pci_alloc_consistent(nesdev->pcidev, nesdev->cqp_mem_size, | ||
897 | &nesdev->cqp_pbase); | ||
898 | if (!nesdev->cqp_vbase) { | ||
899 | nes_debug(NES_DBG_INIT, "Unable to allocate memory for host descriptor rings\n"); | ||
900 | return -ENOMEM; | ||
901 | } | ||
902 | memset(nesdev->cqp_vbase, 0, nesdev->cqp_mem_size); | ||
903 | |||
904 | /* Allocate twice as many CQP requests as the SQ size */ | ||
905 | nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) * | ||
906 | 2 * NES_CQP_SQ_SIZE, GFP_KERNEL); | ||
907 | if (nesdev->nes_cqp_requests == NULL) { | ||
908 | nes_debug(NES_DBG_INIT, "Unable to allocate memory for CQP request entries.\n"); | ||
909 | pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase, | ||
910 | nesdev->cqp.sq_pbase); | ||
911 | return -ENOMEM; | ||
912 | } | ||
913 | |||
914 | nes_debug(NES_DBG_INIT, "Allocated CQP structures at %p (phys = %016lX), size = %u.\n", | ||
915 | nesdev->cqp_vbase, (unsigned long)nesdev->cqp_pbase, nesdev->cqp_mem_size); | ||
916 | |||
917 | spin_lock_init(&nesdev->cqp.lock); | ||
918 | init_waitqueue_head(&nesdev->cqp.waitq); | ||
919 | |||
920 | /* Setup Various Structures */ | ||
921 | vmem = (void *)(((unsigned long)nesdev->cqp_vbase + (512 - 1)) & | ||
922 | ~(unsigned long)(512 - 1)); | ||
923 | pmem = (dma_addr_t)(((unsigned long long)nesdev->cqp_pbase + (512 - 1)) & | ||
924 | ~(unsigned long long)(512 - 1)); | ||
925 | |||
926 | nesdev->cqp.sq_vbase = vmem; | ||
927 | nesdev->cqp.sq_pbase = pmem; | ||
928 | nesdev->cqp.sq_size = NES_CQP_SQ_SIZE; | ||
929 | nesdev->cqp.sq_head = 0; | ||
930 | nesdev->cqp.sq_tail = 0; | ||
931 | nesdev->cqp.qp_id = PCI_FUNC(nesdev->pcidev->devfn); | ||
932 | |||
933 | vmem += (sizeof(struct nes_hw_cqp_wqe) * nesdev->cqp.sq_size); | ||
934 | pmem += (sizeof(struct nes_hw_cqp_wqe) * nesdev->cqp.sq_size); | ||
935 | |||
936 | nesdev->ccq.cq_vbase = vmem; | ||
937 | nesdev->ccq.cq_pbase = pmem; | ||
938 | nesdev->ccq.cq_size = NES_CCQ_SIZE; | ||
939 | nesdev->ccq.cq_head = 0; | ||
940 | nesdev->ccq.ce_handler = nes_cqp_ce_handler; | ||
941 | nesdev->ccq.cq_number = PCI_FUNC(nesdev->pcidev->devfn); | ||
942 | |||
943 | vmem += (sizeof(struct nes_hw_cqe) * nesdev->ccq.cq_size); | ||
944 | pmem += (sizeof(struct nes_hw_cqe) * nesdev->ccq.cq_size); | ||
945 | |||
946 | nesdev->ceq_index = PCI_FUNC(nesdev->pcidev->devfn); | ||
947 | ceq = &nesadapter->ceq[nesdev->ceq_index]; | ||
948 | ceq->ceq_vbase = vmem; | ||
949 | ceq->ceq_pbase = pmem; | ||
950 | ceq->ceq_size = NES_CCEQ_SIZE; | ||
951 | ceq->ceq_head = 0; | ||
952 | |||
953 | vmem += max(((u32)sizeof(struct nes_hw_ceqe) * ceq->ceq_size), (u32)256); | ||
954 | pmem += max(((u32)sizeof(struct nes_hw_ceqe) * ceq->ceq_size), (u32)256); | ||
955 | |||
956 | nesdev->nic_ceq_index = PCI_FUNC(nesdev->pcidev->devfn) + 8; | ||
957 | nic_ceq = &nesadapter->ceq[nesdev->nic_ceq_index]; | ||
958 | nic_ceq->ceq_vbase = vmem; | ||
959 | nic_ceq->ceq_pbase = pmem; | ||
960 | nic_ceq->ceq_size = NES_NIC_CEQ_SIZE; | ||
961 | nic_ceq->ceq_head = 0; | ||
962 | |||
963 | vmem += max(((u32)sizeof(struct nes_hw_ceqe) * nic_ceq->ceq_size), (u32)256); | ||
964 | pmem += max(((u32)sizeof(struct nes_hw_ceqe) * nic_ceq->ceq_size), (u32)256); | ||
965 | |||
966 | aeq = &nesadapter->aeq[PCI_FUNC(nesdev->pcidev->devfn)]; | ||
967 | aeq->aeq_vbase = vmem; | ||
968 | aeq->aeq_pbase = pmem; | ||
969 | aeq->aeq_size = nesadapter->max_qp; | ||
970 | aeq->aeq_head = 0; | ||
971 | |||
972 | /* Setup QP Context */ | ||
973 | vmem += (sizeof(struct nes_hw_aeqe) * aeq->aeq_size); | ||
974 | pmem += (sizeof(struct nes_hw_aeqe) * aeq->aeq_size); | ||
975 | |||
976 | cqp_qp_context = vmem; | ||
977 | cqp_qp_context->context_words[0] = | ||
978 | cpu_to_le32((PCI_FUNC(nesdev->pcidev->devfn) << 12) + (2 << 10)); | ||
979 | cqp_qp_context->context_words[1] = 0; | ||
980 | cqp_qp_context->context_words[2] = cpu_to_le32((u32)nesdev->cqp.sq_pbase); | ||
981 | cqp_qp_context->context_words[3] = cpu_to_le32(((u64)nesdev->cqp.sq_pbase) >> 32); | ||
982 | |||
983 | |||
984 | /* Write the address to Create CQP */ | ||
985 | if ((sizeof(dma_addr_t) > 4)) { | ||
986 | nes_write_indexed(nesdev, | ||
987 | NES_IDX_CREATE_CQP_HIGH + (PCI_FUNC(nesdev->pcidev->devfn) * 8), | ||
988 | ((u64)pmem) >> 32); | ||
989 | } else { | ||
990 | nes_write_indexed(nesdev, | ||
991 | NES_IDX_CREATE_CQP_HIGH + (PCI_FUNC(nesdev->pcidev->devfn) * 8), 0); | ||
992 | } | ||
993 | nes_write_indexed(nesdev, | ||
994 | NES_IDX_CREATE_CQP_LOW + (PCI_FUNC(nesdev->pcidev->devfn) * 8), | ||
995 | (u32)pmem); | ||
996 | |||
997 | INIT_LIST_HEAD(&nesdev->cqp_avail_reqs); | ||
998 | INIT_LIST_HEAD(&nesdev->cqp_pending_reqs); | ||
999 | |||
1000 | for (count = 0; count < 2*NES_CQP_SQ_SIZE; count++) { | ||
1001 | init_waitqueue_head(&nesdev->nes_cqp_requests[count].waitq); | ||
1002 | list_add_tail(&nesdev->nes_cqp_requests[count].list, &nesdev->cqp_avail_reqs); | ||
1003 | } | ||
1004 | |||
1005 | /* Write Create CCQ WQE */ | ||
1006 | cqp_head = nesdev->cqp.sq_head++; | ||
1007 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1008 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
1009 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, | ||
1010 | (NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID | | ||
1011 | NES_CQP_CQ_CHK_OVERFLOW | ((u32)nesdev->ccq.cq_size << 16))); | ||
1012 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, | ||
1013 | (nesdev->ccq.cq_number | | ||
1014 | ((u32)nesdev->ceq_index << 16))); | ||
1015 | u64temp = (u64)nesdev->ccq.cq_pbase; | ||
1016 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp); | ||
1017 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0; | ||
1018 | u64temp = (unsigned long)&nesdev->ccq; | ||
1019 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = | ||
1020 | cpu_to_le32((u32)(u64temp >> 1)); | ||
1021 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = | ||
1022 | cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF); | ||
1023 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0; | ||
1024 | |||
1025 | /* Write Create CEQ WQE */ | ||
1026 | cqp_head = nesdev->cqp.sq_head++; | ||
1027 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1028 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
1029 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, | ||
1030 | (NES_CQP_CREATE_CEQ + ((u32)nesdev->ceq_index << 8))); | ||
1031 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX, ceq->ceq_size); | ||
1032 | u64temp = (u64)ceq->ceq_pbase; | ||
1033 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp); | ||
1034 | |||
1035 | /* Write Create AEQ WQE */ | ||
1036 | cqp_head = nesdev->cqp.sq_head++; | ||
1037 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1038 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
1039 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, | ||
1040 | (NES_CQP_CREATE_AEQ + ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 8))); | ||
1041 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_AEQ_WQE_ELEMENT_COUNT_IDX, aeq->aeq_size); | ||
1042 | u64temp = (u64)aeq->aeq_pbase; | ||
1043 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp); | ||
1044 | |||
1045 | /* Write Create NIC CEQ WQE */ | ||
1046 | cqp_head = nesdev->cqp.sq_head++; | ||
1047 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1048 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
1049 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, | ||
1050 | (NES_CQP_CREATE_CEQ + ((u32)nesdev->nic_ceq_index << 8))); | ||
1051 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX, nic_ceq->ceq_size); | ||
1052 | u64temp = (u64)nic_ceq->ceq_pbase; | ||
1053 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp); | ||
1054 | |||
1055 | /* Poll until CCQP done */ | ||
1056 | count = 0; | ||
1057 | do { | ||
1058 | if (count++ > 1000) { | ||
1059 | printk(KERN_ERR PFX "Error creating CQP\n"); | ||
1060 | pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, | ||
1061 | nesdev->cqp_vbase, nesdev->cqp_pbase); | ||
1062 | return -1; | ||
1063 | } | ||
1064 | udelay(10); | ||
1065 | } while (!(nes_read_indexed(nesdev, | ||
1066 | NES_IDX_QP_CONTROL + (PCI_FUNC(nesdev->pcidev->devfn) * 8)) & (1 << 8))); | ||
1067 | |||
1068 | nes_debug(NES_DBG_INIT, "CQP Status = 0x%08X\n", nes_read_indexed(nesdev, | ||
1069 | NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8))); | ||
1070 | |||
1071 | u32temp = 0x04800000; | ||
1072 | nes_write32(nesdev->regs+NES_WQE_ALLOC, u32temp | nesdev->cqp.qp_id); | ||
1073 | |||
1074 | /* wait for the CCQ, CEQ, and AEQ to get created */ | ||
1075 | count = 0; | ||
1076 | do { | ||
1077 | if (count++ > 1000) { | ||
1078 | printk(KERN_ERR PFX "Error creating CCQ, CEQ, and AEQ\n"); | ||
1079 | pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, | ||
1080 | nesdev->cqp_vbase, nesdev->cqp_pbase); | ||
1081 | return -1; | ||
1082 | } | ||
1083 | udelay(10); | ||
1084 | } while (((nes_read_indexed(nesdev, | ||
1085 | NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)) & (15<<8)) != (15<<8))); | ||
1086 | |||
1087 | /* dump the QP status value */ | ||
1088 | nes_debug(NES_DBG_INIT, "QP Status = 0x%08X\n", nes_read_indexed(nesdev, | ||
1089 | NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8))); | ||
1090 | |||
1091 | nesdev->cqp.sq_tail++; | ||
1092 | |||
1093 | return 0; | ||
1094 | } | ||
1095 | |||
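nes_init_cqp() rounds the DMA buffer up to a 512-byte boundary before carving out the SQ, CCQ, CEQs, AEQ, and QP context. A minimal sketch of that round-up, which only works for power-of-two alignments (512 and 256 here); the helper name is illustrative:

/* Illustrative only: round `addr` up to the next multiple of a
 * power-of-two `align`, as the "(base + (512 - 1)) & ~(512 - 1)"
 * expressions above do. */
static inline unsigned long nes_align_up_example(unsigned long addr,
						 unsigned long align)
{
	return (addr + (align - 1)) & ~(align - 1);
}

/* e.g. nes_align_up_example(0x1234, 512) == 0x1400 */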
1096 | |||
1097 | /** | ||
1098 | * nes_destroy_cqp - destroy the CQP and its associated queues | ||
1099 | */ | ||
1100 | int nes_destroy_cqp(struct nes_device *nesdev) | ||
1101 | { | ||
1102 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
1103 | u32 count = 0; | ||
1104 | u32 cqp_head; | ||
1105 | unsigned long flags; | ||
1106 | |||
1107 | do { | ||
1108 | if (count++ > 1000) | ||
1109 | break; | ||
1110 | udelay(10); | ||
1111 | } while (!(nesdev->cqp.sq_head == nesdev->cqp.sq_tail)); | ||
1112 | |||
1113 | /* Reset CCQ */ | ||
1114 | nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_RESET | | ||
1115 | nesdev->ccq.cq_number); | ||
1116 | |||
1117 | /* Disable device interrupts */ | ||
1118 | nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff); | ||
1119 | |||
1120 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
1121 | |||
1122 | /* Destroy the AEQ */ | ||
1123 | cqp_head = nesdev->cqp.sq_head++; | ||
1124 | nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; | ||
1125 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1126 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_AEQ | | ||
1127 | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 8)); | ||
1128 | cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] = 0; | ||
1129 | |||
1130 | /* Destroy the NIC CEQ */ | ||
1131 | cqp_head = nesdev->cqp.sq_head++; | ||
1132 | nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; | ||
1133 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1134 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CEQ | | ||
1135 | ((u32)nesdev->nic_ceq_index << 8)); | ||
1136 | |||
1137 | /* Destroy the CEQ */ | ||
1138 | cqp_head = nesdev->cqp.sq_head++; | ||
1139 | nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; | ||
1140 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1141 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CEQ | | ||
1142 | (nesdev->ceq_index << 8)); | ||
1143 | |||
1144 | /* Destroy the CCQ */ | ||
1145 | cqp_head = nesdev->cqp.sq_head++; | ||
1146 | nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; | ||
1147 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1148 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CQ); | ||
1149 | cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesdev->ccq.cq_number | | ||
1150 | ((u32)nesdev->ceq_index << 16)); | ||
1151 | |||
1152 | /* Destroy CQP */ | ||
1153 | cqp_head = nesdev->cqp.sq_head++; | ||
1154 | nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; | ||
1155 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1156 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_QP | | ||
1157 | NES_CQP_QP_TYPE_CQP); | ||
1158 | cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesdev->cqp.qp_id); | ||
1159 | |||
1160 | barrier(); | ||
1161 | /* Ring doorbell (5 WQEs) */ | ||
1162 | nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x05800000 | nesdev->cqp.qp_id); | ||
1163 | |||
1164 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
1165 | |||
1166 | /* wait for the CCQ, CEQ, and AEQ to get destroyed */ | ||
1167 | count = 0; | ||
1168 | do { | ||
1169 | if (count++ > 1000) { | ||
1170 | printk(KERN_ERR PFX "Function%d: Error destroying CCQ, CEQ, and AEQ\n", | ||
1171 | PCI_FUNC(nesdev->pcidev->devfn)); | ||
1172 | break; | ||
1173 | } | ||
1174 | udelay(10); | ||
1175 | } while (((nes_read_indexed(nesdev, | ||
1176 | NES_IDX_QP_CONTROL + (PCI_FUNC(nesdev->pcidev->devfn)*8)) & (15 << 8)) != 0)); | ||
1177 | |||
1178 | /* dump the QP status value */ | ||
1179 | nes_debug(NES_DBG_SHUTDOWN, "Function%d: QP Status = 0x%08X\n", | ||
1180 | PCI_FUNC(nesdev->pcidev->devfn), | ||
1181 | nes_read_indexed(nesdev, | ||
1182 | NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8))); | ||
1183 | |||
1184 | kfree(nesdev->nes_cqp_requests); | ||
1185 | |||
1186 | /* Free the control structures */ | ||
1187 | pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase, | ||
1188 | nesdev->cqp.sq_pbase); | ||
1189 | |||
1190 | return 0; | ||
1191 | } | ||
1192 | |||
1193 | |||
1194 | /** | ||
1195 | * nes_init_phy - initialize the PHY for this MAC | ||
1196 | */ | ||
1197 | int nes_init_phy(struct nes_device *nesdev) | ||
1198 | { | ||
1199 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1200 | u32 counter = 0; | ||
1201 | u32 mac_index = nesdev->mac_index; | ||
1202 | u32 tx_config; | ||
1203 | u16 phy_data; | ||
1204 | |||
1205 | if (nesadapter->OneG_Mode) { | ||
1206 | nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index); | ||
1207 | if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) { | ||
1208 | printk(PFX "%s: Programming mdc config for 1G\n", __FUNCTION__); | ||
1209 | tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); | ||
1210 | tx_config |= 0x04; | ||
1211 | nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); | ||
1212 | } | ||
1213 | |||
1214 | nes_read_1G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index], &phy_data); | ||
1215 | nes_debug(NES_DBG_PHY, "Phy data from register 1 phy address %u = 0x%X.\n", | ||
1216 | nesadapter->phy_index[mac_index], phy_data); | ||
1217 | nes_write_1G_phy_reg(nesdev, 23, nesadapter->phy_index[mac_index], 0xb000); | ||
1218 | |||
1219 | /* Reset the PHY */ | ||
1220 | nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], 0x8000); | ||
1221 | udelay(100); | ||
1222 | counter = 0; | ||
1223 | do { | ||
1224 | nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data); | ||
1225 | nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data); | ||
1226 | if (counter++ > 100) break; | ||
1227 | } while (phy_data & 0x8000); | ||
1228 | |||
1229 | /* Setting no phy loopback */ | ||
1230 | phy_data &= 0xbfff; | ||
1231 | phy_data |= 0x1140; | ||
1232 | nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data); | ||
1233 | nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data); | ||
1234 | nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data); | ||
1235 | |||
1236 | nes_read_1G_phy_reg(nesdev, 0x17, nesadapter->phy_index[mac_index], &phy_data); | ||
1237 | nes_debug(NES_DBG_PHY, "Phy data from register 0x17 = 0x%X.\n", phy_data); | ||
1238 | |||
1239 | nes_read_1G_phy_reg(nesdev, 0x1e, nesadapter->phy_index[mac_index], &phy_data); | ||
1240 | nes_debug(NES_DBG_PHY, "Phy data from register 0x1e = 0x%X.\n", phy_data); | ||
1241 | |||
1242 | /* Setting the interrupt mask */ | ||
1243 | nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data); | ||
1244 | nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data); | ||
1245 | nes_write_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], 0xffee); | ||
1246 | |||
1247 | nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data); | ||
1248 | nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data); | ||
1249 | |||
1250 | /* turning on flow control */ | ||
1251 | nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data); | ||
1252 | nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data); | ||
1253 | nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], | ||
1254 | (phy_data & ~(0x03E0)) | 0xc00); | ||
1255 | /* nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], | ||
1256 | phy_data | 0xc00); */ | ||
1257 | nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data); | ||
1258 | nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data); | ||
1259 | |||
1260 | nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data); | ||
1261 | nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data); | ||
1262 | /* Clear Half duplex */ | ||
1263 | nes_write_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], | ||
1264 | phy_data & ~(0x0100)); | ||
1265 | nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data); | ||
1266 | nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data); | ||
1267 | |||
1268 | nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data); | ||
1269 | nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data | 0x0300); | ||
1270 | } else { | ||
1271 | if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) { | ||
1272 | /* setup 10G MDIO operation */ | ||
1273 | tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); | ||
1274 | tx_config |= 0x14; | ||
1275 | nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); | ||
1276 | } | ||
1277 | } | ||
1278 | return 0; | ||
1279 | } | ||
1280 | |||
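nes_init_phy() updates several 1G PHY registers by reading the current value, masking bits, and writing the result back (flow control, half-duplex). A sketch of that read-modify-write step built on the nes_read_1G_phy_reg()/nes_write_1G_phy_reg() accessors used above; the helper name and parameter types are assumptions:

/* Illustrative only: clear the bits in `clear`, set the bits in `set`,
 * in one 1G PHY register. */
static void nes_phy_rmw_example(struct nes_device *nesdev, u32 phy_reg,
				u32 phy_addr, u16 clear, u16 set)
{
	u16 phy_data;

	nes_read_1G_phy_reg(nesdev, phy_reg, phy_addr, &phy_data);
	phy_data = (phy_data & ~clear) | set;
	nes_write_1G_phy_reg(nesdev, phy_reg, phy_addr, phy_data);
}

/* e.g. the flow-control update above corresponds to
 * nes_phy_rmw_example(nesdev, 4, nesadapter->phy_index[mac_index], 0x03E0, 0x0c00); */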
1281 | |||
1282 | /** | ||
1283 | * nes_replenish_nic_rq - post fresh receive buffers to the NIC RQ | ||
1284 | */ | ||
1285 | static void nes_replenish_nic_rq(struct nes_vnic *nesvnic) | ||
1286 | { | ||
1287 | unsigned long flags; | ||
1288 | dma_addr_t bus_address; | ||
1289 | struct sk_buff *skb; | ||
1290 | struct nes_hw_nic_rq_wqe *nic_rqe; | ||
1291 | struct nes_hw_nic *nesnic; | ||
1292 | struct nes_device *nesdev; | ||
1293 | u32 rx_wqes_posted = 0; | ||
1294 | |||
1295 | nesnic = &nesvnic->nic; | ||
1296 | nesdev = nesvnic->nesdev; | ||
1297 | spin_lock_irqsave(&nesnic->rq_lock, flags); | ||
1298 | if (nesnic->replenishing_rq != 0) { | ||
1299 | if (((nesnic->rq_size-1) == atomic_read(&nesvnic->rx_skbs_needed)) && | ||
1300 | (atomic_read(&nesvnic->rx_skb_timer_running) == 0)) { | ||
1301 | atomic_set(&nesvnic->rx_skb_timer_running, 1); | ||
1302 | spin_unlock_irqrestore(&nesnic->rq_lock, flags); | ||
1303 | nesvnic->rq_wqes_timer.expires = jiffies + (HZ/2); /* 1/2 second */ | ||
1304 | add_timer(&nesvnic->rq_wqes_timer); | ||
1305 | } else | ||
1306 | spin_unlock_irqrestore(&nesnic->rq_lock, flags); | ||
1307 | return; | ||
1308 | } | ||
1309 | nesnic->replenishing_rq = 1; | ||
1310 | spin_unlock_irqrestore(&nesnic->rq_lock, flags); | ||
1311 | do { | ||
1312 | skb = dev_alloc_skb(nesvnic->max_frame_size); | ||
1313 | if (skb) { | ||
1314 | skb->dev = nesvnic->netdev; | ||
1315 | |||
1316 | bus_address = pci_map_single(nesdev->pcidev, | ||
1317 | skb->data, nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); | ||
1318 | |||
1319 | nic_rqe = &nesnic->rq_vbase[nesvnic->nic.rq_head]; | ||
1320 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = | ||
1321 | cpu_to_le32(nesvnic->max_frame_size); | ||
1322 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0; | ||
1323 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = | ||
1324 | cpu_to_le32((u32)bus_address); | ||
1325 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = | ||
1326 | cpu_to_le32((u32)((u64)bus_address >> 32)); | ||
1327 | nesnic->rx_skb[nesnic->rq_head] = skb; | ||
1328 | nesnic->rq_head++; | ||
1329 | nesnic->rq_head &= nesnic->rq_size - 1; | ||
1330 | atomic_dec(&nesvnic->rx_skbs_needed); | ||
1331 | barrier(); | ||
1332 | if (++rx_wqes_posted == 255) { | ||
1333 | nes_write32(nesdev->regs+NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesnic->qp_id); | ||
1334 | rx_wqes_posted = 0; | ||
1335 | } | ||
1336 | } else { | ||
1337 | spin_lock_irqsave(&nesnic->rq_lock, flags); | ||
1338 | if (((nesnic->rq_size-1) == atomic_read(&nesvnic->rx_skbs_needed)) && | ||
1339 | (atomic_read(&nesvnic->rx_skb_timer_running) == 0)) { | ||
1340 | atomic_set(&nesvnic->rx_skb_timer_running, 1); | ||
1341 | spin_unlock_irqrestore(&nesnic->rq_lock, flags); | ||
1342 | nesvnic->rq_wqes_timer.expires = jiffies + (HZ/2); /* 1/2 second */ | ||
1343 | add_timer(&nesvnic->rq_wqes_timer); | ||
1344 | } else | ||
1345 | spin_unlock_irqrestore(&nesnic->rq_lock, flags); | ||
1346 | break; | ||
1347 | } | ||
1348 | } while (atomic_read(&nesvnic->rx_skbs_needed)); | ||
1349 | barrier(); | ||
1350 | if (rx_wqes_posted) | ||
1351 | nes_write32(nesdev->regs+NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesnic->qp_id); | ||
1352 | nesnic->replenishing_rq = 0; | ||
1353 | } | ||
1354 | |||
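Because the receive queue size is a power of two, nes_replenish_nic_rq() wraps the head index with a mask instead of a modulo, and rings the WQE_ALLOC doorbell every 255 postings plus once more at the end. A minimal sketch of the index wrap, with an illustrative ring size:

/* Illustrative only: advance a ring index when the ring size is a
 * power of two, as rq_head/rq_size are handled above. */
#define NES_EXAMPLE_RQ_SIZE 512			/* must be a power of two */

static inline u32 nes_rq_next_example(u32 head)
{
	return (head + 1) & (NES_EXAMPLE_RQ_SIZE - 1);	/* 511 wraps to 0 */
}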
1355 | |||
1356 | /** | ||
1357 | * nes_rq_wqes_timeout - timer callback that retries RQ replenishment | ||
1358 | */ | ||
1359 | static void nes_rq_wqes_timeout(unsigned long parm) | ||
1360 | { | ||
1361 | struct nes_vnic *nesvnic = (struct nes_vnic *)parm; | ||
1362 | printk("%s: Timer fired.\n", __FUNCTION__); | ||
1363 | atomic_set(&nesvnic->rx_skb_timer_running, 0); | ||
1364 | if (atomic_read(&nesvnic->rx_skbs_needed)) | ||
1365 | nes_replenish_nic_rq(nesvnic); | ||
1366 | } | ||
1367 | |||
1368 | |||
1369 | /** | ||
1370 | * nes_init_nic_qp - allocate and create the NIC QP and CQ for a netdev | ||
1371 | */ | ||
1372 | int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev) | ||
1373 | { | ||
1374 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
1375 | struct nes_hw_nic_sq_wqe *nic_sqe; | ||
1376 | struct nes_hw_nic_qp_context *nic_context; | ||
1377 | struct sk_buff *skb; | ||
1378 | struct nes_hw_nic_rq_wqe *nic_rqe; | ||
1379 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
1380 | unsigned long flags; | ||
1381 | void *vmem; | ||
1382 | dma_addr_t pmem; | ||
1383 | u64 u64temp; | ||
1384 | int ret; | ||
1385 | u32 cqp_head; | ||
1386 | u32 counter; | ||
1387 | u32 wqe_count; | ||
1388 | u8 jumbomode=0; | ||
1389 | |||
1390 | /* Allocate fragment, SQ, RQ, and CQ; Reuse CEQ based on the PCI function */ | ||
1391 | nesvnic->nic_mem_size = 256 + | ||
1392 | (NES_NIC_WQ_SIZE * sizeof(struct nes_first_frag)) + | ||
1393 | (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe)) + | ||
1394 | (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe)) + | ||
1395 | (NES_NIC_WQ_SIZE * 2 * sizeof(struct nes_hw_nic_cqe)) + | ||
1396 | sizeof(struct nes_hw_nic_qp_context); | ||
1397 | |||
1398 | nesvnic->nic_vbase = pci_alloc_consistent(nesdev->pcidev, nesvnic->nic_mem_size, | ||
1399 | &nesvnic->nic_pbase); | ||
1400 | if (!nesvnic->nic_vbase) { | ||
1401 | nes_debug(NES_DBG_INIT, "Unable to allocate memory for NIC host descriptor rings\n"); | ||
1402 | return -ENOMEM; | ||
1403 | } | ||
1404 | memset(nesvnic->nic_vbase, 0, nesvnic->nic_mem_size); | ||
1405 | nes_debug(NES_DBG_INIT, "Allocated NIC QP structures at %p (phys = %016lX), size = %u.\n", | ||
1406 | nesvnic->nic_vbase, (unsigned long)nesvnic->nic_pbase, nesvnic->nic_mem_size); | ||
1407 | |||
1408 | vmem = (void *)(((unsigned long)nesvnic->nic_vbase + (256 - 1)) & | ||
1409 | ~(unsigned long)(256 - 1)); | ||
1410 | pmem = (dma_addr_t)(((unsigned long long)nesvnic->nic_pbase + (256 - 1)) & | ||
1411 | ~(unsigned long long)(256 - 1)); | ||
1412 | |||
1413 | /* Setup the first Fragment buffers */ | ||
1414 | nesvnic->nic.first_frag_vbase = vmem; | ||
1415 | |||
1416 | for (counter = 0; counter < NES_NIC_WQ_SIZE; counter++) { | ||
1417 | nesvnic->nic.frag_paddr[counter] = pmem; | ||
1418 | pmem += sizeof(struct nes_first_frag); | ||
1419 | } | ||
1420 | |||
1421 | /* setup the SQ */ | ||
1422 | vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_first_frag)); | ||
1423 | |||
1424 | nesvnic->nic.sq_vbase = (void *)vmem; | ||
1425 | nesvnic->nic.sq_pbase = pmem; | ||
1426 | nesvnic->nic.sq_head = 0; | ||
1427 | nesvnic->nic.sq_tail = 0; | ||
1428 | nesvnic->nic.sq_size = NES_NIC_WQ_SIZE; | ||
1429 | for (counter = 0; counter < NES_NIC_WQ_SIZE; counter++) { | ||
1430 | nic_sqe = &nesvnic->nic.sq_vbase[counter]; | ||
1431 | nic_sqe->wqe_words[NES_NIC_SQ_WQE_MISC_IDX] = | ||
1432 | cpu_to_le32(NES_NIC_SQ_WQE_DISABLE_CHKSUM | | ||
1433 | NES_NIC_SQ_WQE_COMPLETION); | ||
1434 | nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX] = | ||
1435 | cpu_to_le32((u32)NES_FIRST_FRAG_SIZE << 16); | ||
1436 | nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX] = | ||
1437 | cpu_to_le32((u32)nesvnic->nic.frag_paddr[counter]); | ||
1438 | nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX] = | ||
1439 | cpu_to_le32((u32)((u64)nesvnic->nic.frag_paddr[counter] >> 32)); | ||
1440 | } | ||
1441 | |||
1442 | nesvnic->get_cqp_request = nes_get_cqp_request; | ||
1443 | nesvnic->post_cqp_request = nes_post_cqp_request; | ||
1444 | nesvnic->mcrq_mcast_filter = NULL; | ||
1445 | |||
1446 | spin_lock_init(&nesvnic->nic.sq_lock); | ||
1447 | spin_lock_init(&nesvnic->nic.rq_lock); | ||
1448 | |||
1449 | /* setup the RQ */ | ||
1450 | vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe)); | ||
1451 | pmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe)); | ||
1452 | |||
1453 | |||
1454 | nesvnic->nic.rq_vbase = vmem; | ||
1455 | nesvnic->nic.rq_pbase = pmem; | ||
1456 | nesvnic->nic.rq_head = 0; | ||
1457 | nesvnic->nic.rq_tail = 0; | ||
1458 | nesvnic->nic.rq_size = NES_NIC_WQ_SIZE; | ||
1459 | |||
1460 | /* setup the CQ */ | ||
1461 | vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe)); | ||
1462 | pmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe)); | ||
1463 | |||
1464 | if (nesdev->nesadapter->netdev_count > 2) | ||
1465 | nesvnic->mcrq_qp_id = nesvnic->nic_index + 32; | ||
1466 | else | ||
1467 | nesvnic->mcrq_qp_id = nesvnic->nic.qp_id + 4; | ||
1468 | |||
1469 | nesvnic->nic_cq.cq_vbase = vmem; | ||
1470 | nesvnic->nic_cq.cq_pbase = pmem; | ||
1471 | nesvnic->nic_cq.cq_head = 0; | ||
1472 | nesvnic->nic_cq.cq_size = NES_NIC_WQ_SIZE * 2; | ||
1473 | |||
1474 | nesvnic->nic_cq.ce_handler = nes_nic_napi_ce_handler; | ||
1475 | |||
1476 | /* Send CreateCQ request to CQP */ | ||
1477 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
1478 | cqp_head = nesdev->cqp.sq_head; | ||
1479 | |||
1480 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1481 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
1482 | |||
1483 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32( | ||
1484 | NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID | | ||
1485 | ((u32)nesvnic->nic_cq.cq_size << 16)); | ||
1486 | cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32( | ||
1487 | nesvnic->nic_cq.cq_number | ((u32)nesdev->nic_ceq_index << 16)); | ||
1488 | u64temp = (u64)nesvnic->nic_cq.cq_pbase; | ||
1489 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp); | ||
1490 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0; | ||
1491 | u64temp = (unsigned long)&nesvnic->nic_cq; | ||
1492 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1)); | ||
1493 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = | ||
1494 | cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF); | ||
1495 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0; | ||
1496 | if (++cqp_head >= nesdev->cqp.sq_size) | ||
1497 | cqp_head = 0; | ||
1498 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1499 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
1500 | |||
1501 | /* Send CreateQP request to CQP */ | ||
1502 | nic_context = (void *)(&nesvnic->nic_cq.cq_vbase[nesvnic->nic_cq.cq_size]); | ||
1503 | nic_context->context_words[NES_NIC_CTX_MISC_IDX] = | ||
1504 | cpu_to_le32((u32)NES_NIC_CTX_SIZE | | ||
1505 | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12)); | ||
1506 | nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n", | ||
1507 | nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE), | ||
1508 | nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE)); | ||
1509 | if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0) { | ||
1510 | nic_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE); | ||
1511 | } | ||
1512 | |||
1513 | u64temp = (u64)nesvnic->nic.sq_pbase; | ||
1514 | nic_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp); | ||
1515 | nic_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32)); | ||
1516 | u64temp = (u64)nesvnic->nic.rq_pbase; | ||
1517 | nic_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp); | ||
1518 | nic_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32)); | ||
1519 | |||
1520 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP | | ||
1521 | NES_CQP_QP_TYPE_NIC); | ||
1522 | cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesvnic->nic.qp_id); | ||
1523 | u64temp = (u64)nesvnic->nic_cq.cq_pbase + | ||
1524 | (nesvnic->nic_cq.cq_size * sizeof(struct nes_hw_nic_cqe)); | ||
1525 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp); | ||
1526 | |||
1527 | if (++cqp_head >= nesdev->cqp.sq_size) | ||
1528 | cqp_head = 0; | ||
1529 | nesdev->cqp.sq_head = cqp_head; | ||
1530 | |||
1531 | barrier(); | ||
1532 | |||
1533 | /* Ring doorbell (2 WQEs) */ | ||
1534 | nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id); | ||
1535 | |||
1536 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
1537 | nes_debug(NES_DBG_INIT, "Waiting for create NIC QP%u to complete.\n", | ||
1538 | nesvnic->nic.qp_id); | ||
1539 | |||
1540 | ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head), | ||
1541 | NES_EVENT_TIMEOUT); | ||
1542 | nes_debug(NES_DBG_INIT, "Create NIC QP%u completed, wait_event_timeout ret = %u.\n", | ||
1543 | nesvnic->nic.qp_id, ret); | ||
1544 | if (!ret) { | ||
1545 | nes_debug(NES_DBG_INIT, "NIC QP%u create timeout expired\n", nesvnic->nic.qp_id); | ||
1546 | pci_free_consistent(nesdev->pcidev, nesvnic->nic_mem_size, nesvnic->nic_vbase, | ||
1547 | nesvnic->nic_pbase); | ||
1548 | return -EIO; | ||
1549 | } | ||
1550 | |||
1551 | /* Populate the RQ */ | ||
1552 | for (counter = 0; counter < (NES_NIC_WQ_SIZE - 1); counter++) { | ||
1553 | skb = dev_alloc_skb(nesvnic->max_frame_size); | ||
1554 | if (!skb) { | ||
1555 | nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name); | ||
1556 | |||
1557 | nes_destroy_nic_qp(nesvnic); | ||
1558 | return -ENOMEM; | ||
1559 | } | ||
1560 | |||
1561 | skb->dev = netdev; | ||
1562 | |||
1563 | pmem = pci_map_single(nesdev->pcidev, skb->data, | ||
1564 | nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); | ||
1565 | |||
1566 | nic_rqe = &nesvnic->nic.rq_vbase[counter]; | ||
1567 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(nesvnic->max_frame_size); | ||
1568 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0; | ||
1569 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem); | ||
1570 | nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32)); | ||
1571 | nesvnic->nic.rx_skb[counter] = skb; | ||
1572 | } | ||
1573 | |||
1574 | wqe_count = NES_NIC_WQ_SIZE - 1; | ||
1575 | nesvnic->nic.rq_head = wqe_count; | ||
1576 | barrier(); | ||
1577 | do { | ||
1578 | counter = min(wqe_count, ((u32)255)); | ||
1579 | wqe_count -= counter; | ||
1580 | nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter << 24) | nesvnic->nic.qp_id); | ||
1581 | } while (wqe_count); | ||
1582 | init_timer(&nesvnic->rq_wqes_timer); | ||
1583 | nesvnic->rq_wqes_timer.function = nes_rq_wqes_timeout; | ||
1584 | nesvnic->rq_wqes_timer.data = (unsigned long)nesvnic; | ||
1585 | nes_debug(NES_DBG_INIT, "NAPI support Enabled\n"); | ||
1586 | |||
1587 | if (nesdev->nesadapter->et_use_adaptive_rx_coalesce) | ||
1588 | { | ||
1589 | nes_nic_init_timer(nesdev); | ||
1590 | if (netdev->mtu > 1500) | ||
1591 | jumbomode = 1; | ||
1592 | nes_nic_init_timer_defaults(nesdev, jumbomode); | ||
1593 | } | ||
1594 | |||
1595 | return 0; | ||
1596 | } | ||
1597 | |||
1598 | |||
1599 | /** | ||
1600 | * nes_destroy_nic_qp - free receive buffers and destroy the NIC QP and CQ | ||
1601 | */ | ||
1602 | void nes_destroy_nic_qp(struct nes_vnic *nesvnic) | ||
1603 | { | ||
1604 | struct nes_device *nesdev = nesvnic->nesdev; | ||
1605 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
1606 | struct nes_hw_nic_rq_wqe *nic_rqe; | ||
1607 | u64 wqe_frag; | ||
1608 | u32 cqp_head; | ||
1609 | unsigned long flags; | ||
1610 | int ret; | ||
1611 | |||
1612 | /* Free remaining NIC receive buffers */ | ||
1613 | while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) { | ||
1614 | nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail]; | ||
1615 | wqe_frag = (u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]); | ||
1616 | wqe_frag |= ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32; | ||
1617 | pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag, | ||
1618 | nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); | ||
1619 | dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]); | ||
1620 | nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1); | ||
1621 | } | ||
1622 | |||
1623 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
1624 | |||
1625 | /* Destroy NIC QP */ | ||
1626 | cqp_head = nesdev->cqp.sq_head; | ||
1627 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1628 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
1629 | |||
1630 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, | ||
1631 | (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC)); | ||
1632 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, | ||
1633 | nesvnic->nic.qp_id); | ||
1634 | |||
1635 | if (++cqp_head >= nesdev->cqp.sq_size) | ||
1636 | cqp_head = 0; | ||
1637 | |||
1638 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
1639 | |||
1640 | /* Destroy NIC CQ */ | ||
1641 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
1642 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, | ||
1643 | (NES_CQP_DESTROY_CQ | ((u32)nesvnic->nic_cq.cq_size << 16))); | ||
1644 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, | ||
1645 | (nesvnic->nic_cq.cq_number | ((u32)nesdev->nic_ceq_index << 16))); | ||
1646 | |||
1647 | if (++cqp_head >= nesdev->cqp.sq_size) | ||
1648 | cqp_head = 0; | ||
1649 | |||
1650 | nesdev->cqp.sq_head = cqp_head; | ||
1651 | barrier(); | ||
1652 | |||
1653 | /* Ring doorbell (2 WQEs) */ | ||
1654 | nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id); | ||
1655 | |||
1656 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
1657 | nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u," | ||
1658 | " cqp.sq_tail=%u, cqp.sq_size=%u\n", | ||
1659 | cqp_head, nesdev->cqp.sq_head, | ||
1660 | nesdev->cqp.sq_tail, nesdev->cqp.sq_size); | ||
1661 | |||
1662 | ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head), | ||
1663 | NES_EVENT_TIMEOUT); | ||
1664 | |||
1665 | nes_debug(NES_DBG_SHUTDOWN, "Destroy NIC QP returned, wait_event_timeout ret = %u, cqp_head=%u," | ||
1666 | " cqp.sq_head=%u, cqp.sq_tail=%u\n", | ||
1667 | ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail); | ||
1668 | if (!ret) { | ||
1669 | nes_debug(NES_DBG_SHUTDOWN, "NIC QP%u destroy timeout expired\n", | ||
1670 | nesvnic->nic.qp_id); | ||
1671 | } | ||
1672 | |||
1673 | pci_free_consistent(nesdev->pcidev, nesvnic->nic_mem_size, nesvnic->nic_vbase, | ||
1674 | nesvnic->nic_pbase); | ||
1675 | } | ||
1676 | |||
1677 | /** | ||
1678 | * nes_napi_isr - interrupt fast path for NIC completion events | ||
1679 | */ | ||
1680 | int nes_napi_isr(struct nes_device *nesdev) | ||
1681 | { | ||
1682 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1683 | u32 int_stat; | ||
1684 | |||
1685 | if (nesdev->napi_isr_ran) { | ||
1686 | /* interrupt status has already been read in ISR */ | ||
1687 | int_stat = nesdev->int_stat; | ||
1688 | } else { | ||
1689 | int_stat = nes_read32(nesdev->regs + NES_INT_STAT); | ||
1690 | nesdev->int_stat = int_stat; | ||
1691 | nesdev->napi_isr_ran = 1; | ||
1692 | } | ||
1693 | |||
1694 | int_stat &= nesdev->int_req; | ||
1695 | /* if only NIC interrupts are pending, process here, else wait for DPC */ | ||
1696 | if ((int_stat) && ((int_stat & 0x0000ff00) == int_stat)) { | ||
1697 | nesdev->napi_isr_ran = 0; | ||
1698 | nes_write32(nesdev->regs+NES_INT_STAT, | ||
1699 | (int_stat & | ||
1700 | ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3))); | ||
1701 | |||
1702 | /* Process the CEQs */ | ||
1703 | nes_process_ceq(nesdev, &nesdev->nesadapter->ceq[nesdev->nic_ceq_index]); | ||
1704 | |||
1705 | if (unlikely((((nesadapter->et_rx_coalesce_usecs_irq) && | ||
1706 | (!nesadapter->et_use_adaptive_rx_coalesce)) || | ||
1707 | ((nesadapter->et_use_adaptive_rx_coalesce) && | ||
1708 | (nesdev->deepcq_count > nesadapter->et_pkt_rate_low)))) ) { | ||
1709 | if ((nesdev->int_req & NES_INT_TIMER) == 0) { | ||
1710 | /* Enable Periodic timer interrupts */ | ||
1711 | nesdev->int_req |= NES_INT_TIMER; | ||
1712 | /* ack any pending periodic timer interrupts so we don't get an immediate interrupt */ | ||
1713 | /* TODO: need to also ack other unused periodic timer values, get from nesadapter */ | ||
1714 | nes_write32(nesdev->regs+NES_TIMER_STAT, | ||
1715 | nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req)); | ||
1716 | nes_write32(nesdev->regs+NES_INTF_INT_MASK, | ||
1717 | ~(nesdev->intf_int_req | NES_INTF_PERIODIC_TIMER)); | ||
1718 | } | ||
1719 | |||
1720 | if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) | ||
1721 | { | ||
1722 | nes_nic_init_timer(nesdev); | ||
1723 | } | ||
1724 | /* Enable interrupts, except CEQs */ | ||
1725 | nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req)); | ||
1726 | } else { | ||
1727 | /* Enable interrupts, make sure timer is off */ | ||
1728 | nesdev->int_req &= ~NES_INT_TIMER; | ||
1729 | nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); | ||
1730 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); | ||
1731 | nesadapter->tune_timer.timer_in_use_old = 0; | ||
1732 | } | ||
1733 | nesdev->deepcq_count = 0; | ||
1734 | return 1; | ||
1735 | } else { | ||
1736 | return 0; | ||
1737 | } | ||
1738 | } | ||
1739 | |||
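nes_napi_isr() handles the interrupt inline only when every pending, enabled bit lies in the NIC CEQ range (bits 8-15); anything else is deferred to nes_dpc(). A minimal sketch of that fast-path test, assuming the same bit layout:

/* Illustrative only: true when the masked status contains nothing but
 * NIC CEQ bits (0x0000ff00), i.e. the fast path taken above. */
static inline int nes_only_nic_ints_example(u32 int_stat)
{
	return int_stat && ((int_stat & 0x0000ff00) == int_stat);
}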
1740 | |||
1741 | /** | ||
1742 | * nes_dpc - deferred (tasklet) interrupt processing | ||
1743 | */ | ||
1744 | void nes_dpc(unsigned long param) | ||
1745 | { | ||
1746 | struct nes_device *nesdev = (struct nes_device *)param; | ||
1747 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1748 | u32 counter; | ||
1749 | u32 loop_counter = 0; | ||
1750 | u32 int_status_bit; | ||
1751 | u32 int_stat; | ||
1752 | u32 timer_stat; | ||
1753 | u32 temp_int_stat; | ||
1754 | u32 intf_int_stat; | ||
1755 | u32 debug_error; | ||
1756 | u32 processed_intf_int = 0; | ||
1757 | u16 processed_timer_int = 0; | ||
1758 | u16 completion_ints = 0; | ||
1759 | u16 timer_ints = 0; | ||
1760 | |||
1761 | /* nes_debug(NES_DBG_ISR, "\n"); */ | ||
1762 | |||
1763 | do { | ||
1764 | timer_stat = 0; | ||
1765 | if (nesdev->napi_isr_ran) { | ||
1766 | nesdev->napi_isr_ran = 0; | ||
1767 | int_stat = nesdev->int_stat; | ||
1768 | } else | ||
1769 | int_stat = nes_read32(nesdev->regs+NES_INT_STAT); | ||
1770 | if (processed_intf_int != 0) | ||
1771 | int_stat &= nesdev->int_req & ~NES_INT_INTF; | ||
1772 | else | ||
1773 | int_stat &= nesdev->int_req; | ||
1774 | if (processed_timer_int == 0) { | ||
1775 | processed_timer_int = 1; | ||
1776 | if (int_stat & NES_INT_TIMER) { | ||
1777 | timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT); | ||
1778 | if ((timer_stat & nesdev->timer_int_req) == 0) { | ||
1779 | int_stat &= ~NES_INT_TIMER; | ||
1780 | } | ||
1781 | } | ||
1782 | } else { | ||
1783 | int_stat &= ~NES_INT_TIMER; | ||
1784 | } | ||
1785 | |||
1786 | if (int_stat) { | ||
1787 | if (int_stat & ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0| | ||
1788 | NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)) { | ||
1789 | /* Ack the interrupts */ | ||
1790 | nes_write32(nesdev->regs+NES_INT_STAT, | ||
1791 | (int_stat & ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0| | ||
1792 | NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3))); | ||
1793 | } | ||
1794 | |||
1795 | temp_int_stat = int_stat; | ||
1796 | for (counter = 0, int_status_bit = 1; counter < 16; counter++) { | ||
1797 | if (int_stat & int_status_bit) { | ||
1798 | nes_process_ceq(nesdev, &nesadapter->ceq[counter]); | ||
1799 | temp_int_stat &= ~int_status_bit; | ||
1800 | completion_ints = 1; | ||
1801 | } | ||
1802 | if (!(temp_int_stat & 0x0000ffff)) | ||
1803 | break; | ||
1804 | int_status_bit <<= 1; | ||
1805 | } | ||
1806 | |||
1807 | /* Process the AEQ for this pci function */ | ||
1808 | int_status_bit = 1 << (16 + PCI_FUNC(nesdev->pcidev->devfn)); | ||
1809 | if (int_stat & int_status_bit) { | ||
1810 | nes_process_aeq(nesdev, &nesadapter->aeq[PCI_FUNC(nesdev->pcidev->devfn)]); | ||
1811 | } | ||
1812 | |||
1813 | /* Process the MAC interrupt for this pci function */ | ||
1814 | int_status_bit = 1 << (24 + nesdev->mac_index); | ||
1815 | if (int_stat & int_status_bit) { | ||
1816 | nes_process_mac_intr(nesdev, nesdev->mac_index); | ||
1817 | } | ||
1818 | |||
1819 | if (int_stat & NES_INT_TIMER) { | ||
1820 | if (timer_stat & nesdev->timer_int_req) { | ||
1821 | nes_write32(nesdev->regs + NES_TIMER_STAT, | ||
1822 | (timer_stat & nesdev->timer_int_req) | | ||
1823 | ~(nesdev->nesadapter->timer_int_req)); | ||
1824 | timer_ints = 1; | ||
1825 | } | ||
1826 | } | ||
1827 | |||
1828 | if (int_stat & NES_INT_INTF) { | ||
1829 | processed_intf_int = 1; | ||
1830 | intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT); | ||
1831 | intf_int_stat &= nesdev->intf_int_req; | ||
1832 | if (NES_INTF_INT_CRITERR & intf_int_stat) { | ||
1833 | debug_error = nes_read_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS); | ||
1834 | printk(KERN_ERR PFX "Critical Error reported by device!!! 0x%02X\n", | ||
1835 | (u16)debug_error); | ||
1836 | nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS, | ||
1837 | 0x01010000 | (debug_error & 0x0000ffff)); | ||
1838 | /* BUG(); */ | ||
1839 | if (crit_err_count++ > 10) | ||
1840 | nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 1 << 0x17); | ||
1841 | } | ||
1842 | if (NES_INTF_INT_PCIERR & intf_int_stat) { | ||
1843 | printk(KERN_ERR PFX "PCI Error reported by device!!!\n"); | ||
1844 | BUG(); | ||
1845 | } | ||
1846 | if (NES_INTF_INT_AEQ_OFLOW & intf_int_stat) { | ||
1847 | printk(KERN_ERR PFX "AEQ Overflow reported by device!!!\n"); | ||
1848 | BUG(); | ||
1849 | } | ||
1850 | nes_write32(nesdev->regs+NES_INTF_INT_STAT, intf_int_stat); | ||
1851 | } | ||
1852 | |||
1853 | if (int_stat & NES_INT_TSW) { | ||
1854 | } | ||
1855 | } | ||
1856 | /* Don't let the interface interrupt bit keep us in the loop */ | ||
1857 | int_stat &= ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0| | ||
1858 | NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3); | ||
1859 | } while ((int_stat != 0) && (loop_counter++ < MAX_DPC_ITERATIONS)); | ||
1860 | |||
1861 | if (timer_ints == 1) { | ||
1862 | if ((nesadapter->et_rx_coalesce_usecs_irq) || (nesadapter->et_use_adaptive_rx_coalesce)) { | ||
1863 | if (completion_ints == 0) { | ||
1864 | nesdev->timer_only_int_count++; | ||
1865 | if (nesdev->timer_only_int_count>=nesadapter->timer_int_limit) { | ||
1866 | nesdev->timer_only_int_count = 0; | ||
1867 | nesdev->int_req &= ~NES_INT_TIMER; | ||
1868 | nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); | ||
1869 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); | ||
1870 | nesdev->nesadapter->tune_timer.timer_in_use_old = 0; | ||
1871 | } else { | ||
1872 | nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); | ||
1873 | } | ||
1874 | } else { | ||
1875 | if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) | ||
1876 | { | ||
1877 | nes_nic_init_timer(nesdev); | ||
1878 | } | ||
1879 | nesdev->timer_only_int_count = 0; | ||
1880 | nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); | ||
1881 | } | ||
1882 | } else { | ||
1883 | nesdev->timer_only_int_count = 0; | ||
1884 | nesdev->int_req &= ~NES_INT_TIMER; | ||
1885 | nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); | ||
1886 | nes_write32(nesdev->regs+NES_TIMER_STAT, | ||
1887 | nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req)); | ||
1888 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); | ||
1889 | } | ||
1890 | } else { | ||
1891 | if ( (completion_ints == 1) && | ||
1892 | (((nesadapter->et_rx_coalesce_usecs_irq) && | ||
1893 | (!nesadapter->et_use_adaptive_rx_coalesce)) || | ||
1894 | ((nesdev->deepcq_count > nesadapter->et_pkt_rate_low) && | ||
1895 | (nesadapter->et_use_adaptive_rx_coalesce) )) ) { | ||
1896 | /* nes_debug(NES_DBG_ISR, "Enabling periodic timer interrupt.\n" ); */ | ||
1897 | nesdev->timer_only_int_count = 0; | ||
1898 | nesdev->int_req |= NES_INT_TIMER; | ||
1899 | nes_write32(nesdev->regs+NES_TIMER_STAT, | ||
1900 | nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req)); | ||
1901 | nes_write32(nesdev->regs+NES_INTF_INT_MASK, | ||
1902 | ~(nesdev->intf_int_req | NES_INTF_PERIODIC_TIMER)); | ||
1903 | nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req)); | ||
1904 | } else { | ||
1905 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); | ||
1906 | } | ||
1907 | } | ||
1908 | nesdev->deepcq_count = 0; | ||
1909 | } | ||
1910 | |||
1911 | |||
1912 | /** | ||
1913 | * nes_process_ceq - drain a completion event queue and invoke each CQ's completion handler | ||
1914 | */ | ||
1915 | void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq) | ||
1916 | { | ||
1917 | u64 u64temp; | ||
1918 | struct nes_hw_cq *cq; | ||
1919 | u32 head; | ||
1920 | u32 ceq_size; | ||
1921 | |||
1922 | /* nes_debug(NES_DBG_CQ, "\n"); */ | ||
1923 | head = ceq->ceq_head; | ||
1924 | ceq_size = ceq->ceq_size; | ||
1925 | |||
1926 | do { | ||
1927 | if (le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX]) & | ||
1928 | NES_CEQE_VALID) { | ||
1929 | u64temp = (((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX])))<<32) | | ||
1930 | ((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_LOW_IDX]))); | ||
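| /* the CQ context was stored shifted right one bit; shift it back to recover the struct nes_hw_cq pointer */ | ||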
1931 | u64temp <<= 1; | ||
1932 | cq = *((struct nes_hw_cq **)&u64temp); | ||
1933 | /* nes_debug(NES_DBG_CQ, "pCQ = %p\n", cq); */ | ||
1934 | barrier(); | ||
1935 | ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX] = 0; | ||
1936 | |||
1937 | /* call the event handler */ | ||
1938 | cq->ce_handler(nesdev, cq); | ||
1939 | |||
1940 | if (++head >= ceq_size) | ||
1941 | head = 0; | ||
1942 | } else { | ||
1943 | break; | ||
1944 | } | ||
1945 | |||
1946 | } while (1); | ||
1947 | |||
1948 | ceq->ceq_head = head; | ||
1949 | } | ||
1950 | |||
1951 | |||
1952 | /** | ||
1953 | * nes_process_aeq - drain the asynchronous event queue and dispatch QP/CQP asynchronous events | ||
1954 | */ | ||
1955 | void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq) | ||
1956 | { | ||
1957 | // u64 u64temp; | ||
1958 | u32 head; | ||
1959 | u32 aeq_size; | ||
1960 | u32 aeqe_misc; | ||
1961 | u32 aeqe_cq_id; | ||
1962 | struct nes_hw_aeqe volatile *aeqe; | ||
1963 | |||
1964 | head = aeq->aeq_head; | ||
1965 | aeq_size = aeq->aeq_size; | ||
1966 | |||
1967 | do { | ||
1968 | aeqe = &aeq->aeq_vbase[head]; | ||
1969 | if ((le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]) & NES_AEQE_VALID) == 0) | ||
1970 | break; | ||
1971 | aeqe_misc = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); | ||
1972 | aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]); | ||
1973 | if (aeqe_misc & (NES_AEQE_QP|NES_AEQE_CQ)) { | ||
1974 | if (aeqe_cq_id >= NES_FIRST_QPN) { | ||
1975 | /* dealing with an accelerated QP related AE */ | ||
1976 | // u64temp = (((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])))<<32) | | ||
1977 | // ((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]))); | ||
1978 | nes_process_iwarp_aeqe(nesdev, (struct nes_hw_aeqe *)aeqe); | ||
1979 | } else { | ||
1980 | /* TODO: dealing with a CQP related AE */ | ||
1981 | nes_debug(NES_DBG_AEQ, "Processing CQP related AE, misc = 0x%04X\n", | ||
1982 | (u16)(aeqe_misc >> 16)); | ||
1983 | } | ||
1984 | } | ||
1985 | |||
1986 | aeqe->aeqe_words[NES_AEQE_MISC_IDX] = 0; | ||
1987 | |||
1988 | if (++head >= aeq_size) | ||
1989 | head = 0; | ||
1990 | } | ||
1991 | while (1); | ||
1992 | aeq->aeq_head = head; | ||
1993 | } | ||
1994 | |||
1995 | static void nes_reset_link(struct nes_device *nesdev, u32 mac_index) | ||
1996 | { | ||
1997 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1998 | u32 reset_value; | ||
1999 | u32 i=0; | ||
2000 | u32 u32temp; | ||
2001 | |||
2002 | if (nesadapter->hw_rev == NE020_REV) { | ||
2003 | return; | ||
2004 | } | ||
2005 | mh_detected++; | ||
2006 | |||
2007 | reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); | ||
2008 | |||
2009 | if ((mac_index == 0) || ((mac_index == 1) && (nesadapter->OneG_Mode))) | ||
2010 | reset_value |= 0x0000001d; | ||
2011 | else | ||
2012 | reset_value |= 0x0000002d; | ||
2013 | |||
2014 | if (4 <= (nesadapter->link_interrupt_count[mac_index] / ((u16)NES_MAX_LINK_INTERRUPTS))) { | ||
2015 | if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) { | ||
2016 | nesadapter->link_interrupt_count[0] = 0; | ||
2017 | nesadapter->link_interrupt_count[1] = 0; | ||
2018 | u32temp = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1); | ||
2019 | if (0x00000040 & u32temp) | ||
2020 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088); | ||
2021 | else | ||
2022 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8); | ||
2023 | |||
2024 | reset_value |= 0x0000003d; | ||
2025 | } | ||
2026 | nesadapter->link_interrupt_count[mac_index] = 0; | ||
2027 | } | ||
2028 | |||
2029 | nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value); | ||
2030 | |||
2031 | while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) | ||
2032 | & 0x00000040) != 0x00000040) && (i++ < 5000)); | ||
2033 | |||
2034 | if (0x0000003d == (reset_value & 0x0000003d)) { | ||
2035 | u32 pcs_control_status0, pcs_control_status1; | ||
2036 | |||
2037 | for (i = 0; i < 10; i++) { | ||
2038 | pcs_control_status0 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0); | ||
2039 | pcs_control_status1 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200); | ||
2040 | if (((0x0F000000 == (pcs_control_status0 & 0x0F000000)) | ||
2041 | && (pcs_control_status0 & 0x00100000)) | ||
2042 | || ((0x0F000000 == (pcs_control_status1 & 0x0F000000)) | ||
2043 | && (pcs_control_status1 & 0x00100000))) | ||
2044 | continue; | ||
2045 | else | ||
2046 | break; | ||
2047 | } | ||
2048 | if (10 == i) { | ||
2049 | u32temp = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1); | ||
2050 | if (0x00000040 & u32temp) | ||
2051 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088); | ||
2052 | else | ||
2053 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8); | ||
2054 | |||
2055 | nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value); | ||
2056 | |||
2057 | while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) | ||
2058 | & 0x00000040) != 0x00000040) && (i++ < 5000)); | ||
2059 | } | ||
2060 | } | ||
2061 | } | ||
2062 | |||
2063 | /** | ||
2064 | * nes_process_mac_intr - handle a MAC/PHY interrupt and propagate link state changes to the netdevs | ||
2065 | */ | ||
2066 | void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | ||
2067 | { | ||
2068 | unsigned long flags; | ||
2069 | u32 pcs_control_status; | ||
2070 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
2071 | struct nes_vnic *nesvnic; | ||
2072 | u32 mac_status; | ||
2073 | u32 mac_index = nesdev->mac_index; | ||
2074 | u32 u32temp; | ||
2075 | u16 phy_data; | ||
2076 | u16 temp_phy_data; | ||
2077 | |||
2078 | spin_lock_irqsave(&nesadapter->phy_lock, flags); | ||
2079 | if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) { | ||
2080 | spin_unlock_irqrestore(&nesadapter->phy_lock, flags); | ||
2081 | return; | ||
2082 | } | ||
2083 | nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT; | ||
2084 | spin_unlock_irqrestore(&nesadapter->phy_lock, flags); | ||
2085 | |||
2086 | /* ack the MAC interrupt */ | ||
2087 | mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200)); | ||
2088 | /* Clear the interrupt */ | ||
2089 | nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200), mac_status); | ||
2090 | |||
2091 | nes_debug(NES_DBG_PHY, "MAC%u interrupt status = 0x%X.\n", mac_number, mac_status); | ||
2092 | |||
2093 | if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) { | ||
2094 | nesdev->link_status_interrupts++; | ||
2095 | if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) { | ||
2096 | spin_lock_irqsave(&nesadapter->phy_lock, flags); | ||
2097 | nes_reset_link(nesdev, mac_index); | ||
2098 | spin_unlock_irqrestore(&nesadapter->phy_lock, flags); | ||
2099 | } | ||
2100 | /* read the PHY interrupt status register */ | ||
2101 | if (nesadapter->OneG_Mode) { | ||
2102 | do { | ||
2103 | nes_read_1G_phy_reg(nesdev, 0x1a, | ||
2104 | nesadapter->phy_index[mac_index], &phy_data); | ||
2105 | nes_debug(NES_DBG_PHY, "Phy%d data from register 0x1a = 0x%X.\n", | ||
2106 | nesadapter->phy_index[mac_index], phy_data); | ||
2107 | } while (phy_data&0x8000); | ||
2108 | |||
2109 | temp_phy_data = 0; | ||
2110 | do { | ||
2111 | nes_read_1G_phy_reg(nesdev, 0x11, | ||
2112 | nesadapter->phy_index[mac_index], &phy_data); | ||
2113 | nes_debug(NES_DBG_PHY, "Phy%d data from register 0x11 = 0x%X.\n", | ||
2114 | nesadapter->phy_index[mac_index], phy_data); | ||
2115 | if (temp_phy_data == phy_data) | ||
2116 | break; | ||
2117 | temp_phy_data = phy_data; | ||
2118 | } while (1); | ||
2119 | |||
2120 | nes_read_1G_phy_reg(nesdev, 0x1e, | ||
2121 | nesadapter->phy_index[mac_index], &phy_data); | ||
2122 | nes_debug(NES_DBG_PHY, "Phy%d data from register 0x1e = 0x%X.\n", | ||
2123 | nesadapter->phy_index[mac_index], phy_data); | ||
2124 | |||
2125 | nes_read_1G_phy_reg(nesdev, 1, | ||
2126 | nesadapter->phy_index[mac_index], &phy_data); | ||
2127 | nes_debug(NES_DBG_PHY, "1G phy%u data from register 1 = 0x%X\n", | ||
2128 | nesadapter->phy_index[mac_index], phy_data); | ||
2129 | |||
2130 | if (temp_phy_data & 0x1000) { | ||
2131 | nes_debug(NES_DBG_PHY, "The Link is up according to the PHY\n"); | ||
2132 | phy_data = 4; | ||
2133 | } else { | ||
2134 | nes_debug(NES_DBG_PHY, "The Link is down according to the PHY\n"); | ||
2135 | } | ||
2136 | } | ||
2137 | nes_debug(NES_DBG_PHY, "Eth SERDES Common Status: 0=0x%08X, 1=0x%08X\n", | ||
2138 | nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0), | ||
2139 | nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0+0x200)); | ||
2140 | pcs_control_status = nes_read_indexed(nesdev, | ||
2141 | NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index&1)*0x200)); | ||
2142 | pcs_control_status = nes_read_indexed(nesdev, | ||
2143 | NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index&1)*0x200)); | ||
2144 | nes_debug(NES_DBG_PHY, "PCS PHY Control/Status%u: 0x%08X\n", | ||
2145 | mac_index, pcs_control_status); | ||
2146 | if (nesadapter->OneG_Mode) { | ||
2147 | u32temp = 0x01010000; | ||
2148 | if (nesadapter->port_count > 2) { | ||
2149 | u32temp |= 0x02020000; | ||
2150 | } | ||
2151 | if ((pcs_control_status & u32temp)!= u32temp) { | ||
2152 | phy_data = 0; | ||
2153 | nes_debug(NES_DBG_PHY, "PCS says the link is down\n"); | ||
2154 | } | ||
2155 | } else if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) { | ||
2156 | nes_read_10G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index]); | ||
2157 | temp_phy_data = (u16)nes_read_indexed(nesdev, | ||
2158 | NES_IDX_MAC_MDIO_CONTROL); | ||
2159 | u32temp = 20; | ||
2160 | do { | ||
2161 | nes_read_10G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index]); | ||
2162 | phy_data = (u16)nes_read_indexed(nesdev, | ||
2163 | NES_IDX_MAC_MDIO_CONTROL); | ||
2164 | if ((phy_data == temp_phy_data) || (!(--u32temp))) | ||
2165 | break; | ||
2166 | temp_phy_data = phy_data; | ||
2167 | } while (1); | ||
2168 | nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n", | ||
2169 | __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP"); | ||
2170 | |||
2171 | } else { | ||
2172 | phy_data = (0x0f0f0000 == (pcs_control_status & 0x0f1f0000)) ? 4 : 0; | ||
2173 | } | ||
2174 | |||
2175 | if (phy_data & 0x0004) { | ||
2176 | nesadapter->mac_link_down[mac_index] = 0; | ||
2177 | list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { | ||
2178 | nes_debug(NES_DBG_PHY, "The Link is UP!!. linkup was %d\n", | ||
2179 | nesvnic->linkup); | ||
2180 | if (nesvnic->linkup == 0) { | ||
2181 | printk(PFX "The Link is now up for port %u, netdev %p.\n", | ||
2182 | mac_index, nesvnic->netdev); | ||
2183 | if (netif_queue_stopped(nesvnic->netdev)) | ||
2184 | netif_start_queue(nesvnic->netdev); | ||
2185 | nesvnic->linkup = 1; | ||
2186 | netif_carrier_on(nesvnic->netdev); | ||
2187 | } | ||
2188 | } | ||
2189 | } else { | ||
2190 | nesadapter->mac_link_down[mac_index] = 1; | ||
2191 | list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { | ||
2192 | nes_debug(NES_DBG_PHY, "The Link is Down!!. linkup was %d\n", | ||
2193 | nesvnic->linkup); | ||
2194 | if (nesvnic->linkup == 1) { | ||
2195 | printk(PFX "The Link is now down for port %u, netdev %p.\n", | ||
2196 | mac_index, nesvnic->netdev); | ||
2197 | if (!(netif_queue_stopped(nesvnic->netdev))) | ||
2198 | netif_stop_queue(nesvnic->netdev); | ||
2199 | nesvnic->linkup = 0; | ||
2200 | netif_carrier_off(nesvnic->netdev); | ||
2201 | } | ||
2202 | } | ||
2203 | } | ||
2204 | } | ||
2205 | |||
2206 | nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE; | ||
2207 | } | ||
2208 | |||
2209 | |||
2210 | |||
2211 | void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) | ||
2212 | { | ||
2213 | struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq); | ||
2214 | |||
2215 | netif_rx_schedule(nesdev->netdev[nesvnic->netdev_index], &nesvnic->napi); | ||
2216 | } | ||
2217 | |||
2218 | |||
2219 | /* MAX_RQES_TO_PROCESS defines the maximum number of receive queue entries to complete | ||
2220 | * before getting out of nic_ce_handler | ||
2221 | */ | ||
2222 | #define MAX_RQES_TO_PROCESS 384 | ||
2223 | |||
2224 | /** | ||
2225 | * nes_nic_ce_handler - process NIC completion queue entries (transmit completions and receive indications) | ||
2226 | */ | ||
2227 | void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) | ||
2228 | { | ||
2229 | u64 u64temp; | ||
2230 | dma_addr_t bus_address; | ||
2231 | struct nes_hw_nic *nesnic; | ||
2232 | struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq); | ||
2233 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
2234 | struct nes_hw_nic_rq_wqe *nic_rqe; | ||
2235 | struct nes_hw_nic_sq_wqe *nic_sqe; | ||
2236 | struct sk_buff *skb; | ||
2237 | struct sk_buff *rx_skb; | ||
2238 | __le16 *wqe_fragment_length; | ||
2239 | u32 head; | ||
2240 | u32 cq_size; | ||
2241 | u32 rx_pkt_size; | ||
2242 | u32 cqe_count=0; | ||
2243 | u32 cqe_errv; | ||
2244 | u32 cqe_misc; | ||
2245 | u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */ | ||
2246 | u16 vlan_tag; | ||
2247 | u16 pkt_type; | ||
2248 | u16 rqes_processed = 0; | ||
2249 | u8 sq_cqes = 0; | ||
2250 | |||
2251 | head = cq->cq_head; | ||
2252 | cq_size = cq->cq_size; | ||
2253 | cq->cqes_pending = 1; | ||
2254 | do { | ||
2255 | if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) & | ||
2256 | NES_NIC_CQE_VALID) { | ||
2257 | nesnic = &nesvnic->nic; | ||
2258 | cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]); | ||
2259 | if (cqe_misc & NES_NIC_CQE_SQ) { | ||
2260 | sq_cqes++; | ||
2261 | wqe_fragment_index = 1; | ||
2262 | nic_sqe = &nesnic->sq_vbase[nesnic->sq_tail]; | ||
2263 | skb = nesnic->tx_skb[nesnic->sq_tail]; | ||
2264 | wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX]; | ||
2265 | /* bump past the vlan tag */ | ||
2266 | wqe_fragment_length++; | ||
2267 | if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) { | ||
2268 | u64temp = (u64) le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+wqe_fragment_index*2]); | ||
2269 | u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+wqe_fragment_index*2]))<<32; | ||
2270 | bus_address = (dma_addr_t)u64temp; | ||
2271 | if (test_and_clear_bit(nesnic->sq_tail, nesnic->first_frag_overflow)) { | ||
2272 | pci_unmap_single(nesdev->pcidev, | ||
2273 | bus_address, | ||
2274 | le16_to_cpu(wqe_fragment_length[wqe_fragment_index++]), | ||
2275 | PCI_DMA_TODEVICE); | ||
2276 | } | ||
2277 | for (; wqe_fragment_index < 5; wqe_fragment_index++) { | ||
2278 | if (wqe_fragment_length[wqe_fragment_index]) { | ||
2279 | u64temp = le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+wqe_fragment_index*2]); | ||
2280 | u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+wqe_fragment_index*2]))<<32; | ||
2281 | bus_address = (dma_addr_t)u64temp; | ||
2282 | pci_unmap_page(nesdev->pcidev, | ||
2283 | bus_address, | ||
2284 | le16_to_cpu(wqe_fragment_length[wqe_fragment_index]), | ||
2285 | PCI_DMA_TODEVICE); | ||
2286 | } else | ||
2287 | break; | ||
2288 | } | ||
2289 | if (skb) | ||
2290 | dev_kfree_skb_any(skb); | ||
2291 | } | ||
2292 | nesnic->sq_tail++; | ||
2293 | nesnic->sq_tail &= nesnic->sq_size-1; | ||
2294 | if (sq_cqes > 128) { | ||
2295 | barrier(); | ||
2296 | /* restart the queue if it had been stopped */ | ||
2297 | if (netif_queue_stopped(nesvnic->netdev)) | ||
2298 | netif_wake_queue(nesvnic->netdev); | ||
2299 | sq_cqes = 0; | ||
2300 | } | ||
2301 | } else { | ||
2302 | rqes_processed ++; | ||
2303 | |||
2304 | cq->rx_cqes_completed++; | ||
2305 | cq->rx_pkts_indicated++; | ||
2306 | rx_pkt_size = cqe_misc & 0x0000ffff; | ||
2307 | nic_rqe = &nesnic->rq_vbase[nesnic->rq_tail]; | ||
2308 | /* Get the skb */ | ||
2309 | rx_skb = nesnic->rx_skb[nesnic->rq_tail]; | ||
2310 | nic_rqe = &nesnic->rq_vbase[nesvnic->nic.rq_tail]; | ||
2311 | bus_address = (dma_addr_t)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]); | ||
2312 | bus_address += ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32; | ||
2313 | pci_unmap_single(nesdev->pcidev, bus_address, | ||
2314 | nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); | ||
2315 | /* rx_skb->tail = rx_skb->data + rx_pkt_size; */ | ||
2316 | /* rx_skb->len = rx_pkt_size; */ | ||
2317 | rx_skb->len = 0; /* TODO: see if this is necessary */ | ||
2318 | skb_put(rx_skb, rx_pkt_size); | ||
2319 | rx_skb->protocol = eth_type_trans(rx_skb, nesvnic->netdev); | ||
2320 | nesnic->rq_tail++; | ||
2321 | nesnic->rq_tail &= nesnic->rq_size - 1; | ||
2322 | |||
2323 | atomic_inc(&nesvnic->rx_skbs_needed); | ||
2324 | if (atomic_read(&nesvnic->rx_skbs_needed) > (nesvnic->nic.rq_size>>1)) { | ||
2325 | nes_write32(nesdev->regs+NES_CQE_ALLOC, | ||
2326 | cq->cq_number | (cqe_count << 16)); | ||
2327 | // nesadapter->tune_timer.cq_count += cqe_count; | ||
2328 | nesdev->currcq_count += cqe_count; | ||
2329 | cqe_count = 0; | ||
2330 | nes_replenish_nic_rq(nesvnic); | ||
2331 | } | ||
2332 | pkt_type = (u16)(le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX])); | ||
2333 | cqe_errv = (cqe_misc & NES_NIC_CQE_ERRV_MASK) >> NES_NIC_CQE_ERRV_SHIFT; | ||
2334 | rx_skb->ip_summed = CHECKSUM_NONE; | ||
2335 | |||
2336 | if ((NES_PKT_TYPE_TCPV4_BITS == (pkt_type & NES_PKT_TYPE_TCPV4_MASK)) || | ||
2337 | (NES_PKT_TYPE_UDPV4_BITS == (pkt_type & NES_PKT_TYPE_UDPV4_MASK))) { | ||
2338 | if ((cqe_errv & | ||
2339 | (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR | | ||
2340 | NES_NIC_ERRV_BITS_IPH_ERR | NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) { | ||
2341 | if (nesvnic->rx_checksum_disabled == 0) { | ||
2342 | rx_skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
2343 | } | ||
2344 | } else | ||
2345 | nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed TCP or UDP packet." | ||
2346 | " errv = 0x%X, pkt_type = 0x%X.\n", | ||
2347 | nesvnic->netdev->name, cqe_errv, pkt_type); | ||
2348 | |||
2349 | } else if ((pkt_type & NES_PKT_TYPE_IPV4_MASK) == NES_PKT_TYPE_IPV4_BITS) { | ||
2350 | if ((cqe_errv & | ||
2351 | (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_IPH_ERR | | ||
2352 | NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) { | ||
2353 | if (nesvnic->rx_checksum_disabled == 0) { | ||
2354 | rx_skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
2355 | /* nes_debug(NES_DBG_CQ, "%s: Reporting successfully checksummed IPv4 packet.\n", | ||
2356 | nesvnic->netdev->name); */ | ||
2357 | } | ||
2358 | } else | ||
2359 | nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed IPv4 packet." | ||
2360 | " errv = 0x%X, pkt_type = 0x%X.\n", | ||
2361 | nesvnic->netdev->name, cqe_errv, pkt_type); | ||
2362 | } | ||
2363 | /* nes_debug(NES_DBG_CQ, "pkt_type=%x, APBVT_MASK=%x\n", | ||
2364 | pkt_type, (pkt_type & NES_PKT_TYPE_APBVT_MASK)); */ | ||
2365 | |||
2366 | if ((pkt_type & NES_PKT_TYPE_APBVT_MASK) == NES_PKT_TYPE_APBVT_BITS) { | ||
2367 | nes_cm_recv(rx_skb, nesvnic->netdev); | ||
2368 | } else { | ||
2369 | if ((cqe_misc & NES_NIC_CQE_TAG_VALID) && (nesvnic->vlan_grp != NULL)) { | ||
2370 | vlan_tag = (u16)(le32_to_cpu( | ||
2371 | cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX]) | ||
2372 | >> 16); | ||
2373 | nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n", | ||
2374 | nesvnic->netdev->name, vlan_tag); | ||
2375 | nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag); | ||
2376 | } else { | ||
2377 | nes_netif_rx(rx_skb); | ||
2378 | } | ||
2379 | } | ||
2380 | |||
2381 | nesvnic->netdev->last_rx = jiffies; | ||
2382 | /* nesvnic->netstats.rx_packets++; */ | ||
2383 | /* nesvnic->netstats.rx_bytes += rx_pkt_size; */ | ||
2384 | } | ||
2385 | |||
2386 | cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0; | ||
2387 | /* Accounting... */ | ||
2388 | cqe_count++; | ||
2389 | if (++head >= cq_size) | ||
2390 | head = 0; | ||
2391 | if (cqe_count == 255) { | ||
2392 | /* Replenish Nic CQ */ | ||
2393 | nes_write32(nesdev->regs+NES_CQE_ALLOC, | ||
2394 | cq->cq_number | (cqe_count << 16)); | ||
2395 | // nesdev->nesadapter->tune_timer.cq_count += cqe_count; | ||
2396 | nesdev->currcq_count += cqe_count; | ||
2397 | cqe_count = 0; | ||
2398 | } | ||
2399 | |||
2400 | if (cq->rx_cqes_completed >= nesvnic->budget) | ||
2401 | break; | ||
2402 | } else { | ||
2403 | cq->cqes_pending = 0; | ||
2404 | break; | ||
2405 | } | ||
2406 | |||
2407 | } while (1); | ||
2408 | |||
2409 | if (sq_cqes) { | ||
2410 | barrier(); | ||
2411 | /* restart the queue if it had been stopped */ | ||
2412 | if (netif_queue_stopped(nesvnic->netdev)) | ||
2413 | netif_wake_queue(nesvnic->netdev); | ||
2414 | } | ||
2415 | |||
2416 | cq->cq_head = head; | ||
2417 | /* nes_debug(NES_DBG_CQ, "CQ%u Processed = %u cqes, new head = %u.\n", | ||
2418 | cq->cq_number, cqe_count, cq->cq_head); */ | ||
2419 | cq->cqe_allocs_pending = cqe_count; | ||
2420 | if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) | ||
2421 | { | ||
2422 | // nesdev->nesadapter->tune_timer.cq_count += cqe_count; | ||
2423 | nesdev->currcq_count += cqe_count; | ||
2424 | nes_nic_tune_timer(nesdev); | ||
2425 | } | ||
2426 | if (atomic_read(&nesvnic->rx_skbs_needed)) | ||
2427 | nes_replenish_nic_rq(nesvnic); | ||
2428 | } | ||
2429 | |||
2430 | |||
2431 | /** | ||
2432 | * nes_cqp_ce_handler - process Control QP completions and post any pending CQP requests | ||
2433 | */ | ||
2434 | void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq) | ||
2435 | { | ||
2436 | u64 u64temp; | ||
2437 | unsigned long flags; | ||
2438 | struct nes_hw_cqp *cqp = NULL; | ||
2439 | struct nes_cqp_request *cqp_request; | ||
2440 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
2441 | u32 head; | ||
2442 | u32 cq_size; | ||
2443 | u32 cqe_count=0; | ||
2444 | u32 error_code; | ||
2445 | /* u32 counter; */ | ||
2446 | |||
2447 | head = cq->cq_head; | ||
2448 | cq_size = cq->cq_size; | ||
2449 | |||
2450 | do { | ||
2451 | /* process the CQE */ | ||
2452 | /* nes_debug(NES_DBG_CQP, "head=%u cqe_words=%08X\n", head, | ||
2453 | le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])); */ | ||
2454 | |||
2455 | if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) { | ||
2456 | u64temp = (((u64)(le32_to_cpu(cq->cq_vbase[head]. | ||
2457 | cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) | | ||
2458 | ((u64)(le32_to_cpu(cq->cq_vbase[head]. | ||
2459 | cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]))); | ||
2460 | cqp = *((struct nes_hw_cqp **)&u64temp); | ||
2461 | |||
2462 | error_code = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX]); | ||
2463 | if (error_code) { | ||
2464 | nes_debug(NES_DBG_CQP, "Bad Completion code for opcode 0x%02X from CQP," | ||
2465 | " Major/Minor codes = 0x%04X:%04X.\n", | ||
2466 | le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])&0x3f, | ||
2467 | (u16)(error_code >> 16), | ||
2468 | (u16)error_code); | ||
2469 | nes_debug(NES_DBG_CQP, "cqp: qp_id=%u, sq_head=%u, sq_tail=%u\n", | ||
2470 | cqp->qp_id, cqp->sq_head, cqp->sq_tail); | ||
2471 | } | ||
2472 | |||
2473 | u64temp = (((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail]. | ||
2474 | wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX])))<<32) | | ||
2475 | ((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail]. | ||
2476 | wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX]))); | ||
2477 | cqp_request = *((struct nes_cqp_request **)&u64temp); | ||
2478 | if (cqp_request) { | ||
2479 | if (cqp_request->waiting) { | ||
2480 | /* nes_debug(NES_DBG_CQP, "%s: Waking up requestor\n"); */ | ||
2481 | cqp_request->major_code = (u16)(error_code >> 16); | ||
2482 | cqp_request->minor_code = (u16)error_code; | ||
2483 | barrier(); | ||
2484 | cqp_request->request_done = 1; | ||
2485 | wake_up(&cqp_request->waitq); | ||
2486 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
2487 | nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n", | ||
2488 | cqp_request, | ||
2489 | le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f); | ||
2490 | if (cqp_request->dynamic) { | ||
2491 | kfree(cqp_request); | ||
2492 | } else { | ||
2493 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
2494 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
2495 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
2496 | } | ||
2497 | } | ||
2498 | } else if (cqp_request->callback) { | ||
2499 | /* Invoke the callback routine */ | ||
2500 | cqp_request->cqp_callback(nesdev, cqp_request); | ||
2501 | if (cqp_request->dynamic) { | ||
2502 | kfree(cqp_request); | ||
2503 | } else { | ||
2504 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
2505 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
2506 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
2507 | } | ||
2508 | } else { | ||
2509 | nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n", | ||
2510 | cqp_request, | ||
2511 | le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f); | ||
2512 | if (cqp_request->dynamic) { | ||
2513 | kfree(cqp_request); | ||
2514 | } else { | ||
2515 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
2516 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
2517 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
2518 | } | ||
2519 | } | ||
2520 | } else { | ||
2521 | wake_up(&nesdev->cqp.waitq); | ||
2522 | } | ||
2523 | |||
2524 | cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; | ||
2525 | nes_write32(nesdev->regs+NES_CQE_ALLOC, cq->cq_number | (1 << 16)); | ||
2526 | if (++cqp->sq_tail >= cqp->sq_size) | ||
2527 | cqp->sq_tail = 0; | ||
2528 | |||
2529 | /* Accounting... */ | ||
2530 | cqe_count++; | ||
2531 | if (++head >= cq_size) | ||
2532 | head = 0; | ||
2533 | } else { | ||
2534 | break; | ||
2535 | } | ||
2536 | } while (1); | ||
2537 | cq->cq_head = head; | ||
2538 | |||
2539 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
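| /* post pending CQP requests while the CQP SQ still has more than one free slot */ | ||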
2540 | while ((!list_empty(&nesdev->cqp_pending_reqs)) && | ||
2541 | ((((nesdev->cqp.sq_tail+nesdev->cqp.sq_size)-nesdev->cqp.sq_head) & | ||
2542 | (nesdev->cqp.sq_size - 1)) != 1)) { | ||
2543 | cqp_request = list_entry(nesdev->cqp_pending_reqs.next, | ||
2544 | struct nes_cqp_request, list); | ||
2545 | list_del_init(&cqp_request->list); | ||
2546 | head = nesdev->cqp.sq_head++; | ||
2547 | nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; | ||
2548 | cqp_wqe = &nesdev->cqp.sq_vbase[head]; | ||
2549 | memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe)); | ||
2550 | barrier(); | ||
2551 | cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = | ||
2552 | cpu_to_le32((u32)((unsigned long)cqp_request)); | ||
2553 | cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = | ||
2554 | cpu_to_le32((u32)(upper_32_bits((unsigned long)cqp_request))); | ||
2555 | nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) put on CQPs SQ wqe%u.\n", | ||
2556 | cqp_request, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, head); | ||
2557 | /* Ring doorbell (1 WQEs) */ | ||
2558 | barrier(); | ||
2559 | nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id); | ||
2560 | } | ||
2561 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
2562 | |||
2563 | /* Arm the CCQ */ | ||
2564 | nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | | ||
2565 | cq->cq_number); | ||
2566 | nes_read32(nesdev->regs+NES_CQE_ALLOC); | ||
2567 | } | ||
2568 | |||
2569 | |||
2570 | /** | ||
2571 | * nes_process_iwarp_aeqe - handle an iWARP asynchronous event for a QP | ||
2572 | */ | ||
2573 | void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe) | ||
2574 | { | ||
2575 | u64 context; | ||
2576 | u64 aeqe_context = 0; | ||
2577 | unsigned long flags; | ||
2578 | struct nes_qp *nesqp; | ||
2579 | int resource_allocated; | ||
2580 | /* struct iw_cm_id *cm_id; */ | ||
2581 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
2582 | struct ib_event ibevent; | ||
2583 | /* struct iw_cm_event cm_event; */ | ||
2584 | u32 aeq_info; | ||
2585 | u32 next_iwarp_state = 0; | ||
2586 | u16 async_event_id; | ||
2587 | u8 tcp_state; | ||
2588 | u8 iwarp_state; | ||
2589 | |||
2590 | nes_debug(NES_DBG_AEQ, "\n"); | ||
2591 | aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); | ||
2592 | if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) { | ||
2593 | context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]); | ||
2594 | context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32; | ||
2595 | } else { | ||
2596 | aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]); | ||
2597 | aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32; | ||
2598 | context = (unsigned long)nesadapter->qp_table[le32_to_cpu( | ||
2599 | aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; | ||
2600 | BUG_ON(!context); | ||
2601 | } | ||
2602 | |||
2603 | async_event_id = (u16)aeq_info; | ||
2604 | tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; | ||
2605 | iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; | ||
2606 | nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p," | ||
2607 | " Tcp state = %s, iWARP state = %s\n", | ||
2608 | async_event_id, | ||
2609 | le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe, | ||
2610 | nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]); | ||
2611 | |||
2612 | |||
2613 | switch (async_event_id) { | ||
2614 | case NES_AEQE_AEID_LLP_FIN_RECEIVED: | ||
2615 | nesqp = *((struct nes_qp **)&context); | ||
2616 | if (atomic_inc_return(&nesqp->close_timer_started) == 1) { | ||
2617 | nesqp->cm_id->add_ref(nesqp->cm_id); | ||
2618 | nes_add_ref(&nesqp->ibqp); | ||
2619 | schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, | ||
2620 | NES_TIMER_TYPE_CLOSE, 1, 0); | ||
2621 | nes_debug(NES_DBG_AEQ, "QP%u Not decrementing QP refcount (%d)," | ||
2622 | " need ae to finish up, original_last_aeq = 0x%04X." | ||
2623 | " last_aeq = 0x%04X, scheduling timer. TCP state = %d\n", | ||
2624 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), | ||
2625 | async_event_id, nesqp->last_aeq, tcp_state); | ||
2626 | } | ||
2627 | if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) || | ||
2628 | (nesqp->ibqp_state != IB_QPS_RTS)) { | ||
2629 | /* FIN received, but the TCP state or IB state has moved on; | ||
2630 | a close complete should follow */ | ||
2631 | return; | ||
2632 | } | ||
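| /* fall through to the close/reset handling below */ | ||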
2633 | case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: | ||
2634 | case NES_AEQE_AEID_LLP_CONNECTION_RESET: | ||
2635 | case NES_AEQE_AEID_TERMINATE_SENT: | ||
2636 | case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE: | ||
2637 | case NES_AEQE_AEID_RESET_SENT: | ||
2638 | nesqp = *((struct nes_qp **)&context); | ||
2639 | if (async_event_id == NES_AEQE_AEID_RESET_SENT) { | ||
2640 | tcp_state = NES_AEQE_TCP_STATE_CLOSED; | ||
2641 | } | ||
2642 | nes_add_ref(&nesqp->ibqp); | ||
2643 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2644 | nesqp->hw_iwarp_state = iwarp_state; | ||
2645 | nesqp->hw_tcp_state = tcp_state; | ||
2646 | nesqp->last_aeq = async_event_id; | ||
2647 | |||
2648 | if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) || | ||
2649 | (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) { | ||
2650 | nesqp->hte_added = 0; | ||
2651 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2652 | nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n", | ||
2653 | nesqp->hwqp.qp_id); | ||
2654 | nes_hw_modify_qp(nesdev, nesqp, | ||
2655 | NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0); | ||
2656 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2657 | } | ||
2658 | |||
2659 | if ((nesqp->ibqp_state == IB_QPS_RTS) && | ||
2660 | ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || | ||
2661 | (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { | ||
2662 | switch (nesqp->hw_iwarp_state) { | ||
2663 | case NES_AEQE_IWARP_STATE_RTS: | ||
2664 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING; | ||
2665 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; | ||
2666 | break; | ||
2667 | case NES_AEQE_IWARP_STATE_TERMINATE: | ||
2668 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE; | ||
2669 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE; | ||
2670 | if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) { | ||
2671 | next_iwarp_state |= 0x02000000; | ||
2672 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; | ||
2673 | } | ||
2674 | break; | ||
2675 | default: | ||
2676 | next_iwarp_state = 0; | ||
2677 | } | ||
2678 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2679 | if (next_iwarp_state) { | ||
2680 | nes_add_ref(&nesqp->ibqp); | ||
2681 | nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X," | ||
2682 | " also added another reference\n", | ||
2683 | nesqp->hwqp.qp_id, next_iwarp_state); | ||
2684 | nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0); | ||
2685 | } | ||
2686 | nes_cm_disconn(nesqp); | ||
2687 | } else { | ||
2688 | if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) { | ||
2689 | /* FIN Received but ib state not RTS, | ||
2690 | close complete will be on its way */ | ||
2691 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2692 | nes_rem_ref(&nesqp->ibqp); | ||
2693 | return; | ||
2694 | } | ||
2695 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2696 | if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) { | ||
2697 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000; | ||
2698 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; | ||
2699 | nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X," | ||
2700 | " also added another reference\n", | ||
2701 | nesqp->hwqp.qp_id, next_iwarp_state); | ||
2702 | nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0); | ||
2703 | } | ||
2704 | nes_cm_disconn(nesqp); | ||
2705 | } | ||
2706 | break; | ||
2707 | case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED: | ||
2708 | nesqp = *((struct nes_qp **)&context); | ||
2709 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2710 | nesqp->hw_iwarp_state = iwarp_state; | ||
2711 | nesqp->hw_tcp_state = tcp_state; | ||
2712 | nesqp->last_aeq = async_event_id; | ||
2713 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2714 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED" | ||
2715 | " event on QP%u \n Q2 Data:\n", | ||
2716 | nesqp->hwqp.qp_id); | ||
2717 | if (nesqp->ibqp.event_handler) { | ||
2718 | ibevent.device = nesqp->ibqp.device; | ||
2719 | ibevent.element.qp = &nesqp->ibqp; | ||
2720 | ibevent.event = IB_EVENT_QP_FATAL; | ||
2721 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
2722 | } | ||
2723 | if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || | ||
2724 | ((nesqp->ibqp_state == IB_QPS_RTS)&& | ||
2725 | (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { | ||
2726 | nes_add_ref(&nesqp->ibqp); | ||
2727 | nes_cm_disconn(nesqp); | ||
2728 | } else { | ||
2729 | nesqp->in_disconnect = 0; | ||
2730 | wake_up(&nesqp->kick_waitq); | ||
2731 | } | ||
2732 | break; | ||
2733 | case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES: | ||
2734 | nesqp = *((struct nes_qp **)&context); | ||
2735 | nes_add_ref(&nesqp->ibqp); | ||
2736 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2737 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR; | ||
2738 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; | ||
2739 | nesqp->last_aeq = async_event_id; | ||
2740 | if (nesqp->cm_id) { | ||
2741 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES" | ||
2742 | " event on QP%u, remote IP = 0x%08X \n", | ||
2743 | nesqp->hwqp.qp_id, | ||
2744 | ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr)); | ||
2745 | } else { | ||
2746 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES" | ||
2747 | " event on QP%u \n", | ||
2748 | nesqp->hwqp.qp_id); | ||
2749 | } | ||
2750 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2751 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET; | ||
2752 | nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0); | ||
2753 | if (nesqp->ibqp.event_handler) { | ||
2754 | ibevent.device = nesqp->ibqp.device; | ||
2755 | ibevent.element.qp = &nesqp->ibqp; | ||
2756 | ibevent.event = IB_EVENT_QP_FATAL; | ||
2757 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
2758 | } | ||
2759 | break; | ||
2760 | case NES_AEQE_AEID_AMP_BAD_STAG_INDEX: | ||
2761 | if (NES_AEQE_INBOUND_RDMA&aeq_info) { | ||
2762 | nesqp = nesadapter->qp_table[le32_to_cpu( | ||
2763 | aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; | ||
2764 | } else { | ||
2765 | /* TODO: get the actual WQE and mask off wqe index */ | ||
2766 | context &= ~((u64)511); | ||
2767 | nesqp = *((struct nes_qp **)&context); | ||
2768 | } | ||
2769 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2770 | nesqp->hw_iwarp_state = iwarp_state; | ||
2771 | nesqp->hw_tcp_state = tcp_state; | ||
2772 | nesqp->last_aeq = async_event_id; | ||
2773 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2774 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n", | ||
2775 | nesqp->hwqp.qp_id); | ||
2776 | if (nesqp->ibqp.event_handler) { | ||
2777 | ibevent.device = nesqp->ibqp.device; | ||
2778 | ibevent.element.qp = &nesqp->ibqp; | ||
2779 | ibevent.event = IB_EVENT_QP_ACCESS_ERR; | ||
2780 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
2781 | } | ||
2782 | break; | ||
2783 | case NES_AEQE_AEID_AMP_UNALLOCATED_STAG: | ||
2784 | nesqp = *((struct nes_qp **)&context); | ||
2785 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2786 | nesqp->hw_iwarp_state = iwarp_state; | ||
2787 | nesqp->hw_tcp_state = tcp_state; | ||
2788 | nesqp->last_aeq = async_event_id; | ||
2789 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2790 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_UNALLOCATED_STAG event on QP%u\n", | ||
2791 | nesqp->hwqp.qp_id); | ||
2792 | if (nesqp->ibqp.event_handler) { | ||
2793 | ibevent.device = nesqp->ibqp.device; | ||
2794 | ibevent.element.qp = &nesqp->ibqp; | ||
2795 | ibevent.event = IB_EVENT_QP_ACCESS_ERR; | ||
2796 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
2797 | } | ||
2798 | break; | ||
2799 | case NES_AEQE_AEID_PRIV_OPERATION_DENIED: | ||
2800 | nesqp = nesadapter->qp_table[le32_to_cpu(aeqe->aeqe_words | ||
2801 | [NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; | ||
2802 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2803 | nesqp->hw_iwarp_state = iwarp_state; | ||
2804 | nesqp->hw_tcp_state = tcp_state; | ||
2805 | nesqp->last_aeq = async_event_id; | ||
2806 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2807 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_PRIV_OPERATION_DENIED event on QP%u," | ||
2808 | " nesqp = %p, AE reported %p\n", | ||
2809 | nesqp->hwqp.qp_id, nesqp, *((struct nes_qp **)&context)); | ||
2810 | if (nesqp->ibqp.event_handler) { | ||
2811 | ibevent.device = nesqp->ibqp.device; | ||
2812 | ibevent.element.qp = &nesqp->ibqp; | ||
2813 | ibevent.event = IB_EVENT_QP_ACCESS_ERR; | ||
2814 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
2815 | } | ||
2816 | break; | ||
2817 | case NES_AEQE_AEID_CQ_OPERATION_ERROR: | ||
2818 | context <<= 1; | ||
2819 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n", | ||
2820 | le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), (void *)(unsigned long)context); | ||
2821 | resource_allocated = nes_is_resource_allocated(nesadapter, nesadapter->allocated_cqs, | ||
2822 | le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])); | ||
2823 | if (resource_allocated) { | ||
2824 | printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n", | ||
2825 | __func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])); | ||
2826 | } | ||
2827 | break; | ||
2828 | case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: | ||
2829 | nesqp = nesadapter->qp_table[le32_to_cpu( | ||
2830 | aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; | ||
2831 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2832 | nesqp->hw_iwarp_state = iwarp_state; | ||
2833 | nesqp->hw_tcp_state = tcp_state; | ||
2834 | nesqp->last_aeq = async_event_id; | ||
2835 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2836 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG" | ||
2837 | "_FOR_AVAILABLE_BUFFER event on QP%u\n", | ||
2838 | nesqp->hwqp.qp_id); | ||
2839 | if (nesqp->ibqp.event_handler) { | ||
2840 | ibevent.device = nesqp->ibqp.device; | ||
2841 | ibevent.element.qp = &nesqp->ibqp; | ||
2842 | ibevent.event = IB_EVENT_QP_ACCESS_ERR; | ||
2843 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
2844 | } | ||
2845 | /* tell cm to disconnect, cm will queue work to thread */ | ||
2846 | nes_add_ref(&nesqp->ibqp); | ||
2847 | nes_cm_disconn(nesqp); | ||
2848 | break; | ||
2849 | case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: | ||
2850 | nesqp = *((struct nes_qp **)&context); | ||
2851 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2852 | nesqp->hw_iwarp_state = iwarp_state; | ||
2853 | nesqp->hw_tcp_state = tcp_state; | ||
2854 | nesqp->last_aeq = async_event_id; | ||
2855 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2856 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_INVALID_MSN" | ||
2857 | "_NO_BUFFER_AVAILABLE event on QP%u\n", | ||
2858 | nesqp->hwqp.qp_id); | ||
2859 | if (nesqp->ibqp.event_handler) { | ||
2860 | ibevent.device = nesqp->ibqp.device; | ||
2861 | ibevent.element.qp = &nesqp->ibqp; | ||
2862 | ibevent.event = IB_EVENT_QP_FATAL; | ||
2863 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
2864 | } | ||
2865 | /* tell cm to disconnect, cm will queue work to thread */ | ||
2866 | nes_add_ref(&nesqp->ibqp); | ||
2867 | nes_cm_disconn(nesqp); | ||
2868 | break; | ||
2869 | case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR: | ||
2870 | nesqp = *((struct nes_qp **)&context); | ||
2871 | spin_lock_irqsave(&nesqp->lock, flags); | ||
2872 | nesqp->hw_iwarp_state = iwarp_state; | ||
2873 | nesqp->hw_tcp_state = tcp_state; | ||
2874 | nesqp->last_aeq = async_event_id; | ||
2875 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
2876 | nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR" | ||
2877 | " event on QP%u \n Q2 Data:\n", | ||
2878 | nesqp->hwqp.qp_id); | ||
2879 | if (nesqp->ibqp.event_handler) { | ||
2880 | ibevent.device = nesqp->ibqp.device; | ||
2881 | ibevent.element.qp = &nesqp->ibqp; | ||
2882 | ibevent.event = IB_EVENT_QP_FATAL; | ||
2883 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
2884 | } | ||
2885 | /* tell cm to disconnect, cm will queue work to thread */ | ||
2886 | nes_add_ref(&nesqp->ibqp); | ||
2887 | nes_cm_disconn(nesqp); | ||
2888 | break; | ||
2889 | /* TODO: additional AEs need to be here */ | ||
2890 | default: | ||
2891 | nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n", | ||
2892 | async_event_id); | ||
2893 | break; | ||
2894 | } | ||
2895 | |||
2896 | } | ||
2897 | |||
2898 | |||
2899 | /** | ||
2900 | * nes_iwarp_ce_handler - acknowledge an iWARP CQ event and invoke the consumer's completion handler | ||
2901 | */ | ||
2902 | void nes_iwarp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *hw_cq) | ||
2903 | { | ||
2904 | struct nes_cq *nescq = container_of(hw_cq, struct nes_cq, hw_cq); | ||
2905 | |||
2906 | /* nes_debug(NES_DBG_CQ, "Processing completion event for iWARP CQ%u.\n", | ||
2907 | nescq->hw_cq.cq_number); */ | ||
2908 | nes_write32(nesdev->regs+NES_CQ_ACK, nescq->hw_cq.cq_number); | ||
2909 | |||
2910 | if (nescq->ibcq.comp_handler) | ||
2911 | nescq->ibcq.comp_handler(&nescq->ibcq, nescq->ibcq.cq_context); | ||
2912 | |||
2913 | return; | ||
2914 | } | ||
2915 | |||
2916 | |||
2917 | /** | ||
2918 | * nes_manage_apbvt - add or remove an accelerated local port in the APBVT via a CQP request | ||
2919 | */ | ||
2920 | int nes_manage_apbvt(struct nes_vnic *nesvnic, u32 accel_local_port, | ||
2921 | u32 nic_index, u32 add_port) | ||
2922 | { | ||
2923 | struct nes_device *nesdev = nesvnic->nesdev; | ||
2924 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
2925 | unsigned long flags; | ||
2926 | struct nes_cqp_request *cqp_request; | ||
2927 | int ret = 0; | ||
2928 | u16 major_code; | ||
2929 | |||
2930 | /* Send manage APBVT request to CQP */ | ||
2931 | cqp_request = nes_get_cqp_request(nesdev); | ||
2932 | if (cqp_request == NULL) { | ||
2933 | nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n"); | ||
2934 | return -ENOMEM; | ||
2935 | } | ||
2936 | cqp_request->waiting = 1; | ||
2937 | cqp_wqe = &cqp_request->cqp_wqe; | ||
2938 | |||
2939 | nes_debug(NES_DBG_QP, "%s APBVT for local port=%u(0x%04x), nic_index=%u\n", | ||
2940 | (add_port == NES_MANAGE_APBVT_ADD) ? "ADD" : "DEL", | ||
2941 | accel_local_port, accel_local_port, nic_index); | ||
2942 | |||
2943 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
2944 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_MANAGE_APBVT | | ||
2945 | ((add_port == NES_MANAGE_APBVT_ADD) ? NES_CQP_APBVT_ADD : 0))); | ||
2946 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, | ||
2947 | ((nic_index << NES_CQP_APBVT_NIC_SHIFT) | accel_local_port)); | ||
2948 | |||
2949 | nes_debug(NES_DBG_QP, "Waiting for CQP completion for APBVT.\n"); | ||
2950 | |||
2951 | atomic_set(&cqp_request->refcount, 2); | ||
2952 | nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL); | ||
2953 | |||
2954 | if (add_port == NES_MANAGE_APBVT_ADD) | ||
2955 | ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0), | ||
2956 | NES_EVENT_TIMEOUT); | ||
2957 | nes_debug(NES_DBG_QP, "Completed, ret=%u, CQP Major:Minor codes = 0x%04X:0x%04X\n", | ||
2958 | ret, cqp_request->major_code, cqp_request->minor_code); | ||
2959 | major_code = cqp_request->major_code; | ||
2960 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
2961 | if (cqp_request->dynamic) { | ||
2962 | kfree(cqp_request); | ||
2963 | } else { | ||
2964 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
2965 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
2966 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
2967 | } | ||
2968 | } | ||
2969 | if (!ret) | ||
2970 | return -ETIME; | ||
2971 | else if (major_code) | ||
2972 | return -EIO; | ||
2973 | else | ||
2974 | return 0; | ||
2975 | } | ||
2976 | |||
2977 | |||
2978 | /** | ||
2979 | * nes_manage_arp_cache - add or remove a hardware ARP cache entry via a CQP request | ||
2980 | */ | ||
2981 | void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr, | ||
2982 | u32 ip_addr, u32 action) | ||
2983 | { | ||
2984 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
2985 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
2986 | struct nes_device *nesdev; | ||
2987 | struct nes_cqp_request *cqp_request; | ||
2988 | int arp_index; | ||
2989 | |||
2990 | nesdev = nesvnic->nesdev; | ||
2991 | arp_index = nes_arp_table(nesdev, ip_addr, mac_addr, action); | ||
2992 | if (arp_index == -1) { | ||
2993 | return; | ||
2994 | } | ||
2995 | |||
2996 | /* update the ARP entry */ | ||
2997 | cqp_request = nes_get_cqp_request(nesdev); | ||
2998 | if (cqp_request == NULL) { | ||
2999 | nes_debug(NES_DBG_NETDEV, "Failed to get a cqp_request.\n"); | ||
3000 | return; | ||
3001 | } | ||
3002 | cqp_request->waiting = 0; | ||
3003 | cqp_wqe = &cqp_request->cqp_wqe; | ||
3004 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
3005 | |||
3006 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32( | ||
3007 | NES_CQP_MANAGE_ARP_CACHE | NES_CQP_ARP_PERM); | ||
3008 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32( | ||
3009 | (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_CQP_ARP_AEQ_INDEX_SHIFT); | ||
3010 | cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(arp_index); | ||
3011 | |||
3012 | if (action == NES_ARP_ADD) { | ||
3013 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_ARP_VALID); | ||
3014 | cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = cpu_to_le32( | ||
3015 | (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) | | ||
3016 | (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]); | ||
3017 | cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32( | ||
3018 | (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]); | ||
3019 | } else { | ||
3020 | cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0; | ||
3021 | cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0; | ||
3022 | } | ||
3023 | |||
3024 | nes_debug(NES_DBG_NETDEV, "Not waiting for CQP, cqp.sq_head=%u, cqp.sq_tail=%u\n", | ||
3025 | nesdev->cqp.sq_head, nesdev->cqp.sq_tail); | ||
3026 | |||
3027 | atomic_set(&cqp_request->refcount, 1); | ||
3028 | nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL); | ||
3029 | } | ||
3030 | |||
3031 | |||
3032 | /** | ||
3033 | * flush_wqes - issue a CQP request to flush a QP's work queue entries | ||
3034 | */ | ||
3035 | void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp, | ||
3036 | u32 which_wq, u32 wait_completion) | ||
3037 | { | ||
3038 | unsigned long flags; | ||
3039 | struct nes_cqp_request *cqp_request; | ||
3040 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
3041 | int ret; | ||
3042 | |||
3043 | cqp_request = nes_get_cqp_request(nesdev); | ||
3044 | if (cqp_request == NULL) { | ||
3045 | nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n"); | ||
3046 | return; | ||
3047 | } | ||
3048 | if (wait_completion) { | ||
3049 | cqp_request->waiting = 1; | ||
3050 | atomic_set(&cqp_request->refcount, 2); | ||
3051 | } else { | ||
3052 | cqp_request->waiting = 0; | ||
3053 | } | ||
3054 | cqp_wqe = &cqp_request->cqp_wqe; | ||
3055 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
3056 | |||
3057 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = | ||
3058 | cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq); | ||
3059 | cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id); | ||
3060 | |||
3061 | nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL); | ||
3062 | |||
3063 | if (wait_completion) { | ||
3064 | /* Wait for CQP */ | ||
3065 | ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0), | ||
3066 | NES_EVENT_TIMEOUT); | ||
3067 | nes_debug(NES_DBG_QP, "Flush SQ QP WQEs completed, ret=%u," | ||
3068 | " CQP Major:Minor codes = 0x%04X:0x%04X\n", | ||
3069 | ret, cqp_request->major_code, cqp_request->minor_code); | ||
3070 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
3071 | if (cqp_request->dynamic) { | ||
3072 | kfree(cqp_request); | ||
3073 | } else { | ||
3074 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
3075 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
3076 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
3077 | } | ||
3078 | } | ||
3079 | } | ||
3080 | } | ||
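Both nes_manage_arp_cache above and flush_wqes follow the driver's common CQP request pattern: take a request from the pool, build the 16-word CQP WQE, post it through the doorbell, and either fire-and-forget (waiting = 0, refcount 1) or sleep on the request's wait queue and then recycle the slot. The following is a minimal sketch of the synchronous variant, shown for illustration only and not part of the patch; the opcode and ID are placeholders, and it assumes the nes.h declarations for the helpers used by the two functions above.

static int nes_cqp_request_sketch(struct nes_device *nesdev, u32 opcode, u32 id)
{
	struct nes_cqp_request *cqp_request;
	struct nes_hw_cqp_wqe *cqp_wqe;
	unsigned long flags;
	int ret;

	cqp_request = nes_get_cqp_request(nesdev);
	if (cqp_request == NULL)
		return -ENOMEM;

	/* request a wakeup from the CQP completion handler */
	cqp_request->waiting = 1;
	atomic_set(&cqp_request->refcount, 2);

	cqp_wqe = &cqp_request->cqp_wqe;
	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
	cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(opcode);
	cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(id);

	nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);

	/* the completion handler sets request_done and drops its reference */
	ret = wait_event_timeout(cqp_request->waitq, cqp_request->request_done != 0,
			NES_EVENT_TIMEOUT);

	/* drop our reference; free a dynamic request or return it to the pool */
	if (atomic_dec_and_test(&cqp_request->refcount)) {
		if (cqp_request->dynamic) {
			kfree(cqp_request);
		} else {
			spin_lock_irqsave(&nesdev->cqp.lock, flags);
			list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
			spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
		}
	}
	return ret ? 0 : -ETIMEDOUT;
}

flush_wqes above is exactly this shape, with NES_CQP_FLUSH_WQES | which_wq as the opcode word and the QP id in the ID word.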
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h new file mode 100644 index 000000000000..1e10df550c9e --- /dev/null +++ b/drivers/infiniband/hw/nes/nes_hw.h | |||
@@ -0,0 +1,1206 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #ifndef __NES_HW_H | ||
34 | #define __NES_HW_H | ||
35 | |||
36 | #define NES_PHY_TYPE_1G 2 | ||
37 | #define NES_PHY_TYPE_IRIS 3 | ||
38 | #define NES_PHY_TYPE_PUMA_10G 6 | ||
39 | |||
40 | #define NES_MULTICAST_PF_MAX 8 | ||
41 | |||
42 | enum pci_regs { | ||
43 | NES_INT_STAT = 0x0000, | ||
44 | NES_INT_MASK = 0x0004, | ||
45 | NES_INT_PENDING = 0x0008, | ||
46 | NES_INTF_INT_STAT = 0x000C, | ||
47 | NES_INTF_INT_MASK = 0x0010, | ||
48 | NES_TIMER_STAT = 0x0014, | ||
49 | NES_PERIODIC_CONTROL = 0x0018, | ||
50 | NES_ONE_SHOT_CONTROL = 0x001C, | ||
51 | NES_EEPROM_COMMAND = 0x0020, | ||
52 | NES_EEPROM_DATA = 0x0024, | ||
53 | NES_FLASH_COMMAND = 0x0028, | ||
54 | NES_FLASH_DATA = 0x002C, | ||
55 | NES_SOFTWARE_RESET = 0x0030, | ||
56 | NES_CQ_ACK = 0x0034, | ||
57 | NES_WQE_ALLOC = 0x0040, | ||
58 | NES_CQE_ALLOC = 0x0044, | ||
59 | }; | ||
60 | |||
61 | enum indexed_regs { | ||
62 | NES_IDX_CREATE_CQP_LOW = 0x0000, | ||
63 | NES_IDX_CREATE_CQP_HIGH = 0x0004, | ||
64 | NES_IDX_QP_CONTROL = 0x0040, | ||
65 | NES_IDX_FLM_CONTROL = 0x0080, | ||
66 | NES_IDX_INT_CPU_STATUS = 0x00a0, | ||
67 | NES_IDX_GPIO_CONTROL = 0x00f0, | ||
68 | NES_IDX_GPIO_DATA = 0x00f4, | ||
69 | NES_IDX_TCP_CONFIG0 = 0x01e4, | ||
70 | NES_IDX_TCP_TIMER_CONFIG = 0x01ec, | ||
71 | NES_IDX_TCP_NOW = 0x01f0, | ||
72 | NES_IDX_QP_MAX_CFG_SIZES = 0x0200, | ||
73 | NES_IDX_QP_CTX_SIZE = 0x0218, | ||
74 | NES_IDX_TCP_TIMER_SIZE0 = 0x0238, | ||
75 | NES_IDX_TCP_TIMER_SIZE1 = 0x0240, | ||
76 | NES_IDX_ARP_CACHE_SIZE = 0x0258, | ||
77 | NES_IDX_CQ_CTX_SIZE = 0x0260, | ||
78 | NES_IDX_MRT_SIZE = 0x0278, | ||
79 | NES_IDX_PBL_REGION_SIZE = 0x0280, | ||
80 | NES_IDX_IRRQ_COUNT = 0x02b0, | ||
81 | NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x02f0, | ||
82 | NES_IDX_RX_WINDOW_BUFFER_SIZE = 0x0300, | ||
83 | NES_IDX_DST_IP_ADDR = 0x0400, | ||
84 | NES_IDX_PCIX_DIAG = 0x08e8, | ||
85 | NES_IDX_MPP_DEBUG = 0x0a00, | ||
86 | NES_IDX_PORT_RX_DISCARDS = 0x0a30, | ||
87 | NES_IDX_PORT_TX_DISCARDS = 0x0a34, | ||
88 | NES_IDX_MPP_LB_DEBUG = 0x0b00, | ||
89 | NES_IDX_DENALI_CTL_22 = 0x1058, | ||
90 | NES_IDX_MAC_TX_CONTROL = 0x2000, | ||
91 | NES_IDX_MAC_TX_CONFIG = 0x2004, | ||
92 | NES_IDX_MAC_TX_PAUSE_QUANTA = 0x2008, | ||
93 | NES_IDX_MAC_RX_CONTROL = 0x200c, | ||
94 | NES_IDX_MAC_RX_CONFIG = 0x2010, | ||
95 | NES_IDX_MAC_EXACT_MATCH_BOTTOM = 0x201c, | ||
96 | NES_IDX_MAC_MDIO_CONTROL = 0x2084, | ||
97 | NES_IDX_MAC_TX_OCTETS_LOW = 0x2100, | ||
98 | NES_IDX_MAC_TX_OCTETS_HIGH = 0x2104, | ||
99 | NES_IDX_MAC_TX_FRAMES_LOW = 0x2108, | ||
100 | NES_IDX_MAC_TX_FRAMES_HIGH = 0x210c, | ||
101 | NES_IDX_MAC_TX_PAUSE_FRAMES = 0x2118, | ||
102 | NES_IDX_MAC_TX_ERRORS = 0x2138, | ||
103 | NES_IDX_MAC_RX_OCTETS_LOW = 0x213c, | ||
104 | NES_IDX_MAC_RX_OCTETS_HIGH = 0x2140, | ||
105 | NES_IDX_MAC_RX_FRAMES_LOW = 0x2144, | ||
106 | NES_IDX_MAC_RX_FRAMES_HIGH = 0x2148, | ||
107 | NES_IDX_MAC_RX_BC_FRAMES_LOW = 0x214c, | ||
108 | NES_IDX_MAC_RX_MC_FRAMES_HIGH = 0x2150, | ||
109 | NES_IDX_MAC_RX_PAUSE_FRAMES = 0x2154, | ||
110 | NES_IDX_MAC_RX_SHORT_FRAMES = 0x2174, | ||
111 | NES_IDX_MAC_RX_OVERSIZED_FRAMES = 0x2178, | ||
112 | NES_IDX_MAC_RX_JABBER_FRAMES = 0x217c, | ||
113 | NES_IDX_MAC_RX_CRC_ERR_FRAMES = 0x2180, | ||
114 | NES_IDX_MAC_RX_LENGTH_ERR_FRAMES = 0x2184, | ||
115 | NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES = 0x2188, | ||
116 | NES_IDX_MAC_INT_STATUS = 0x21f0, | ||
117 | NES_IDX_MAC_INT_MASK = 0x21f4, | ||
118 | NES_IDX_PHY_PCS_CONTROL_STATUS0 = 0x2800, | ||
119 | NES_IDX_PHY_PCS_CONTROL_STATUS1 = 0x2a00, | ||
120 | NES_IDX_ETH_SERDES_COMMON_CONTROL0 = 0x2808, | ||
121 | NES_IDX_ETH_SERDES_COMMON_CONTROL1 = 0x2a08, | ||
122 | NES_IDX_ETH_SERDES_COMMON_STATUS0 = 0x280c, | ||
123 | NES_IDX_ETH_SERDES_COMMON_STATUS1 = 0x2a0c, | ||
124 | NES_IDX_ETH_SERDES_TX_EMP0 = 0x2810, | ||
125 | NES_IDX_ETH_SERDES_TX_EMP1 = 0x2a10, | ||
126 | NES_IDX_ETH_SERDES_TX_DRIVE0 = 0x2814, | ||
127 | NES_IDX_ETH_SERDES_TX_DRIVE1 = 0x2a14, | ||
128 | NES_IDX_ETH_SERDES_RX_MODE0 = 0x2818, | ||
129 | NES_IDX_ETH_SERDES_RX_MODE1 = 0x2a18, | ||
130 | NES_IDX_ETH_SERDES_RX_SIGDET0 = 0x281c, | ||
131 | NES_IDX_ETH_SERDES_RX_SIGDET1 = 0x2a1c, | ||
132 | NES_IDX_ETH_SERDES_BYPASS0 = 0x2820, | ||
133 | NES_IDX_ETH_SERDES_BYPASS1 = 0x2a20, | ||
134 | NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0 = 0x2824, | ||
135 | NES_IDX_ETH_SERDES_LOOPBACK_CONTROL1 = 0x2a24, | ||
136 | NES_IDX_ETH_SERDES_RX_EQ_CONTROL0 = 0x2828, | ||
137 | NES_IDX_ETH_SERDES_RX_EQ_CONTROL1 = 0x2a28, | ||
138 | NES_IDX_ETH_SERDES_RX_EQ_STATUS0 = 0x282c, | ||
139 | NES_IDX_ETH_SERDES_RX_EQ_STATUS1 = 0x2a2c, | ||
140 | NES_IDX_ETH_SERDES_CDR_RESET0 = 0x2830, | ||
141 | NES_IDX_ETH_SERDES_CDR_RESET1 = 0x2a30, | ||
142 | NES_IDX_ETH_SERDES_CDR_CONTROL0 = 0x2834, | ||
143 | NES_IDX_ETH_SERDES_CDR_CONTROL1 = 0x2a34, | ||
144 | NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0 = 0x2838, | ||
145 | NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1 = 0x2a38, | ||
146 | NES_IDX_ENDNODE0_NSTAT_RX_DISCARD = 0x3080, | ||
147 | NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO = 0x3000, | ||
148 | NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI = 0x3004, | ||
149 | NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO = 0x3008, | ||
150 | NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI = 0x300c, | ||
151 | NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO = 0x7000, | ||
152 | NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI = 0x7004, | ||
153 | NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO = 0x7008, | ||
154 | NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI = 0x700c, | ||
155 | NES_IDX_CM_CONFIG = 0x5100, | ||
156 | NES_IDX_NIC_LOGPORT_TO_PHYPORT = 0x6000, | ||
157 | NES_IDX_NIC_PHYPORT_TO_USW = 0x6008, | ||
158 | NES_IDX_NIC_ACTIVE = 0x6010, | ||
159 | NES_IDX_NIC_UNICAST_ALL = 0x6018, | ||
160 | NES_IDX_NIC_MULTICAST_ALL = 0x6020, | ||
161 | NES_IDX_NIC_MULTICAST_ENABLE = 0x6028, | ||
162 | NES_IDX_NIC_BROADCAST_ON = 0x6030, | ||
163 | NES_IDX_USED_CHUNKS_TX = 0x60b0, | ||
164 | NES_IDX_TX_POOL_SIZE = 0x60b8, | ||
165 | NES_IDX_QUAD_HASH_TABLE_SIZE = 0x6148, | ||
166 | NES_IDX_PERFECT_FILTER_LOW = 0x6200, | ||
167 | NES_IDX_PERFECT_FILTER_HIGH = 0x6204, | ||
168 | NES_IDX_IPV4_TCP_REXMITS = 0x7080, | ||
169 | NES_IDX_DEBUG_ERROR_CONTROL_STATUS = 0x913c, | ||
170 | NES_IDX_DEBUG_ERROR_MASKS0 = 0x9140, | ||
171 | NES_IDX_DEBUG_ERROR_MASKS1 = 0x9144, | ||
172 | NES_IDX_DEBUG_ERROR_MASKS2 = 0x9148, | ||
173 | NES_IDX_DEBUG_ERROR_MASKS3 = 0x914c, | ||
174 | NES_IDX_DEBUG_ERROR_MASKS4 = 0x9150, | ||
175 | NES_IDX_DEBUG_ERROR_MASKS5 = 0x9154, | ||
176 | }; | ||
177 | |||
178 | #define NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE 1 | ||
179 | #define NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE (1 << 17) | ||
180 | |||
181 | enum nes_cqp_opcodes { | ||
182 | NES_CQP_CREATE_QP = 0x00, | ||
183 | NES_CQP_MODIFY_QP = 0x01, | ||
184 | NES_CQP_DESTROY_QP = 0x02, | ||
185 | NES_CQP_CREATE_CQ = 0x03, | ||
186 | NES_CQP_MODIFY_CQ = 0x04, | ||
187 | NES_CQP_DESTROY_CQ = 0x05, | ||
188 | NES_CQP_ALLOCATE_STAG = 0x09, | ||
189 | NES_CQP_REGISTER_STAG = 0x0a, | ||
190 | NES_CQP_QUERY_STAG = 0x0b, | ||
191 | NES_CQP_REGISTER_SHARED_STAG = 0x0c, | ||
192 | NES_CQP_DEALLOCATE_STAG = 0x0d, | ||
193 | NES_CQP_MANAGE_ARP_CACHE = 0x0f, | ||
194 | NES_CQP_SUSPEND_QPS = 0x11, | ||
195 | NES_CQP_UPLOAD_CONTEXT = 0x13, | ||
196 | NES_CQP_CREATE_CEQ = 0x16, | ||
197 | NES_CQP_DESTROY_CEQ = 0x18, | ||
198 | NES_CQP_CREATE_AEQ = 0x19, | ||
199 | NES_CQP_DESTROY_AEQ = 0x1b, | ||
200 | NES_CQP_LMI_ACCESS = 0x20, | ||
201 | NES_CQP_FLUSH_WQES = 0x22, | ||
202 | NES_CQP_MANAGE_APBVT = 0x23 | ||
203 | }; | ||
204 | |||
205 | enum nes_cqp_wqe_word_idx { | ||
206 | NES_CQP_WQE_OPCODE_IDX = 0, | ||
207 | NES_CQP_WQE_ID_IDX = 1, | ||
208 | NES_CQP_WQE_COMP_CTX_LOW_IDX = 2, | ||
209 | NES_CQP_WQE_COMP_CTX_HIGH_IDX = 3, | ||
210 | NES_CQP_WQE_COMP_SCRATCH_LOW_IDX = 4, | ||
211 | NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX = 5, | ||
212 | }; | ||
213 | |||
214 | enum nes_cqp_cq_wqeword_idx { | ||
215 | NES_CQP_CQ_WQE_PBL_LOW_IDX = 6, | ||
216 | NES_CQP_CQ_WQE_PBL_HIGH_IDX = 7, | ||
217 | NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX = 8, | ||
218 | NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX = 9, | ||
219 | NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX = 10, | ||
220 | }; | ||
221 | |||
222 | enum nes_cqp_stag_wqeword_idx { | ||
223 | NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX = 1, | ||
224 | NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX = 6, | ||
225 | NES_CQP_STAG_WQE_LEN_LOW_IDX = 7, | ||
226 | NES_CQP_STAG_WQE_STAG_IDX = 8, | ||
227 | NES_CQP_STAG_WQE_VA_LOW_IDX = 10, | ||
228 | NES_CQP_STAG_WQE_VA_HIGH_IDX = 11, | ||
229 | NES_CQP_STAG_WQE_PA_LOW_IDX = 12, | ||
230 | NES_CQP_STAG_WQE_PA_HIGH_IDX = 13, | ||
231 | NES_CQP_STAG_WQE_PBL_LEN_IDX = 14 | ||
232 | }; | ||
233 | |||
234 | #define NES_CQP_OP_IWARP_STATE_SHIFT 28 | ||
235 | |||
236 | enum nes_cqp_qp_bits { | ||
237 | NES_CQP_QP_ARP_VALID = (1<<8), | ||
238 | NES_CQP_QP_WINBUF_VALID = (1<<9), | ||
239 | NES_CQP_QP_CONTEXT_VALID = (1<<10), | ||
240 | NES_CQP_QP_ORD_VALID = (1<<11), | ||
241 | NES_CQP_QP_WINBUF_DATAIND_EN = (1<<12), | ||
242 | NES_CQP_QP_VIRT_WQS = (1<<13), | ||
243 | NES_CQP_QP_DEL_HTE = (1<<14), | ||
244 | NES_CQP_QP_CQS_VALID = (1<<15), | ||
245 | NES_CQP_QP_TYPE_TSA = 0, | ||
246 | NES_CQP_QP_TYPE_IWARP = (1<<16), | ||
247 | NES_CQP_QP_TYPE_CQP = (4<<16), | ||
248 | NES_CQP_QP_TYPE_NIC = (5<<16), | ||
249 | NES_CQP_QP_MSS_CHG = (1<<20), | ||
250 | NES_CQP_QP_STATIC_RESOURCES = (1<<21), | ||
251 | NES_CQP_QP_IGNORE_MW_BOUND = (1<<22), | ||
252 | NES_CQP_QP_VWQ_USE_LMI = (1<<23), | ||
253 | NES_CQP_QP_IWARP_STATE_IDLE = (1<<NES_CQP_OP_IWARP_STATE_SHIFT), | ||
254 | NES_CQP_QP_IWARP_STATE_RTS = (2<<NES_CQP_OP_IWARP_STATE_SHIFT), | ||
255 | NES_CQP_QP_IWARP_STATE_CLOSING = (3<<NES_CQP_OP_IWARP_STATE_SHIFT), | ||
256 | NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT), | ||
257 | NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT), | ||
258 | NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT), | ||
259 | NES_CQP_QP_RESET = (1<<31), | ||
260 | }; | ||
261 | |||
262 | enum nes_cqp_qp_wqe_word_idx { | ||
263 | NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6, | ||
264 | NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7, | ||
265 | NES_CQP_QP_WQE_NEW_MSS_IDX = 15, | ||
266 | }; | ||
267 | |||
268 | enum nes_nic_ctx_bits { | ||
269 | NES_NIC_CTX_RQ_SIZE_32 = (3<<8), | ||
270 | NES_NIC_CTX_RQ_SIZE_512 = (3<<8), | ||
271 | NES_NIC_CTX_SQ_SIZE_32 = (1<<10), | ||
272 | NES_NIC_CTX_SQ_SIZE_512 = (3<<10), | ||
273 | }; | ||
274 | |||
275 | enum nes_nic_qp_ctx_word_idx { | ||
276 | NES_NIC_CTX_MISC_IDX = 0, | ||
277 | NES_NIC_CTX_SQ_LOW_IDX = 2, | ||
278 | NES_NIC_CTX_SQ_HIGH_IDX = 3, | ||
279 | NES_NIC_CTX_RQ_LOW_IDX = 4, | ||
280 | NES_NIC_CTX_RQ_HIGH_IDX = 5, | ||
281 | }; | ||
282 | |||
283 | enum nes_cqp_cq_bits { | ||
284 | NES_CQP_CQ_CEQE_MASK = (1<<9), | ||
285 | NES_CQP_CQ_CEQ_VALID = (1<<10), | ||
286 | NES_CQP_CQ_RESIZE = (1<<11), | ||
287 | NES_CQP_CQ_CHK_OVERFLOW = (1<<12), | ||
288 | NES_CQP_CQ_4KB_CHUNK = (1<<14), | ||
289 | NES_CQP_CQ_VIRT = (1<<15), | ||
290 | }; | ||
291 | |||
292 | enum nes_cqp_stag_bits { | ||
293 | NES_CQP_STAG_VA_TO = (1<<9), | ||
294 | NES_CQP_STAG_DEALLOC_PBLS = (1<<10), | ||
295 | NES_CQP_STAG_PBL_BLK_SIZE = (1<<11), | ||
296 | NES_CQP_STAG_MR = (1<<13), | ||
297 | NES_CQP_STAG_RIGHTS_LOCAL_READ = (1<<16), | ||
298 | NES_CQP_STAG_RIGHTS_LOCAL_WRITE = (1<<17), | ||
299 | NES_CQP_STAG_RIGHTS_REMOTE_READ = (1<<18), | ||
300 | NES_CQP_STAG_RIGHTS_REMOTE_WRITE = (1<<19), | ||
301 | NES_CQP_STAG_RIGHTS_WINDOW_BIND = (1<<20), | ||
302 | NES_CQP_STAG_REM_ACC_EN = (1<<21), | ||
303 | NES_CQP_STAG_LEAVE_PENDING = (1<<31), | ||
304 | }; | ||
305 | |||
306 | enum nes_cqp_ceq_wqeword_idx { | ||
307 | NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX = 1, | ||
308 | NES_CQP_CEQ_WQE_PBL_LOW_IDX = 6, | ||
309 | NES_CQP_CEQ_WQE_PBL_HIGH_IDX = 7, | ||
310 | }; | ||
311 | |||
312 | enum nes_cqp_ceq_bits { | ||
313 | NES_CQP_CEQ_4KB_CHUNK = (1<<14), | ||
314 | NES_CQP_CEQ_VIRT = (1<<15), | ||
315 | }; | ||
316 | |||
317 | enum nes_cqp_aeq_wqeword_idx { | ||
318 | NES_CQP_AEQ_WQE_ELEMENT_COUNT_IDX = 1, | ||
319 | NES_CQP_AEQ_WQE_PBL_LOW_IDX = 6, | ||
320 | NES_CQP_AEQ_WQE_PBL_HIGH_IDX = 7, | ||
321 | }; | ||
322 | |||
323 | enum nes_cqp_aeq_bits { | ||
324 | NES_CQP_AEQ_4KB_CHUNK = (1<<14), | ||
325 | NES_CQP_AEQ_VIRT = (1<<15), | ||
326 | }; | ||
327 | |||
328 | enum nes_cqp_lmi_wqeword_idx { | ||
329 | NES_CQP_LMI_WQE_LMI_OFFSET_IDX = 1, | ||
330 | NES_CQP_LMI_WQE_FRAG_LOW_IDX = 8, | ||
331 | NES_CQP_LMI_WQE_FRAG_HIGH_IDX = 9, | ||
332 | NES_CQP_LMI_WQE_FRAG_LEN_IDX = 10, | ||
333 | }; | ||
334 | |||
335 | enum nes_cqp_arp_wqeword_idx { | ||
336 | NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX = 6, | ||
337 | NES_CQP_ARP_WQE_MAC_HIGH_IDX = 7, | ||
338 | NES_CQP_ARP_WQE_REACHABILITY_MAX_IDX = 1, | ||
339 | }; | ||
340 | |||
341 | enum nes_cqp_upload_wqeword_idx { | ||
342 | NES_CQP_UPLOAD_WQE_CTXT_LOW_IDX = 6, | ||
343 | NES_CQP_UPLOAD_WQE_CTXT_HIGH_IDX = 7, | ||
344 | NES_CQP_UPLOAD_WQE_HTE_IDX = 8, | ||
345 | }; | ||
346 | |||
347 | enum nes_cqp_arp_bits { | ||
348 | NES_CQP_ARP_VALID = (1<<8), | ||
349 | NES_CQP_ARP_PERM = (1<<9), | ||
350 | }; | ||
351 | |||
352 | enum nes_cqp_flush_bits { | ||
353 | NES_CQP_FLUSH_SQ = (1<<30), | ||
354 | NES_CQP_FLUSH_RQ = (1<<31), | ||
355 | }; | ||
356 | |||
357 | enum nes_cqe_opcode_bits { | ||
358 | NES_CQE_STAG_VALID = (1<<6), | ||
359 | NES_CQE_ERROR = (1<<7), | ||
360 | NES_CQE_SQ = (1<<8), | ||
361 | NES_CQE_SE = (1<<9), | ||
362 | NES_CQE_PSH = (1<<29), | ||
363 | NES_CQE_FIN = (1<<30), | ||
364 | NES_CQE_VALID = (1<<31), | ||
365 | }; | ||
366 | |||
367 | |||
368 | enum nes_cqe_word_idx { | ||
369 | NES_CQE_PAYLOAD_LENGTH_IDX = 0, | ||
370 | NES_CQE_COMP_COMP_CTX_LOW_IDX = 2, | ||
371 | NES_CQE_COMP_COMP_CTX_HIGH_IDX = 3, | ||
372 | NES_CQE_INV_STAG_IDX = 4, | ||
373 | NES_CQE_QP_ID_IDX = 5, | ||
374 | NES_CQE_ERROR_CODE_IDX = 6, | ||
375 | NES_CQE_OPCODE_IDX = 7, | ||
376 | }; | ||
377 | |||
378 | enum nes_ceqe_word_idx { | ||
379 | NES_CEQE_CQ_CTX_LOW_IDX = 0, | ||
380 | NES_CEQE_CQ_CTX_HIGH_IDX = 1, | ||
381 | }; | ||
382 | |||
383 | enum nes_ceqe_status_bit { | ||
384 | NES_CEQE_VALID = (1<<31), | ||
385 | }; | ||
386 | |||
387 | enum nes_int_bits { | ||
388 | NES_INT_CEQ0 = (1<<0), | ||
389 | NES_INT_CEQ1 = (1<<1), | ||
390 | NES_INT_CEQ2 = (1<<2), | ||
391 | NES_INT_CEQ3 = (1<<3), | ||
392 | NES_INT_CEQ4 = (1<<4), | ||
393 | NES_INT_CEQ5 = (1<<5), | ||
394 | NES_INT_CEQ6 = (1<<6), | ||
395 | NES_INT_CEQ7 = (1<<7), | ||
396 | NES_INT_CEQ8 = (1<<8), | ||
397 | NES_INT_CEQ9 = (1<<9), | ||
398 | NES_INT_CEQ10 = (1<<10), | ||
399 | NES_INT_CEQ11 = (1<<11), | ||
400 | NES_INT_CEQ12 = (1<<12), | ||
401 | NES_INT_CEQ13 = (1<<13), | ||
402 | NES_INT_CEQ14 = (1<<14), | ||
403 | NES_INT_CEQ15 = (1<<15), | ||
404 | NES_INT_AEQ0 = (1<<16), | ||
405 | NES_INT_AEQ1 = (1<<17), | ||
406 | NES_INT_AEQ2 = (1<<18), | ||
407 | NES_INT_AEQ3 = (1<<19), | ||
408 | NES_INT_AEQ4 = (1<<20), | ||
409 | NES_INT_AEQ5 = (1<<21), | ||
410 | NES_INT_AEQ6 = (1<<22), | ||
411 | NES_INT_AEQ7 = (1<<23), | ||
412 | NES_INT_MAC0 = (1<<24), | ||
413 | NES_INT_MAC1 = (1<<25), | ||
414 | NES_INT_MAC2 = (1<<26), | ||
415 | NES_INT_MAC3 = (1<<27), | ||
416 | NES_INT_TSW = (1<<28), | ||
417 | NES_INT_TIMER = (1<<29), | ||
418 | NES_INT_INTF = (1<<30), | ||
419 | }; | ||
420 | |||
421 | enum nes_intf_int_bits { | ||
422 | NES_INTF_INT_PCIERR = (1<<0), | ||
423 | NES_INTF_PERIODIC_TIMER = (1<<2), | ||
424 | NES_INTF_ONE_SHOT_TIMER = (1<<3), | ||
425 | NES_INTF_INT_CRITERR = (1<<14), | ||
426 | NES_INTF_INT_AEQ0_OFLOW = (1<<16), | ||
427 | NES_INTF_INT_AEQ1_OFLOW = (1<<17), | ||
428 | NES_INTF_INT_AEQ2_OFLOW = (1<<18), | ||
429 | NES_INTF_INT_AEQ3_OFLOW = (1<<19), | ||
430 | NES_INTF_INT_AEQ4_OFLOW = (1<<20), | ||
431 | NES_INTF_INT_AEQ5_OFLOW = (1<<21), | ||
432 | NES_INTF_INT_AEQ6_OFLOW = (1<<22), | ||
433 | NES_INTF_INT_AEQ7_OFLOW = (1<<23), | ||
434 | NES_INTF_INT_AEQ_OFLOW = (0xff<<16), | ||
435 | }; | ||
436 | |||
437 | enum nes_mac_int_bits { | ||
438 | NES_MAC_INT_LINK_STAT_CHG = (1<<1), | ||
439 | NES_MAC_INT_XGMII_EXT = (1<<2), | ||
440 | NES_MAC_INT_TX_UNDERFLOW = (1<<6), | ||
441 | NES_MAC_INT_TX_ERROR = (1<<7), | ||
442 | }; | ||
443 | |||
444 | enum nes_cqe_allocate_bits { | ||
445 | NES_CQE_ALLOC_INC_SELECT = (1<<28), | ||
446 | NES_CQE_ALLOC_NOTIFY_NEXT = (1<<29), | ||
447 | NES_CQE_ALLOC_NOTIFY_SE = (1<<30), | ||
448 | NES_CQE_ALLOC_RESET = (1<<31), | ||
449 | }; | ||
450 | |||
451 | enum nes_nic_rq_wqe_word_idx { | ||
452 | NES_NIC_RQ_WQE_LENGTH_1_0_IDX = 0, | ||
453 | NES_NIC_RQ_WQE_LENGTH_3_2_IDX = 1, | ||
454 | NES_NIC_RQ_WQE_FRAG0_LOW_IDX = 2, | ||
455 | NES_NIC_RQ_WQE_FRAG0_HIGH_IDX = 3, | ||
456 | NES_NIC_RQ_WQE_FRAG1_LOW_IDX = 4, | ||
457 | NES_NIC_RQ_WQE_FRAG1_HIGH_IDX = 5, | ||
458 | NES_NIC_RQ_WQE_FRAG2_LOW_IDX = 6, | ||
459 | NES_NIC_RQ_WQE_FRAG2_HIGH_IDX = 7, | ||
460 | NES_NIC_RQ_WQE_FRAG3_LOW_IDX = 8, | ||
461 | NES_NIC_RQ_WQE_FRAG3_HIGH_IDX = 9, | ||
462 | }; | ||
463 | |||
464 | enum nes_nic_sq_wqe_word_idx { | ||
465 | NES_NIC_SQ_WQE_MISC_IDX = 0, | ||
466 | NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX = 1, | ||
467 | NES_NIC_SQ_WQE_LSO_INFO_IDX = 2, | ||
468 | NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX = 3, | ||
469 | NES_NIC_SQ_WQE_LENGTH_2_1_IDX = 4, | ||
470 | NES_NIC_SQ_WQE_LENGTH_4_3_IDX = 5, | ||
471 | NES_NIC_SQ_WQE_FRAG0_LOW_IDX = 6, | ||
472 | NES_NIC_SQ_WQE_FRAG0_HIGH_IDX = 7, | ||
473 | NES_NIC_SQ_WQE_FRAG1_LOW_IDX = 8, | ||
474 | NES_NIC_SQ_WQE_FRAG1_HIGH_IDX = 9, | ||
475 | NES_NIC_SQ_WQE_FRAG2_LOW_IDX = 10, | ||
476 | NES_NIC_SQ_WQE_FRAG2_HIGH_IDX = 11, | ||
477 | NES_NIC_SQ_WQE_FRAG3_LOW_IDX = 12, | ||
478 | NES_NIC_SQ_WQE_FRAG3_HIGH_IDX = 13, | ||
479 | NES_NIC_SQ_WQE_FRAG4_LOW_IDX = 14, | ||
480 | NES_NIC_SQ_WQE_FRAG4_HIGH_IDX = 15, | ||
481 | }; | ||
482 | |||
483 | enum nes_iwarp_sq_wqe_word_idx { | ||
484 | NES_IWARP_SQ_WQE_MISC_IDX = 0, | ||
485 | NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX = 1, | ||
486 | NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX = 2, | ||
487 | NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX = 3, | ||
488 | NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX = 4, | ||
489 | NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX = 5, | ||
490 | NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX = 7, | ||
491 | NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX = 8, | ||
492 | NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX = 9, | ||
493 | NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX = 10, | ||
494 | NES_IWARP_SQ_WQE_RDMA_STAG_IDX = 11, | ||
495 | NES_IWARP_SQ_WQE_IMM_DATA_START_IDX = 12, | ||
496 | NES_IWARP_SQ_WQE_FRAG0_LOW_IDX = 16, | ||
497 | NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX = 17, | ||
498 | NES_IWARP_SQ_WQE_LENGTH0_IDX = 18, | ||
499 | NES_IWARP_SQ_WQE_STAG0_IDX = 19, | ||
500 | NES_IWARP_SQ_WQE_FRAG1_LOW_IDX = 20, | ||
501 | NES_IWARP_SQ_WQE_FRAG1_HIGH_IDX = 21, | ||
502 | NES_IWARP_SQ_WQE_LENGTH1_IDX = 22, | ||
503 | NES_IWARP_SQ_WQE_STAG1_IDX = 23, | ||
504 | NES_IWARP_SQ_WQE_FRAG2_LOW_IDX = 24, | ||
505 | NES_IWARP_SQ_WQE_FRAG2_HIGH_IDX = 25, | ||
506 | NES_IWARP_SQ_WQE_LENGTH2_IDX = 26, | ||
507 | NES_IWARP_SQ_WQE_STAG2_IDX = 27, | ||
508 | NES_IWARP_SQ_WQE_FRAG3_LOW_IDX = 28, | ||
509 | NES_IWARP_SQ_WQE_FRAG3_HIGH_IDX = 29, | ||
510 | NES_IWARP_SQ_WQE_LENGTH3_IDX = 30, | ||
511 | NES_IWARP_SQ_WQE_STAG3_IDX = 31, | ||
512 | }; | ||
513 | |||
514 | enum nes_iwarp_sq_bind_wqe_word_idx { | ||
515 | NES_IWARP_SQ_BIND_WQE_MR_IDX = 6, | ||
516 | NES_IWARP_SQ_BIND_WQE_MW_IDX = 7, | ||
517 | NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX = 8, | ||
518 | NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX = 9, | ||
519 | NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX = 10, | ||
520 | NES_IWARP_SQ_BIND_WQE_VA_FBO_HIGH_IDX = 11, | ||
521 | }; | ||
522 | |||
523 | enum nes_iwarp_sq_fmr_wqe_word_idx { | ||
524 | NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX = 7, | ||
525 | NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX = 8, | ||
526 | NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX = 9, | ||
527 | NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX = 10, | ||
528 | NES_IWARP_SQ_FMR_WQE_VA_FBO_HIGH_IDX = 11, | ||
529 | NES_IWARP_SQ_FMR_WQE_PBL_ADDR_LOW_IDX = 12, | ||
530 | NES_IWARP_SQ_FMR_WQE_PBL_ADDR_HIGH_IDX = 13, | ||
531 | NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX = 14, | ||
532 | }; | ||
533 | |||
534 | enum nes_iwarp_sq_locinv_wqe_word_idx { | ||
535 | NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX = 6, | ||
536 | }; | ||
537 | |||
538 | |||
539 | enum nes_iwarp_rq_wqe_word_idx { | ||
540 | NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX = 1, | ||
541 | NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX = 2, | ||
542 | NES_IWARP_RQ_WQE_COMP_CTX_HIGH_IDX = 3, | ||
543 | NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX = 4, | ||
544 | NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX = 5, | ||
545 | NES_IWARP_RQ_WQE_FRAG0_LOW_IDX = 8, | ||
546 | NES_IWARP_RQ_WQE_FRAG0_HIGH_IDX = 9, | ||
547 | NES_IWARP_RQ_WQE_LENGTH0_IDX = 10, | ||
548 | NES_IWARP_RQ_WQE_STAG0_IDX = 11, | ||
549 | NES_IWARP_RQ_WQE_FRAG1_LOW_IDX = 12, | ||
550 | NES_IWARP_RQ_WQE_FRAG1_HIGH_IDX = 13, | ||
551 | NES_IWARP_RQ_WQE_LENGTH1_IDX = 14, | ||
552 | NES_IWARP_RQ_WQE_STAG1_IDX = 15, | ||
553 | NES_IWARP_RQ_WQE_FRAG2_LOW_IDX = 16, | ||
554 | NES_IWARP_RQ_WQE_FRAG2_HIGH_IDX = 17, | ||
555 | NES_IWARP_RQ_WQE_LENGTH2_IDX = 18, | ||
556 | NES_IWARP_RQ_WQE_STAG2_IDX = 19, | ||
557 | NES_IWARP_RQ_WQE_FRAG3_LOW_IDX = 20, | ||
558 | NES_IWARP_RQ_WQE_FRAG3_HIGH_IDX = 21, | ||
559 | NES_IWARP_RQ_WQE_LENGTH3_IDX = 22, | ||
560 | NES_IWARP_RQ_WQE_STAG3_IDX = 23, | ||
561 | }; | ||
562 | |||
563 | enum nes_nic_sq_wqe_bits { | ||
564 | NES_NIC_SQ_WQE_PHDR_CS_READY = (1<<21), | ||
565 | NES_NIC_SQ_WQE_LSO_ENABLE = (1<<22), | ||
566 | NES_NIC_SQ_WQE_TAGVALUE_ENABLE = (1<<23), | ||
567 | NES_NIC_SQ_WQE_DISABLE_CHKSUM = (1<<30), | ||
568 | NES_NIC_SQ_WQE_COMPLETION = (1<<31), | ||
569 | }; | ||
570 | |||
571 | enum nes_nic_cqe_word_idx { | ||
572 | NES_NIC_CQE_ACCQP_ID_IDX = 0, | ||
573 | NES_NIC_CQE_TAG_PKT_TYPE_IDX = 2, | ||
574 | NES_NIC_CQE_MISC_IDX = 3, | ||
575 | }; | ||
576 | |||
577 | #define NES_PKT_TYPE_APBVT_BITS 0xC112 | ||
578 | #define NES_PKT_TYPE_APBVT_MASK 0xff3e | ||
579 | |||
580 | #define NES_PKT_TYPE_PVALID_BITS 0x10000000 | ||
581 | #define NES_PKT_TYPE_PVALID_MASK 0x30000000 | ||
582 | |||
583 | #define NES_PKT_TYPE_TCPV4_BITS 0x0110 | ||
584 | #define NES_PKT_TYPE_TCPV4_MASK 0x3f30 | ||
585 | |||
586 | #define NES_PKT_TYPE_UDPV4_BITS 0x0210 | ||
587 | #define NES_PKT_TYPE_UDPV4_MASK 0x3f30 | ||
588 | |||
589 | #define NES_PKT_TYPE_IPV4_BITS 0x0010 | ||
590 | #define NES_PKT_TYPE_IPV4_MASK 0x3f30 | ||
591 | |||
592 | #define NES_PKT_TYPE_OTHER_BITS 0x0000 | ||
593 | #define NES_PKT_TYPE_OTHER_MASK 0x0030 | ||
594 | |||
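The *_BITS/*_MASK pairs above are used to classify a frame from the tag/packet-type word of the NIC CQE (word NES_NIC_CQE_TAG_PKT_TYPE_IDX): mask off the don't-care bits, then compare against the BITS pattern. A minimal sketch of that test, with the hypothetical pkt_type argument standing in for the already byte-swapped CQE word:

/* Sketch only: classify a received frame from the CQE packet-type word.
 * "pkt_type" is assumed to hold the 32-bit value in CPU byte order. */
static inline int nes_pkt_type_is_tcpv4(u32 pkt_type)
{
	return (pkt_type & NES_PKT_TYPE_TCPV4_MASK) == NES_PKT_TYPE_TCPV4_BITS;
}

static inline int nes_pkt_type_is_ipv4(u32 pkt_type)
{
	return (pkt_type & NES_PKT_TYPE_IPV4_MASK) == NES_PKT_TYPE_IPV4_BITS;
}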
595 | #define NES_NIC_CQE_ERRV_SHIFT 16 | ||
596 | enum nes_nic_ev_bits { | ||
597 | NES_NIC_ERRV_BITS_MODE = (1<<0), | ||
598 | NES_NIC_ERRV_BITS_IPV4_CSUM_ERR = (1<<1), | ||
599 | NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR = (1<<2), | ||
600 | NES_NIC_ERRV_BITS_WQE_OVERRUN = (1<<3), | ||
601 | NES_NIC_ERRV_BITS_IPH_ERR = (1<<4), | ||
602 | }; | ||
603 | |||
604 | enum nes_nic_cqe_bits { | ||
605 | NES_NIC_CQE_ERRV_MASK = (0xff<<NES_NIC_CQE_ERRV_SHIFT), | ||
606 | NES_NIC_CQE_SQ = (1<<24), | ||
607 | NES_NIC_CQE_ACCQP_PORT = (1<<28), | ||
608 | NES_NIC_CQE_ACCQP_VALID = (1<<29), | ||
609 | NES_NIC_CQE_TAG_VALID = (1<<30), | ||
610 | NES_NIC_CQE_VALID = (1<<31), | ||
611 | }; | ||
612 | |||
613 | enum nes_aeqe_word_idx { | ||
614 | NES_AEQE_COMP_CTXT_LOW_IDX = 0, | ||
615 | NES_AEQE_COMP_CTXT_HIGH_IDX = 1, | ||
616 | NES_AEQE_COMP_QP_CQ_ID_IDX = 2, | ||
617 | NES_AEQE_MISC_IDX = 3, | ||
618 | }; | ||
619 | |||
620 | enum nes_aeqe_bits { | ||
621 | NES_AEQE_QP = (1<<16), | ||
622 | NES_AEQE_CQ = (1<<17), | ||
623 | NES_AEQE_SQ = (1<<18), | ||
624 | NES_AEQE_INBOUND_RDMA = (1<<19), | ||
625 | NES_AEQE_IWARP_STATE_MASK = (7<<20), | ||
626 | NES_AEQE_TCP_STATE_MASK = (0xf<<24), | ||
627 | NES_AEQE_VALID = (1<<31), | ||
628 | }; | ||
629 | |||
630 | #define NES_AEQE_IWARP_STATE_SHIFT 20 | ||
631 | #define NES_AEQE_TCP_STATE_SHIFT 24 | ||
632 | |||
633 | enum nes_aeqe_iwarp_state { | ||
634 | NES_AEQE_IWARP_STATE_NON_EXISTANT = 0, | ||
635 | NES_AEQE_IWARP_STATE_IDLE = 1, | ||
636 | NES_AEQE_IWARP_STATE_RTS = 2, | ||
637 | NES_AEQE_IWARP_STATE_CLOSING = 3, | ||
638 | NES_AEQE_IWARP_STATE_TERMINATE = 5, | ||
639 | NES_AEQE_IWARP_STATE_ERROR = 6 | ||
640 | }; | ||
641 | |||
642 | enum nes_aeqe_tcp_state { | ||
643 | NES_AEQE_TCP_STATE_NON_EXISTANT = 0, | ||
644 | NES_AEQE_TCP_STATE_CLOSED = 1, | ||
645 | NES_AEQE_TCP_STATE_LISTEN = 2, | ||
646 | NES_AEQE_TCP_STATE_SYN_SENT = 3, | ||
647 | NES_AEQE_TCP_STATE_SYN_RCVD = 4, | ||
648 | NES_AEQE_TCP_STATE_ESTABLISHED = 5, | ||
649 | NES_AEQE_TCP_STATE_CLOSE_WAIT = 6, | ||
650 | NES_AEQE_TCP_STATE_FIN_WAIT_1 = 7, | ||
651 | NES_AEQE_TCP_STATE_CLOSING = 8, | ||
652 | NES_AEQE_TCP_STATE_LAST_ACK = 9, | ||
653 | NES_AEQE_TCP_STATE_FIN_WAIT_2 = 10, | ||
654 | NES_AEQE_TCP_STATE_TIME_WAIT = 11 | ||
655 | }; | ||
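An AEQE packs the affected QP's iWARP and TCP states into its MISC word (NES_AEQE_MISC_IDX); the mask/shift pairs above recover them as values from the two enums. A minimal decode sketch, assuming aeqe_misc has already been converted to CPU byte order:

/* Sketch only: extract the state fields from an AEQE misc word. */
static inline u8 nes_aeqe_get_iwarp_state(u32 aeqe_misc)
{
	return (aeqe_misc & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
}

static inline u8 nes_aeqe_get_tcp_state(u32 aeqe_misc)
{
	return (aeqe_misc & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
}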
656 | |||
657 | enum nes_aeqe_aeid { | ||
658 | NES_AEQE_AEID_AMP_UNALLOCATED_STAG = 0x0102, | ||
659 | NES_AEQE_AEID_AMP_INVALID_STAG = 0x0103, | ||
660 | NES_AEQE_AEID_AMP_BAD_QP = 0x0104, | ||
661 | NES_AEQE_AEID_AMP_BAD_PD = 0x0105, | ||
662 | NES_AEQE_AEID_AMP_BAD_STAG_KEY = 0x0106, | ||
663 | NES_AEQE_AEID_AMP_BAD_STAG_INDEX = 0x0107, | ||
664 | NES_AEQE_AEID_AMP_BOUNDS_VIOLATION = 0x0108, | ||
665 | NES_AEQE_AEID_AMP_RIGHTS_VIOLATION = 0x0109, | ||
666 | NES_AEQE_AEID_AMP_TO_WRAP = 0x010a, | ||
667 | NES_AEQE_AEID_AMP_FASTREG_SHARED = 0x010b, | ||
668 | NES_AEQE_AEID_AMP_FASTREG_VALID_STAG = 0x010c, | ||
669 | NES_AEQE_AEID_AMP_FASTREG_MW_STAG = 0x010d, | ||
670 | NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS = 0x010e, | ||
671 | NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW = 0x010f, | ||
672 | NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH = 0x0110, | ||
673 | NES_AEQE_AEID_AMP_INVALIDATE_SHARED = 0x0111, | ||
674 | NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS = 0x0112, | ||
675 | NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS = 0x0113, | ||
676 | NES_AEQE_AEID_AMP_MWBIND_VALID_STAG = 0x0114, | ||
677 | NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG = 0x0115, | ||
678 | NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG = 0x0116, | ||
679 | NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG = 0x0117, | ||
680 | NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS = 0x0118, | ||
681 | NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS = 0x0119, | ||
682 | NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT = 0x011a, | ||
683 | NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED = 0x011b, | ||
684 | NES_AEQE_AEID_BAD_CLOSE = 0x0201, | ||
685 | NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE = 0x0202, | ||
686 | NES_AEQE_AEID_CQ_OPERATION_ERROR = 0x0203, | ||
687 | NES_AEQE_AEID_PRIV_OPERATION_DENIED = 0x0204, | ||
688 | NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO = 0x0205, | ||
689 | NES_AEQE_AEID_STAG_ZERO_INVALID = 0x0206, | ||
690 | NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN = 0x0301, | ||
691 | NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID = 0x0302, | ||
692 | NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER = 0x0303, | ||
693 | NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION = 0x0304, | ||
694 | NES_AEQE_AEID_DDP_UBE_INVALID_MO = 0x0305, | ||
695 | NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE = 0x0306, | ||
696 | NES_AEQE_AEID_DDP_UBE_INVALID_QN = 0x0307, | ||
697 | NES_AEQE_AEID_DDP_NO_L_BIT = 0x0308, | ||
698 | NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION = 0x0311, | ||
699 | NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE = 0x0312, | ||
700 | NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST = 0x0313, | ||
701 | NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP = 0x0314, | ||
702 | NES_AEQE_AEID_INVALID_ARP_ENTRY = 0x0401, | ||
703 | NES_AEQE_AEID_INVALID_TCP_OPTION_RCVD = 0x0402, | ||
704 | NES_AEQE_AEID_STALE_ARP_ENTRY = 0x0403, | ||
705 | NES_AEQE_AEID_LLP_CLOSE_COMPLETE = 0x0501, | ||
706 | NES_AEQE_AEID_LLP_CONNECTION_RESET = 0x0502, | ||
707 | NES_AEQE_AEID_LLP_FIN_RECEIVED = 0x0503, | ||
708 | NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH = 0x0504, | ||
709 | NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR = 0x0505, | ||
710 | NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE = 0x0506, | ||
711 | NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL = 0x0507, | ||
712 | NES_AEQE_AEID_LLP_SYN_RECEIVED = 0x0508, | ||
713 | NES_AEQE_AEID_LLP_TERMINATE_RECEIVED = 0x0509, | ||
714 | NES_AEQE_AEID_LLP_TOO_MANY_RETRIES = 0x050a, | ||
715 | NES_AEQE_AEID_LLP_TOO_MANY_KEEPALIVE_RETRIES = 0x050b, | ||
716 | NES_AEQE_AEID_RESET_SENT = 0x0601, | ||
717 | NES_AEQE_AEID_TERMINATE_SENT = 0x0602, | ||
718 | NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC = 0x0700 | ||
719 | }; | ||
720 | |||
721 | enum nes_iwarp_sq_opcodes { | ||
722 | NES_IWARP_SQ_WQE_WRPDU = (1<<15), | ||
723 | NES_IWARP_SQ_WQE_PSH = (1<<21), | ||
724 | NES_IWARP_SQ_WQE_STREAMING = (1<<23), | ||
725 | NES_IWARP_SQ_WQE_IMM_DATA = (1<<28), | ||
726 | NES_IWARP_SQ_WQE_READ_FENCE = (1<<29), | ||
727 | NES_IWARP_SQ_WQE_LOCAL_FENCE = (1<<30), | ||
728 | NES_IWARP_SQ_WQE_SIGNALED_COMPL = (1<<31), | ||
729 | }; | ||
730 | |||
731 | enum nes_iwarp_sq_wqe_bits { | ||
732 | NES_IWARP_SQ_OP_RDMAW = 0, | ||
733 | NES_IWARP_SQ_OP_RDMAR = 1, | ||
734 | NES_IWARP_SQ_OP_SEND = 3, | ||
735 | NES_IWARP_SQ_OP_SENDINV = 4, | ||
736 | NES_IWARP_SQ_OP_SENDSE = 5, | ||
737 | NES_IWARP_SQ_OP_SENDSEINV = 6, | ||
738 | NES_IWARP_SQ_OP_BIND = 8, | ||
739 | NES_IWARP_SQ_OP_FAST_REG = 9, | ||
740 | NES_IWARP_SQ_OP_LOCINV = 10, | ||
741 | NES_IWARP_SQ_OP_RDMAR_LOCINV = 11, | ||
742 | NES_IWARP_SQ_OP_NOP = 12, | ||
743 | }; | ||
744 | |||
745 | #define NES_EEPROM_READ_REQUEST (1<<16) | ||
746 | #define NES_MAC_ADDR_VALID (1<<20) | ||
747 | |||
748 | /* | ||
749 | * NES index registers init values. | ||
750 | */ | ||
751 | struct nes_init_values { | ||
752 | u32 index; | ||
753 | u32 data; | ||
754 | u8 wrt; | ||
755 | }; | ||
756 | |||
757 | /* | ||
758 | * NES registers in BAR0. | ||
759 | */ | ||
760 | struct nes_pci_regs { | ||
761 | u32 int_status; | ||
762 | u32 int_mask; | ||
763 | u32 int_pending; | ||
764 | u32 intf_int_status; | ||
765 | u32 intf_int_mask; | ||
766 | u32 other_regs[59]; /* pad out to 256 bytes for now */ | ||
767 | }; | ||
768 | |||
769 | #define NES_CQP_SQ_SIZE 128 | ||
770 | #define NES_CCQ_SIZE 128 | ||
771 | #define NES_NIC_WQ_SIZE 512 | ||
772 | #define NES_NIC_CTX_SIZE ((NES_NIC_CTX_RQ_SIZE_512) | (NES_NIC_CTX_SQ_SIZE_512)) | ||
773 | #define NES_NIC_BACK_STORE 0x00038000 | ||
774 | |||
775 | struct nes_device; | ||
776 | |||
777 | struct nes_hw_nic_qp_context { | ||
778 | __le32 context_words[6]; | ||
779 | }; | ||
780 | |||
781 | struct nes_hw_nic_sq_wqe { | ||
782 | __le32 wqe_words[16]; | ||
783 | }; | ||
784 | |||
785 | struct nes_hw_nic_rq_wqe { | ||
786 | __le32 wqe_words[16]; | ||
787 | }; | ||
788 | |||
789 | struct nes_hw_nic_cqe { | ||
790 | __le32 cqe_words[4]; | ||
791 | }; | ||
792 | |||
793 | struct nes_hw_cqp_qp_context { | ||
794 | __le32 context_words[4]; | ||
795 | }; | ||
796 | |||
797 | struct nes_hw_cqp_wqe { | ||
798 | __le32 wqe_words[16]; | ||
799 | }; | ||
800 | |||
801 | struct nes_hw_qp_wqe { | ||
802 | __le32 wqe_words[32]; | ||
803 | }; | ||
804 | |||
805 | struct nes_hw_cqe { | ||
806 | __le32 cqe_words[8]; | ||
807 | }; | ||
808 | |||
809 | struct nes_hw_ceqe { | ||
810 | __le32 ceqe_words[2]; | ||
811 | }; | ||
812 | |||
813 | struct nes_hw_aeqe { | ||
814 | __le32 aeqe_words[4]; | ||
815 | }; | ||
816 | |||
817 | struct nes_cqp_request { | ||
818 | union { | ||
819 | u64 cqp_callback_context; | ||
820 | void *cqp_callback_pointer; | ||
821 | }; | ||
822 | wait_queue_head_t waitq; | ||
823 | struct nes_hw_cqp_wqe cqp_wqe; | ||
824 | struct list_head list; | ||
825 | atomic_t refcount; | ||
826 | void (*cqp_callback)(struct nes_device *nesdev, struct nes_cqp_request *cqp_request); | ||
827 | u16 major_code; | ||
828 | u16 minor_code; | ||
829 | u8 waiting; | ||
830 | u8 request_done; | ||
831 | u8 dynamic; | ||
832 | u8 callback; | ||
833 | }; | ||
834 | |||
835 | struct nes_hw_cqp { | ||
836 | struct nes_hw_cqp_wqe *sq_vbase; | ||
837 | dma_addr_t sq_pbase; | ||
838 | spinlock_t lock; | ||
839 | wait_queue_head_t waitq; | ||
840 | u16 qp_id; | ||
841 | u16 sq_head; | ||
842 | u16 sq_tail; | ||
843 | u16 sq_size; | ||
844 | }; | ||
845 | |||
846 | #define NES_FIRST_FRAG_SIZE 128 | ||
847 | struct nes_first_frag { | ||
848 | u8 buffer[NES_FIRST_FRAG_SIZE]; | ||
849 | }; | ||
850 | |||
851 | struct nes_hw_nic { | ||
852 | struct nes_first_frag *first_frag_vbase; /* virtual address of first frags */ | ||
853 | struct nes_hw_nic_sq_wqe *sq_vbase; /* virtual address of sq */ | ||
854 | struct nes_hw_nic_rq_wqe *rq_vbase; /* virtual address of rq */ | ||
855 | struct sk_buff *tx_skb[NES_NIC_WQ_SIZE]; | ||
856 | struct sk_buff *rx_skb[NES_NIC_WQ_SIZE]; | ||
857 | dma_addr_t frag_paddr[NES_NIC_WQ_SIZE]; | ||
858 | unsigned long first_frag_overflow[BITS_TO_LONGS(NES_NIC_WQ_SIZE)]; | ||
859 | dma_addr_t sq_pbase; /* PCI memory for host rings */ | ||
860 | dma_addr_t rq_pbase; /* PCI memory for host rings */ | ||
861 | |||
862 | u16 qp_id; | ||
863 | u16 sq_head; | ||
864 | u16 sq_tail; | ||
865 | u16 sq_size; | ||
866 | u16 rq_head; | ||
867 | u16 rq_tail; | ||
868 | u16 rq_size; | ||
869 | u8 replenishing_rq; | ||
870 | u8 reserved; | ||
871 | |||
872 | spinlock_t sq_lock; | ||
873 | spinlock_t rq_lock; | ||
874 | }; | ||
875 | |||
876 | struct nes_hw_nic_cq { | ||
877 | struct nes_hw_nic_cqe volatile *cq_vbase; /* PCI memory for host rings */ | ||
878 | void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_nic_cq *cq); | ||
879 | dma_addr_t cq_pbase; /* PCI memory for host rings */ | ||
880 | int rx_cqes_completed; | ||
881 | int cqe_allocs_pending; | ||
882 | int rx_pkts_indicated; | ||
883 | u16 cq_head; | ||
884 | u16 cq_size; | ||
885 | u16 cq_number; | ||
886 | u8 cqes_pending; | ||
887 | }; | ||
888 | |||
889 | struct nes_hw_qp { | ||
890 | struct nes_hw_qp_wqe *sq_vbase; /* PCI memory for host rings */ | ||
891 | struct nes_hw_qp_wqe *rq_vbase; /* PCI memory for host rings */ | ||
892 | void *q2_vbase; /* PCI memory for host rings */ | ||
893 | dma_addr_t sq_pbase; /* PCI memory for host rings */ | ||
894 | dma_addr_t rq_pbase; /* PCI memory for host rings */ | ||
895 | dma_addr_t q2_pbase; /* PCI memory for host rings */ | ||
896 | u32 qp_id; | ||
897 | u16 sq_head; | ||
898 | u16 sq_tail; | ||
899 | u16 sq_size; | ||
900 | u16 rq_head; | ||
901 | u16 rq_tail; | ||
902 | u16 rq_size; | ||
903 | u8 rq_encoded_size; | ||
904 | u8 sq_encoded_size; | ||
905 | }; | ||
906 | |||
907 | struct nes_hw_cq { | ||
908 | struct nes_hw_cqe volatile *cq_vbase; /* PCI memory for host rings */ | ||
909 | void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq); | ||
910 | dma_addr_t cq_pbase; /* PCI memory for host rings */ | ||
911 | u16 cq_head; | ||
912 | u16 cq_size; | ||
913 | u16 cq_number; | ||
914 | }; | ||
915 | |||
916 | struct nes_hw_ceq { | ||
917 | struct nes_hw_ceqe volatile *ceq_vbase; /* PCI memory for host rings */ | ||
918 | dma_addr_t ceq_pbase; /* PCI memory for host rings */ | ||
919 | u16 ceq_head; | ||
920 | u16 ceq_size; | ||
921 | }; | ||
922 | |||
923 | struct nes_hw_aeq { | ||
924 | struct nes_hw_aeqe volatile *aeq_vbase; /* PCI memory for host rings */ | ||
925 | dma_addr_t aeq_pbase; /* PCI memory for host rings */ | ||
926 | u16 aeq_head; | ||
927 | u16 aeq_size; | ||
928 | }; | ||
929 | |||
930 | struct nic_qp_map { | ||
931 | u8 qpid; | ||
932 | u8 nic_index; | ||
933 | u8 logical_port; | ||
934 | u8 is_hnic; | ||
935 | }; | ||
936 | |||
937 | #define NES_CQP_ARP_AEQ_INDEX_MASK 0x000f0000 | ||
938 | #define NES_CQP_ARP_AEQ_INDEX_SHIFT 16 | ||
939 | |||
940 | #define NES_CQP_APBVT_ADD 0x00008000 | ||
941 | #define NES_CQP_APBVT_NIC_SHIFT 16 | ||
942 | |||
943 | #define NES_ARP_ADD 1 | ||
944 | #define NES_ARP_DELETE 2 | ||
945 | #define NES_ARP_RESOLVE 3 | ||
946 | |||
947 | #define NES_MAC_SW_IDLE 0 | ||
948 | #define NES_MAC_SW_INTERRUPT 1 | ||
949 | #define NES_MAC_SW_MH 2 | ||
950 | |||
951 | struct nes_arp_entry { | ||
952 | u32 ip_addr; | ||
953 | u8 mac_addr[ETH_ALEN]; | ||
954 | }; | ||
955 | |||
956 | #define NES_NIC_FAST_TIMER 96 | ||
957 | #define NES_NIC_FAST_TIMER_LOW 40 | ||
958 | #define NES_NIC_FAST_TIMER_HIGH 1000 | ||
959 | #define DEFAULT_NES_QL_HIGH 256 | ||
960 | #define DEFAULT_NES_QL_LOW 16 | ||
961 | #define DEFAULT_NES_QL_TARGET 64 | ||
962 | #define DEFAULT_JUMBO_NES_QL_LOW 12 | ||
963 | #define DEFAULT_JUMBO_NES_QL_TARGET 40 | ||
964 | #define DEFAULT_JUMBO_NES_QL_HIGH 128 | ||
965 | #define NES_NIC_CQ_DOWNWARD_TREND 8 | ||
966 | |||
967 | struct nes_hw_tune_timer { | ||
968 | /* u16 cq_count; */ | ||
969 | u16 threshold_low; | ||
970 | u16 threshold_target; | ||
971 | u16 threshold_high; | ||
972 | u16 timer_in_use; | ||
973 | u16 timer_in_use_old; | ||
974 | u16 timer_in_use_min; | ||
975 | u16 timer_in_use_max; | ||
976 | u8 timer_direction_upward; | ||
977 | u8 timer_direction_downward; | ||
978 | u16 cq_count_old; | ||
979 | u8 cq_direction_downward; | ||
980 | }; | ||
981 | |||
982 | #define NES_TIMER_INT_LIMIT 2 | ||
983 | #define NES_TIMER_INT_LIMIT_DYNAMIC 10 | ||
984 | #define NES_TIMER_ENABLE_LIMIT 4 | ||
985 | #define NES_MAX_LINK_INTERRUPTS 128 | ||
986 | #define NES_MAX_LINK_CHECK 200 | ||
987 | |||
988 | struct nes_adapter { | ||
989 | u64 fw_ver; | ||
990 | unsigned long *allocated_qps; | ||
991 | unsigned long *allocated_cqs; | ||
992 | unsigned long *allocated_mrs; | ||
993 | unsigned long *allocated_pds; | ||
994 | unsigned long *allocated_arps; | ||
995 | struct nes_qp **qp_table; | ||
996 | struct workqueue_struct *work_q; | ||
997 | |||
998 | struct list_head list; | ||
999 | struct list_head active_listeners; | ||
1000 | /* list of the netdevs associated with each logical port */ | ||
1001 | struct list_head nesvnic_list[4]; | ||
1002 | |||
1003 | struct timer_list mh_timer; | ||
1004 | struct timer_list lc_timer; | ||
1005 | struct work_struct work; | ||
1006 | spinlock_t resource_lock; | ||
1007 | spinlock_t phy_lock; | ||
1008 | spinlock_t pbl_lock; | ||
1009 | spinlock_t periodic_timer_lock; | ||
1010 | |||
1011 | struct nes_arp_entry arp_table[NES_MAX_ARP_TABLE_SIZE]; | ||
1012 | |||
1013 | /* Adapter CEQ and AEQs */ | ||
1014 | struct nes_hw_ceq ceq[16]; | ||
1015 | struct nes_hw_aeq aeq[8]; | ||
1016 | |||
1017 | struct nes_hw_tune_timer tune_timer; | ||
1018 | |||
1019 | unsigned long doorbell_start; | ||
1020 | |||
1021 | u32 hw_rev; | ||
1022 | u32 vendor_id; | ||
1023 | u32 vendor_part_id; | ||
1024 | u32 device_cap_flags; | ||
1025 | u32 tick_delta; | ||
1026 | u32 timer_int_req; | ||
1027 | u32 arp_table_size; | ||
1028 | u32 next_arp_index; | ||
1029 | |||
1030 | u32 max_mr; | ||
1031 | u32 max_256pbl; | ||
1032 | u32 max_4kpbl; | ||
1033 | u32 free_256pbl; | ||
1034 | u32 free_4kpbl; | ||
1035 | u32 max_mr_size; | ||
1036 | u32 max_qp; | ||
1037 | u32 next_qp; | ||
1038 | u32 max_irrq; | ||
1039 | u32 max_qp_wr; | ||
1040 | u32 max_sge; | ||
1041 | u32 max_cq; | ||
1042 | u32 next_cq; | ||
1043 | u32 max_cqe; | ||
1044 | u32 max_pd; | ||
1045 | u32 base_pd; | ||
1046 | u32 next_pd; | ||
1047 | u32 hte_index_mask; | ||
1048 | |||
1049 | /* EEPROM information */ | ||
1050 | u32 rx_pool_size; | ||
1051 | u32 tx_pool_size; | ||
1052 | u32 rx_threshold; | ||
1053 | u32 tcp_timer_core_clk_divisor; | ||
1054 | u32 iwarp_config; | ||
1055 | u32 cm_config; | ||
1056 | u32 sws_timer_config; | ||
1057 | u32 tcp_config1; | ||
1058 | u32 wqm_wat; | ||
1059 | u32 core_clock; | ||
1060 | u32 firmware_version; | ||
1061 | |||
1062 | u32 nic_rx_eth_route_err; | ||
1063 | |||
1064 | u32 et_rx_coalesce_usecs; | ||
1065 | u32 et_rx_max_coalesced_frames; | ||
1066 | u32 et_rx_coalesce_usecs_irq; | ||
1067 | u32 et_rx_max_coalesced_frames_irq; | ||
1068 | u32 et_pkt_rate_low; | ||
1069 | u32 et_rx_coalesce_usecs_low; | ||
1070 | u32 et_rx_max_coalesced_frames_low; | ||
1071 | u32 et_pkt_rate_high; | ||
1072 | u32 et_rx_coalesce_usecs_high; | ||
1073 | u32 et_rx_max_coalesced_frames_high; | ||
1074 | u32 et_rate_sample_interval; | ||
1075 | u32 timer_int_limit; | ||
1076 | |||
1077 | /* Adapter base MAC address */ | ||
1078 | u32 mac_addr_low; | ||
1079 | u16 mac_addr_high; | ||
1080 | |||
1081 | u16 firmware_eeprom_offset; | ||
1082 | u16 software_eeprom_offset; | ||
1083 | |||
1084 | u16 max_irrq_wr; | ||
1085 | |||
1086 | /* pd config for each port */ | ||
1087 | u16 pd_config_size[4]; | ||
1088 | u16 pd_config_base[4]; | ||
1089 | |||
1090 | u16 link_interrupt_count[4]; | ||
1091 | |||
1092 | /* the phy index for each port */ | ||
1093 | u8 phy_index[4]; | ||
1094 | u8 mac_sw_state[4]; | ||
1095 | u8 mac_link_down[4]; | ||
1096 | u8 phy_type[4]; | ||
1097 | |||
1098 | /* PCI information */ | ||
1099 | unsigned int devfn; | ||
1100 | unsigned char bus_number; | ||
1101 | unsigned char OneG_Mode; | ||
1102 | |||
1103 | unsigned char ref_count; | ||
1104 | u8 netdev_count; | ||
1105 | u8 netdev_max; /* from host nic address count in EEPROM */ | ||
1106 | u8 port_count; | ||
1107 | u8 virtwq; | ||
1108 | u8 et_use_adaptive_rx_coalesce; | ||
1109 | u8 adapter_fcn_count; | ||
1110 | }; | ||
1111 | |||
1112 | struct nes_pbl { | ||
1113 | u64 *pbl_vbase; | ||
1114 | dma_addr_t pbl_pbase; | ||
1115 | struct page *page; | ||
1116 | unsigned long user_base; | ||
1117 | u32 pbl_size; | ||
1118 | struct list_head list; | ||
1119 | /* TODO: need to add list for two level tables */ | ||
1120 | }; | ||
1121 | |||
1122 | struct nes_listener { | ||
1123 | struct work_struct work; | ||
1124 | struct workqueue_struct *wq; | ||
1125 | struct nes_vnic *nesvnic; | ||
1126 | struct iw_cm_id *cm_id; | ||
1127 | struct list_head list; | ||
1128 | unsigned long socket; | ||
1129 | u8 accept_failed; | ||
1130 | }; | ||
1131 | |||
1132 | struct nes_ib_device; | ||
1133 | |||
1134 | struct nes_vnic { | ||
1135 | struct nes_ib_device *nesibdev; | ||
1136 | u64 sq_full; | ||
1137 | u64 sq_locked; | ||
1138 | u64 tso_requests; | ||
1139 | u64 segmented_tso_requests; | ||
1140 | u64 linearized_skbs; | ||
1141 | u64 tx_sw_dropped; | ||
1142 | u64 endnode_nstat_rx_discard; | ||
1143 | u64 endnode_nstat_rx_octets; | ||
1144 | u64 endnode_nstat_rx_frames; | ||
1145 | u64 endnode_nstat_tx_octets; | ||
1146 | u64 endnode_nstat_tx_frames; | ||
1147 | u64 endnode_ipv4_tcp_retransmits; | ||
1148 | /* void *mem; */ | ||
1149 | struct nes_device *nesdev; | ||
1150 | struct net_device *netdev; | ||
1151 | struct vlan_group *vlan_grp; | ||
1152 | atomic_t rx_skbs_needed; | ||
1153 | atomic_t rx_skb_timer_running; | ||
1154 | int budget; | ||
1155 | u32 msg_enable; | ||
1156 | /* u32 tx_avail; */ | ||
1157 | __be32 local_ipaddr; | ||
1158 | struct napi_struct napi; | ||
1159 | spinlock_t tx_lock; /* could use netdev tx lock? */ | ||
1160 | struct timer_list rq_wqes_timer; | ||
1161 | u32 nic_mem_size; | ||
1162 | void *nic_vbase; | ||
1163 | dma_addr_t nic_pbase; | ||
1164 | struct nes_hw_nic nic; | ||
1165 | struct nes_hw_nic_cq nic_cq; | ||
1166 | u32 mcrq_qp_id; | ||
1167 | struct nes_ucontext *mcrq_ucontext; | ||
1168 | struct nes_cqp_request *(*get_cqp_request)(struct nes_device *nesdev); | ||
1169 | void (*post_cqp_request)(struct nes_device *, struct nes_cqp_request *, int); | ||
1170 | int (*mcrq_mcast_filter)(struct nes_vnic *nesvnic, __u8 *dmi_addr); | ||
1171 | struct net_device_stats netstats; | ||
1172 | /* used to put the netdev on the adapters logical port list */ | ||
1173 | struct list_head list; | ||
1174 | u16 max_frame_size; | ||
1175 | u8 netdev_open; | ||
1176 | u8 linkup; | ||
1177 | u8 logical_port; | ||
1178 | u8 netdev_index; /* might not be needed, indexes nesdev->netdev */ | ||
1179 | u8 perfect_filter_index; | ||
1180 | u8 nic_index; | ||
1181 | u8 qp_nic_index[4]; | ||
1182 | u8 next_qp_nic_index; | ||
1183 | u8 of_device_registered; | ||
1184 | u8 rdma_enabled; | ||
1185 | u8 rx_checksum_disabled; | ||
1186 | }; | ||
1187 | |||
1188 | struct nes_ib_device { | ||
1189 | struct ib_device ibdev; | ||
1190 | struct nes_vnic *nesvnic; | ||
1191 | |||
1192 | /* Virtual RNIC Limits */ | ||
1193 | u32 max_mr; | ||
1194 | u32 max_qp; | ||
1195 | u32 max_cq; | ||
1196 | u32 max_pd; | ||
1197 | u32 num_mr; | ||
1198 | u32 num_qp; | ||
1199 | u32 num_cq; | ||
1200 | u32 num_pd; | ||
1201 | }; | ||
1202 | |||
1203 | #define nes_vlan_rx vlan_hwaccel_receive_skb | ||
1204 | #define nes_netif_rx netif_receive_skb | ||
1205 | |||
1206 | #endif /* __NES_HW_H */ | ||
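A convention that runs through the whole header: 64-bit quantities (DMA addresses, completion contexts, PBL pointers, fragment addresses) are carried in WQEs as consecutive _LOW/_HIGH pairs of little-endian 32-bit words. The driver's own helpers for this are not part of this hunk (set_wqe_32bit_value is used later in nes_nic.c and is assumed to be declared in nes.h), so the following is only an illustrative sketch of the layout, not their actual definitions:

/* Sketch of the _LOW/_HIGH split used for 64-bit values in WQE words;
 * the driver's real helpers are assumed to live in nes.h. */
static inline void sketch_set_wqe_64bit(__le32 *wqe_words, u32 low_index, u64 value)
{
	wqe_words[low_index] = cpu_to_le32((u32)value);
	wqe_words[low_index + 1] = cpu_to_le32((u32)(value >> 32));
}

/* Example: point fragment 0 of a NIC RQ WQE at a DMA-mapped receive buffer. */
static inline void sketch_set_rq_frag0(struct nes_hw_nic_rq_wqe *rq_wqe, dma_addr_t addr)
{
	sketch_set_wqe_64bit(rq_wqe->wqe_words, NES_NIC_RQ_WQE_FRAG0_LOW_IDX, addr);
}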
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c new file mode 100644 index 000000000000..b6cc265aa9a4 --- /dev/null +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
@@ -0,0 +1,1703 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/module.h> | ||
35 | #include <linux/moduleparam.h> | ||
36 | #include <linux/netdevice.h> | ||
37 | #include <linux/etherdevice.h> | ||
38 | #include <linux/ip.h> | ||
39 | #include <linux/tcp.h> | ||
40 | #include <linux/if_arp.h> | ||
41 | #include <linux/if_vlan.h> | ||
42 | #include <linux/ethtool.h> | ||
43 | #include <net/tcp.h> | ||
44 | |||
45 | #include <net/inet_common.h> | ||
46 | #include <linux/inet.h> | ||
47 | |||
48 | #include "nes.h" | ||
49 | |||
50 | static struct nic_qp_map nic_qp_mapping_0[] = { | ||
51 | {16,0,0,1},{24,4,0,0},{28,8,0,0},{32,12,0,0}, | ||
52 | {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0}, | ||
53 | {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0}, | ||
54 | {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0} | ||
55 | }; | ||
56 | |||
57 | static struct nic_qp_map nic_qp_mapping_1[] = { | ||
58 | {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0}, | ||
59 | {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0} | ||
60 | }; | ||
61 | |||
62 | static struct nic_qp_map nic_qp_mapping_2[] = { | ||
63 | {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0} | ||
64 | }; | ||
65 | |||
66 | static struct nic_qp_map nic_qp_mapping_3[] = { | ||
67 | {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0} | ||
68 | }; | ||
69 | |||
70 | static struct nic_qp_map nic_qp_mapping_4[] = { | ||
71 | {28,8,0,0},{32,12,0,0} | ||
72 | }; | ||
73 | |||
74 | static struct nic_qp_map nic_qp_mapping_5[] = { | ||
75 | {29,9,1,0},{33,13,1,0} | ||
76 | }; | ||
77 | |||
78 | static struct nic_qp_map nic_qp_mapping_6[] = { | ||
79 | {30,10,2,0},{34,14,2,0} | ||
80 | }; | ||
81 | |||
82 | static struct nic_qp_map nic_qp_mapping_7[] = { | ||
83 | {31,11,3,0},{35,15,3,0} | ||
84 | }; | ||
85 | |||
86 | static struct nic_qp_map *nic_qp_mapping_per_function[] = { | ||
87 | nic_qp_mapping_0, nic_qp_mapping_1, nic_qp_mapping_2, nic_qp_mapping_3, | ||
88 | nic_qp_mapping_4, nic_qp_mapping_5, nic_qp_mapping_6, nic_qp_mapping_7 | ||
89 | }; | ||
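Each entry in these tables is a struct nic_qp_map from nes_hw.h, i.e. {qpid, nic_index, logical_port, is_hnic}; the first entry of nic_qp_mapping_0, {16,0,0,1}, therefore reads as QP 16, NIC index 0, logical port 0, host NIC. A hedged sketch of how the per-function table would be selected; indexing by PCI_FUNC(), as the ARP code in nes_hw.c does for its AEQ index, is an assumption about the init path, which is not visible in this hunk:

/* Sketch only: pick the NIC QP mapping table for this PCI function. */
static struct nic_qp_map *nes_sketch_nic_qp_mapping(struct nes_device *nesdev)
{
	return nic_qp_mapping_per_function[PCI_FUNC(nesdev->pcidev->devfn)];
}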
90 | |||
91 | static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | ||
92 | | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; | ||
93 | static int debug = -1; | ||
94 | |||
95 | |||
96 | static int nes_netdev_open(struct net_device *); | ||
97 | static int nes_netdev_stop(struct net_device *); | ||
98 | static int nes_netdev_start_xmit(struct sk_buff *, struct net_device *); | ||
99 | static struct net_device_stats *nes_netdev_get_stats(struct net_device *); | ||
100 | static void nes_netdev_tx_timeout(struct net_device *); | ||
101 | static int nes_netdev_set_mac_address(struct net_device *, void *); | ||
102 | static int nes_netdev_change_mtu(struct net_device *, int); | ||
103 | |||
104 | /** | ||
105 | * nes_netdev_poll - NAPI poll handler for the NIC completion queue | ||
106 | */ | ||
107 | static int nes_netdev_poll(struct napi_struct *napi, int budget) | ||
108 | { | ||
109 | struct nes_vnic *nesvnic = container_of(napi, struct nes_vnic, napi); | ||
110 | struct net_device *netdev = nesvnic->netdev; | ||
111 | struct nes_device *nesdev = nesvnic->nesdev; | ||
112 | struct nes_hw_nic_cq *nescq = &nesvnic->nic_cq; | ||
113 | |||
114 | nesvnic->budget = budget; | ||
115 | nescq->cqes_pending = 0; | ||
116 | nescq->rx_cqes_completed = 0; | ||
117 | nescq->cqe_allocs_pending = 0; | ||
118 | nescq->rx_pkts_indicated = 0; | ||
119 | |||
120 | nes_nic_ce_handler(nesdev, nescq); | ||
121 | |||
122 | if (nescq->cqes_pending == 0) { | ||
123 | netif_rx_complete(netdev, napi); | ||
124 | /* clear out completed cqes and arm */ | ||
125 | nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | | ||
126 | nescq->cq_number | (nescq->cqe_allocs_pending << 16)); | ||
127 | nes_read32(nesdev->regs+NES_CQE_ALLOC); | ||
128 | } else { | ||
129 | /* clear out completed cqes but don't arm */ | ||
130 | nes_write32(nesdev->regs+NES_CQE_ALLOC, | ||
131 | nescq->cq_number | (nescq->cqe_allocs_pending << 16)); | ||
132 | nes_debug(NES_DBG_NETDEV, "%s: exiting with work pending\n", | ||
133 | nesvnic->netdev->name); | ||
134 | } | ||
135 | return nescq->rx_pkts_indicated; | ||
136 | } | ||
137 | |||
138 | |||
139 | /** | ||
140 | * nes_netdev_open - Activate the network interface | ||
141 | * (ifconfig ethX up). | ||
142 | */ | ||
143 | static int nes_netdev_open(struct net_device *netdev) | ||
144 | { | ||
145 | u32 macaddr_low; | ||
146 | u16 macaddr_high; | ||
147 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
148 | struct nes_device *nesdev = nesvnic->nesdev; | ||
149 | int ret; | ||
150 | int i; | ||
151 | struct nes_vnic *first_nesvnic; | ||
152 | u32 nic_active_bit; | ||
153 | u32 nic_active; | ||
154 | |||
155 | assert(nesdev != NULL); | ||
156 | |||
157 | first_nesvnic = list_entry(nesdev->nesadapter->nesvnic_list[nesdev->mac_index].next, | ||
158 | struct nes_vnic, list); | ||
159 | |||
160 | if (netif_msg_ifup(nesvnic)) | ||
161 | printk(KERN_INFO PFX "%s: enabling interface\n", netdev->name); | ||
162 | |||
163 | ret = nes_init_nic_qp(nesdev, netdev); | ||
164 | if (ret) { | ||
165 | return ret; | ||
166 | } | ||
167 | |||
168 | netif_carrier_off(netdev); | ||
169 | netif_stop_queue(netdev); | ||
170 | |||
171 | if ((!nesvnic->of_device_registered) && (nesvnic->rdma_enabled)) { | ||
172 | nesvnic->nesibdev = nes_init_ofa_device(netdev); | ||
173 | if (nesvnic->nesibdev == NULL) { | ||
174 | printk(KERN_ERR PFX "%s: nesvnic->nesibdev alloc failed", netdev->name); | ||
175 | } else { | ||
176 | nesvnic->nesibdev->nesvnic = nesvnic; | ||
177 | ret = nes_register_ofa_device(nesvnic->nesibdev); | ||
178 | if (ret) { | ||
179 | printk(KERN_ERR PFX "%s: Unable to register RDMA device, ret = %d\n", | ||
180 | netdev->name, ret); | ||
181 | } | ||
182 | } | ||
183 | } | ||
184 | /* Set packet filters */ | ||
185 | nic_active_bit = 1 << nesvnic->nic_index; | ||
186 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE); | ||
187 | nic_active |= nic_active_bit; | ||
188 | nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active); | ||
189 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE); | ||
190 | nic_active |= nic_active_bit; | ||
191 | nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active); | ||
192 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON); | ||
193 | nic_active |= nic_active_bit; | ||
194 | nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active); | ||
195 | |||
196 | macaddr_high = ((u16)netdev->dev_addr[0]) << 8; | ||
197 | macaddr_high += (u16)netdev->dev_addr[1]; | ||
198 | macaddr_low = ((u32)netdev->dev_addr[2]) << 24; | ||
199 | macaddr_low += ((u32)netdev->dev_addr[3]) << 16; | ||
200 | macaddr_low += ((u32)netdev->dev_addr[4]) << 8; | ||
201 | macaddr_low += (u32)netdev->dev_addr[5]; | ||
202 | |||
203 | /* Program the various MAC regs */ | ||
204 | for (i = 0; i < NES_MAX_PORT_COUNT; i++) { | ||
205 | if (nesvnic->qp_nic_index[i] == 0xf) { | ||
206 | break; | ||
207 | } | ||
208 | nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW" | ||
209 | " (Addr:%08X) = %08X, HIGH = %08X.\n", | ||
210 | i, nesvnic->qp_nic_index[i], | ||
211 | NES_IDX_PERFECT_FILTER_LOW+((nesvnic->perfect_filter_index + i) * 8), | ||
212 | macaddr_low, | ||
213 | (u32)macaddr_high | NES_MAC_ADDR_VALID | | ||
214 | ((((u32)nesvnic->nic_index) << 16))); | ||
215 | nes_write_indexed(nesdev, | ||
216 | NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8), | ||
217 | macaddr_low); | ||
218 | nes_write_indexed(nesdev, | ||
219 | NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8), | ||
220 | (u32)macaddr_high | NES_MAC_ADDR_VALID | | ||
221 | ((((u32)nesvnic->nic_index) << 16))); | ||
222 | } | ||
223 | |||
224 | |||
225 | nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | | ||
226 | nesvnic->nic_cq.cq_number); | ||
227 | nes_read32(nesdev->regs+NES_CQE_ALLOC); | ||
228 | |||
229 | if (first_nesvnic->linkup) { | ||
230 | /* Enable network packets */ | ||
231 | nesvnic->linkup = 1; | ||
232 | netif_start_queue(netdev); | ||
233 | netif_carrier_on(netdev); | ||
234 | } | ||
235 | napi_enable(&nesvnic->napi); | ||
236 | nesvnic->netdev_open = 1; | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | |||
242 | /** | ||
243 | * nes_netdev_stop - deactivate the network interface (ifconfig ethX down) | ||
244 | */ | ||
245 | static int nes_netdev_stop(struct net_device *netdev) | ||
246 | { | ||
247 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
248 | struct nes_device *nesdev = nesvnic->nesdev; | ||
249 | u32 nic_active_mask; | ||
250 | u32 nic_active; | ||
251 | |||
252 | nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n", | ||
253 | nesvnic, nesdev, netdev, netdev->name); | ||
254 | if (nesvnic->netdev_open == 0) | ||
255 | return 0; | ||
256 | |||
257 | if (netif_msg_ifdown(nesvnic)) | ||
258 | printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name); | ||
259 | |||
260 | /* Disable network packets */ | ||
261 | napi_disable(&nesvnic->napi); | ||
262 | netif_stop_queue(netdev); | ||
263 | if ((nesdev->netdev[0] == netdev) && (nesvnic->logical_port == nesdev->mac_index)) { | ||
264 | nes_write_indexed(nesdev, | ||
265 | NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff); | ||
266 | } | ||
267 | |||
268 | nic_active_mask = ~((u32)(1 << nesvnic->nic_index)); | ||
269 | nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_HIGH+ | ||
270 | (nesvnic->perfect_filter_index*8), 0); | ||
271 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE); | ||
272 | nic_active &= nic_active_mask; | ||
273 | nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active); | ||
274 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL); | ||
275 | nic_active &= nic_active_mask; | ||
276 | nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active); | ||
277 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE); | ||
278 | nic_active &= nic_active_mask; | ||
279 | nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active); | ||
280 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL); | ||
281 | nic_active &= nic_active_mask; | ||
282 | nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active); | ||
283 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON); | ||
284 | nic_active &= nic_active_mask; | ||
285 | nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active); | ||
286 | |||
287 | |||
288 | if (nesvnic->of_device_registered) { | ||
289 | nes_destroy_ofa_device(nesvnic->nesibdev); | ||
290 | nesvnic->nesibdev = NULL; | ||
291 | nesvnic->of_device_registered = 0; | ||
292 | } | ||
293 | nes_destroy_nic_qp(nesvnic); | ||
294 | |||
295 | nesvnic->netdev_open = 0; | ||
296 | |||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | |||
301 | /** | ||
302 | * nes_nic_send | ||
303 | */ | ||
304 | static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev) | ||
305 | { | ||
306 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
307 | struct nes_device *nesdev = nesvnic->nesdev; | ||
308 | struct nes_hw_nic *nesnic = &nesvnic->nic; | ||
309 | struct nes_hw_nic_sq_wqe *nic_sqe; | ||
310 | struct tcphdr *tcph; | ||
311 | __le16 *wqe_fragment_length; | ||
312 | u32 wqe_misc; | ||
313 | u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */ | ||
314 | u16 skb_fragment_index; | ||
315 | dma_addr_t bus_address; | ||
316 | |||
317 | nic_sqe = &nesnic->sq_vbase[nesnic->sq_head]; | ||
318 | wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX]; | ||
319 | |||
320 | /* setup the VLAN tag if present */ | ||
321 | if (vlan_tx_tag_present(skb)) { | ||
322 | nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n", | ||
323 | netdev->name, vlan_tx_tag_get(skb)); | ||
324 | wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE; | ||
325 | wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb); | ||
326 | } else | ||
327 | wqe_misc = 0; | ||
328 | |||
329 | /* bump past the vlan tag */ | ||
330 | wqe_fragment_length++; | ||
331 | /* wqe_fragment_address = (u64 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX]; */ | ||
332 | |||
333 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
334 | tcph = tcp_hdr(skb); | ||
336 | if (skb_is_gso(skb)) { | ||
337 | /* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... seg size = %u\n", | ||
338 | netdev->name, skb_is_gso(skb)); */ | ||
339 | wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE | | ||
340 | NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb); | ||
341 | set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX, | ||
342 | ((u32)tcph->doff) | | ||
343 | (((u32)(((unsigned char *)tcph) - skb->data)) << 4)); | ||
344 | } else { | ||
345 | wqe_misc |= NES_NIC_SQ_WQE_COMPLETION; | ||
346 | } | ||
348 | } else { /* no checksum offload requested */ | ||
349 | wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM | NES_NIC_SQ_WQE_COMPLETION; | ||
350 | } | ||
351 | |||
352 | set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX, | ||
353 | skb->len); | ||
354 | memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer, | ||
355 | skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE), skb_headlen(skb))); | ||
356 | wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE), | ||
357 | skb_headlen(skb))); | ||
358 | wqe_fragment_length[1] = 0; | ||
359 | if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) { | ||
360 | if ((skb_shinfo(skb)->nr_frags + 1) > 4) { | ||
361 | nes_debug(NES_DBG_NIC_TX, "%s: Packet with %u fragments not sent, skb_headlen=%u\n", | ||
362 | netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb)); | ||
363 | kfree_skb(skb); | ||
364 | nesvnic->tx_sw_dropped++; | ||
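| /* the skb has already been freed; the caller treats any non-NETDEV_TX_OK return | ||
|  * from nes_nic_send() as a drop and still reports NETDEV_TX_OK to the stack. */ | ||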
365 | return NETDEV_TX_LOCKED; | ||
366 | } | ||
367 | set_bit(nesnic->sq_head, nesnic->first_frag_overflow); | ||
368 | bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE, | ||
369 | skb_headlen(skb) - NES_FIRST_FRAG_SIZE, PCI_DMA_TODEVICE); | ||
370 | wqe_fragment_length[wqe_fragment_index++] = | ||
371 | cpu_to_le16(skb_headlen(skb) - NES_FIRST_FRAG_SIZE); | ||
372 | wqe_fragment_length[wqe_fragment_index] = 0; | ||
373 | set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX, | ||
374 | ((u64)(bus_address))); | ||
375 | nesnic->tx_skb[nesnic->sq_head] = skb; | ||
376 | } | ||
377 | |||
378 | if (skb_headlen(skb) == skb->len) { | ||
379 | if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) { | ||
380 | nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0; | ||
381 | nesnic->tx_skb[nesnic->sq_head] = NULL; | ||
382 | dev_kfree_skb(skb); | ||
383 | } | ||
384 | } else { | ||
385 | /* Deal with Fragments */ | ||
386 | nesnic->tx_skb[nesnic->sq_head] = skb; | ||
387 | for (skb_fragment_index = 0; skb_fragment_index < skb_shinfo(skb)->nr_frags; | ||
388 | skb_fragment_index++) { | ||
389 | bus_address = pci_map_page( nesdev->pcidev, | ||
390 | skb_shinfo(skb)->frags[skb_fragment_index].page, | ||
391 | skb_shinfo(skb)->frags[skb_fragment_index].page_offset, | ||
392 | skb_shinfo(skb)->frags[skb_fragment_index].size, | ||
393 | PCI_DMA_TODEVICE); | ||
394 | wqe_fragment_length[wqe_fragment_index] = | ||
395 | cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size); | ||
396 | set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index), | ||
397 | bus_address); | ||
398 | wqe_fragment_index++; | ||
399 | if (wqe_fragment_index < 5) | ||
400 | wqe_fragment_length[wqe_fragment_index] = 0; | ||
401 | } | ||
402 | } | ||
403 | |||
404 | set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc); | ||
405 | nesnic->sq_head++; | ||
406 | nesnic->sq_head &= nesnic->sq_size - 1; | ||
407 | |||
408 | return NETDEV_TX_OK; | ||
409 | } | ||
410 | |||
411 | |||
412 | /** | ||
413 | * nes_netdev_start_xmit | ||
414 | */ | ||
415 | static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) | ||
416 | { | ||
417 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
418 | struct nes_device *nesdev = nesvnic->nesdev; | ||
419 | struct nes_hw_nic *nesnic = &nesvnic->nic; | ||
420 | struct nes_hw_nic_sq_wqe *nic_sqe; | ||
421 | struct tcphdr *tcph; | ||
422 | /* struct udphdr *udph; */ | ||
423 | #define NES_MAX_TSO_FRAGS 18 | ||
424 | /* 64K segment plus overflow on each side */ | ||
425 | dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS]; | ||
426 | dma_addr_t bus_address; | ||
427 | u32 tso_frag_index; | ||
428 | u32 tso_frag_count; | ||
429 | u32 tso_wqe_length; | ||
430 | u32 curr_tcp_seq; | ||
431 | u32 wqe_count=1; | ||
432 | u32 send_rc; | ||
433 | struct iphdr *iph; | ||
434 | unsigned long flags; | ||
435 | __le16 *wqe_fragment_length; | ||
436 | u32 nr_frags; | ||
437 | u32 original_first_length; | ||
438 | // u64 *wqe_fragment_address; | ||
439 | /* first fragment (0) is used by copy buffer */ | ||
440 | u16 wqe_fragment_index=1; | ||
441 | u16 hoffset; | ||
442 | u16 nhoffset; | ||
443 | u16 wqes_needed; | ||
444 | u16 wqes_available; | ||
445 | u32 old_head; | ||
446 | u32 wqe_misc; | ||
447 | |||
448 | /* nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u," | ||
449 | " (%u frags), tso_size=%u\n", | ||
450 | netdev->name, skb->len, skb_headlen(skb), | ||
451 | skb_shinfo(skb)->nr_frags, skb_is_gso(skb)); | ||
452 | */ | ||
453 | |||
454 | if (!netif_carrier_ok(netdev)) | ||
455 | return NETDEV_TX_OK; | ||
456 | |||
457 | if (netif_queue_stopped(netdev)) | ||
458 | return NETDEV_TX_BUSY; | ||
459 | |||
460 | local_irq_save(flags); | ||
461 | if (!spin_trylock(&nesnic->sq_lock)) { | ||
462 | local_irq_restore(flags); | ||
463 | nesvnic->sq_locked++; | ||
464 | return NETDEV_TX_LOCKED; | ||
465 | } | ||
466 | |||
467 | /* Check if SQ is full */ | ||
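| /* the expression computes (sq_tail - sq_head) mod sq_size, i.e. the number of free | ||
|  * entries in the power-of-two ring (adding sq_size*2 keeps the arithmetic positive); | ||
|  * the queue is stopped when only one entry remains so the head never catches the tail. */ | ||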
468 | if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) { | ||
469 | if (!netif_queue_stopped(netdev)) { | ||
470 | netif_stop_queue(netdev); | ||
471 | barrier(); | ||
472 | if ((((((volatile u16)nesnic->sq_tail)+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) != 1) { | ||
473 | netif_start_queue(netdev); | ||
474 | goto sq_no_longer_full; | ||
475 | } | ||
476 | } | ||
477 | nesvnic->sq_full++; | ||
478 | spin_unlock_irqrestore(&nesnic->sq_lock, flags); | ||
479 | return NETDEV_TX_BUSY; | ||
480 | } | ||
481 | |||
482 | sq_no_longer_full: | ||
483 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
484 | if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) { | ||
485 | nr_frags++; | ||
486 | } | ||
487 | /* Check if too many fragments */ | ||
488 | if (unlikely((nr_frags > 4))) { | ||
489 | if (skb_is_gso(skb)) { | ||
490 | nesvnic->segmented_tso_requests++; | ||
491 | nesvnic->tso_requests++; | ||
492 | old_head = nesnic->sq_head; | ||
493 | /* Basically 4 fragments available per WQE with extended fragments */ | ||
494 | wqes_needed = nr_frags >> 2; | ||
495 | wqes_needed += (nr_frags&3)?1:0; | ||
496 | wqes_available = (((nesnic->sq_tail+nesnic->sq_size)-nesnic->sq_head) - 1) & | ||
497 | (nesnic->sq_size - 1); | ||
498 | |||
499 | if (unlikely(wqes_needed > wqes_available)) { | ||
500 | if (!netif_queue_stopped(netdev)) { | ||
501 | netif_stop_queue(netdev); | ||
502 | barrier(); | ||
503 | wqes_available = (((((volatile u16)nesnic->sq_tail)+nesnic->sq_size)-nesnic->sq_head) - 1) & | ||
504 | (nesnic->sq_size - 1); | ||
505 | if (wqes_needed <= wqes_available) { | ||
506 | netif_start_queue(netdev); | ||
507 | goto tso_sq_no_longer_full; | ||
508 | } | ||
509 | } | ||
510 | nesvnic->sq_full++; | ||
511 | spin_unlock_irqrestore(&nesnic->sq_lock, flags); | ||
512 | nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n", | ||
513 | netdev->name); | ||
514 | return NETDEV_TX_BUSY; | ||
515 | } | ||
516 | tso_sq_no_longer_full: | ||
517 | /* Map all the buffers */ | ||
518 | for (tso_frag_count=0; tso_frag_count < skb_shinfo(skb)->nr_frags; | ||
519 | tso_frag_count++) { | ||
520 | tso_bus_address[tso_frag_count] = pci_map_page( nesdev->pcidev, | ||
521 | skb_shinfo(skb)->frags[tso_frag_count].page, | ||
522 | skb_shinfo(skb)->frags[tso_frag_count].page_offset, | ||
523 | skb_shinfo(skb)->frags[tso_frag_count].size, | ||
524 | PCI_DMA_TODEVICE); | ||
525 | } | ||
526 | |||
527 | tso_frag_index = 0; | ||
528 | curr_tcp_seq = ntohl(tcp_hdr(skb)->seq); | ||
529 | hoffset = skb_transport_header(skb) - skb->data; | ||
530 | nhoffset = skb_network_header(skb) - skb->data; | ||
531 | original_first_length = hoffset + ((((struct tcphdr *)skb_transport_header(skb))->doff)<<2); | ||
532 | |||
533 | for (wqe_count=0; wqe_count<((u32)wqes_needed); wqe_count++) { | ||
534 | tso_wqe_length = 0; | ||
535 | nic_sqe = &nesnic->sq_vbase[nesnic->sq_head]; | ||
536 | wqe_fragment_length = | ||
537 | (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX]; | ||
538 | /* setup the VLAN tag if present */ | ||
539 | if (vlan_tx_tag_present(skb)) { | ||
540 | nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n", | ||
541 | netdev->name, vlan_tx_tag_get(skb) ); | ||
542 | wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE; | ||
543 | wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb); | ||
544 | } else | ||
545 | wqe_misc = 0; | ||
546 | |||
547 | /* bump past the vlan tag */ | ||
548 | wqe_fragment_length++; | ||
549 | |||
550 | /* Assumes header totally fits in allocated buffer and is in first fragment */ | ||
551 | if (original_first_length > NES_FIRST_FRAG_SIZE) { | ||
552 | nes_debug(NES_DBG_NIC_TX, "ERROR: SKB header too big, headlen=%u, FIRST_FRAG_SIZE=%u\n", | ||
553 | original_first_length, NES_FIRST_FRAG_SIZE); | ||
554 | nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u," | ||
555 | " (%u frags), tso_size=%u\n", | ||
556 | netdev->name, | ||
557 | skb->len, skb_headlen(skb), | ||
558 | skb_shinfo(skb)->nr_frags, skb_is_gso(skb)); | ||
559 | } | ||
560 | memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer, | ||
561 | skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE), | ||
562 | original_first_length)); | ||
563 | iph = (struct iphdr *) | ||
564 | (&nesnic->first_frag_vbase[nesnic->sq_head].buffer[nhoffset]); | ||
565 | tcph = (struct tcphdr *) | ||
566 | (&nesnic->first_frag_vbase[nesnic->sq_head].buffer[hoffset]); | ||
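| /* each WQE carries its own copy of the headers, so the TCP flags are patched per | ||
|  * segment: only the last WQE keeps FIN/PSH/RST/URG, only the first keeps SYN, and | ||
|  * seq is rewritten from curr_tcp_seq, which advances by the payload already queued. */ | ||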
567 | if ((wqe_count+1)!=(u32)wqes_needed) { | ||
568 | tcph->fin = 0; | ||
569 | tcph->psh = 0; | ||
570 | tcph->rst = 0; | ||
571 | tcph->urg = 0; | ||
572 | } | ||
573 | if (wqe_count) { | ||
574 | tcph->syn = 0; | ||
575 | } | ||
576 | tcph->seq = htonl(curr_tcp_seq); | ||
577 | wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE), | ||
578 | original_first_length)); | ||
579 | |||
580 | wqe_fragment_index = 1; | ||
581 | if ((wqe_count==0) && (skb_headlen(skb) > original_first_length)) { | ||
582 | set_bit(nesnic->sq_head, nesnic->first_frag_overflow); | ||
583 | bus_address = pci_map_single(nesdev->pcidev, skb->data + original_first_length, | ||
584 | skb_headlen(skb) - original_first_length, PCI_DMA_TODEVICE); | ||
585 | wqe_fragment_length[wqe_fragment_index++] = | ||
586 | cpu_to_le16(skb_headlen(skb) - original_first_length); | ||
587 | wqe_fragment_length[wqe_fragment_index] = 0; | ||
588 | set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX, | ||
589 | bus_address); | ||
590 | } | ||
591 | while (wqe_fragment_index < 5) { | ||
592 | wqe_fragment_length[wqe_fragment_index] = | ||
593 | cpu_to_le16(skb_shinfo(skb)->frags[tso_frag_index].size); | ||
594 | set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index), | ||
595 | (u64)tso_bus_address[tso_frag_index]); | ||
596 | wqe_fragment_index++; | ||
597 | tso_wqe_length += skb_shinfo(skb)->frags[tso_frag_index++].size; | ||
598 | if (wqe_fragment_index < 5) | ||
599 | wqe_fragment_length[wqe_fragment_index] = 0; | ||
600 | if (tso_frag_index == tso_frag_count) | ||
601 | break; | ||
602 | } | ||
603 | if ((wqe_count+1) == (u32)wqes_needed) { | ||
604 | nesnic->tx_skb[nesnic->sq_head] = skb; | ||
605 | } else { | ||
606 | nesnic->tx_skb[nesnic->sq_head] = NULL; | ||
607 | } | ||
608 | wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb); | ||
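| /* in this kernel skb_is_gso() evaluates to the skb's gso_size, so it doubles as the | ||
|  * MSS in the low bits of wqe_misc and as the per-segment payload bound tested below. */ | ||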
609 | if ((tso_wqe_length + original_first_length) > skb_is_gso(skb)) { | ||
610 | wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE; | ||
611 | } else { | ||
612 | iph->tot_len = htons(tso_wqe_length + original_first_length - nhoffset); | ||
613 | } | ||
614 | |||
615 | set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, | ||
616 | wqe_misc); | ||
617 | set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX, | ||
618 | ((u32)tcph->doff) | (((u32)hoffset) << 4)); | ||
619 | |||
620 | set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX, | ||
621 | tso_wqe_length + original_first_length); | ||
622 | curr_tcp_seq += tso_wqe_length; | ||
623 | nesnic->sq_head++; | ||
624 | nesnic->sq_head &= nesnic->sq_size-1; | ||
625 | } | ||
626 | } else { | ||
627 | nesvnic->linearized_skbs++; | ||
628 | hoffset = skb_transport_header(skb) - skb->data; | ||
629 | nhoffset = skb_network_header(skb) - skb->data; | ||
630 | skb_linearize(skb); | ||
631 | skb_set_transport_header(skb, hoffset); | ||
632 | skb_set_network_header(skb, nhoffset); | ||
633 | send_rc = nes_nic_send(skb, netdev); | ||
634 | if (send_rc != NETDEV_TX_OK) { | ||
635 | spin_unlock_irqrestore(&nesnic->sq_lock, flags); | ||
636 | return NETDEV_TX_OK; | ||
637 | } | ||
638 | } | ||
639 | } else { | ||
640 | send_rc = nes_nic_send(skb, netdev); | ||
641 | if (send_rc != NETDEV_TX_OK) { | ||
642 | spin_unlock_irqrestore(&nesnic->sq_lock, flags); | ||
643 | return NETDEV_TX_OK; | ||
644 | } | ||
645 | } | ||
646 | |||
647 | barrier(); | ||
648 | |||
649 | if (wqe_count) | ||
650 | nes_write32(nesdev->regs+NES_WQE_ALLOC, | ||
651 | (wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id); | ||
652 | |||
653 | netdev->trans_start = jiffies; | ||
654 | spin_unlock_irqrestore(&nesnic->sq_lock, flags); | ||
655 | |||
656 | return NETDEV_TX_OK; | ||
657 | } | ||
658 | |||
659 | |||
660 | /** | ||
661 | * nes_netdev_get_stats | ||
662 | */ | ||
663 | static struct net_device_stats *nes_netdev_get_stats(struct net_device *netdev) | ||
664 | { | ||
665 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
666 | struct nes_device *nesdev = nesvnic->nesdev; | ||
667 | u64 u64temp; | ||
668 | u32 u32temp; | ||
669 | |||
670 | u32temp = nes_read_indexed(nesdev, | ||
671 | NES_IDX_ENDNODE0_NSTAT_RX_DISCARD + (nesvnic->nic_index*0x200)); | ||
672 | nesvnic->netstats.rx_dropped += u32temp; | ||
673 | nesvnic->endnode_nstat_rx_discard += u32temp; | ||
674 | |||
675 | u64temp = (u64)nes_read_indexed(nesdev, | ||
676 | NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO + (nesvnic->nic_index*0x200)); | ||
677 | u64temp += ((u64)nes_read_indexed(nesdev, | ||
678 | NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32; | ||
679 | |||
680 | nesvnic->endnode_nstat_rx_octets += u64temp; | ||
681 | nesvnic->netstats.rx_bytes += u64temp; | ||
682 | |||
683 | u64temp = (u64)nes_read_indexed(nesdev, | ||
684 | NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO + (nesvnic->nic_index*0x200)); | ||
685 | u64temp += ((u64)nes_read_indexed(nesdev, | ||
686 | NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32; | ||
687 | |||
688 | nesvnic->endnode_nstat_rx_frames += u64temp; | ||
689 | nesvnic->netstats.rx_packets += u64temp; | ||
690 | |||
691 | u64temp = (u64)nes_read_indexed(nesdev, | ||
692 | NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO + (nesvnic->nic_index*0x200)); | ||
693 | u64temp += ((u64)nes_read_indexed(nesdev, | ||
694 | NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32; | ||
695 | |||
696 | nesvnic->endnode_nstat_tx_octets += u64temp; | ||
697 | nesvnic->netstats.tx_bytes += u64temp; | ||
698 | |||
699 | u64temp = (u64)nes_read_indexed(nesdev, | ||
700 | NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO + (nesvnic->nic_index*0x200)); | ||
701 | u64temp += ((u64)nes_read_indexed(nesdev, | ||
702 | NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32; | ||
703 | |||
704 | nesvnic->endnode_nstat_tx_frames += u64temp; | ||
705 | nesvnic->netstats.tx_packets += u64temp; | ||
706 | |||
707 | u32temp = nes_read_indexed(nesdev, | ||
708 | NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
709 | nesvnic->netstats.rx_dropped += u32temp; | ||
710 | nesvnic->nesdev->mac_rx_errors += u32temp; | ||
711 | nesvnic->nesdev->mac_rx_short_frames += u32temp; | ||
712 | |||
713 | u32temp = nes_read_indexed(nesdev, | ||
714 | NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
715 | nesvnic->netstats.rx_dropped += u32temp; | ||
716 | nesvnic->nesdev->mac_rx_errors += u32temp; | ||
717 | nesvnic->nesdev->mac_rx_oversized_frames += u32temp; | ||
718 | |||
719 | u32temp = nes_read_indexed(nesdev, | ||
720 | NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
721 | nesvnic->netstats.rx_dropped += u32temp; | ||
722 | nesvnic->nesdev->mac_rx_errors += u32temp; | ||
723 | nesvnic->nesdev->mac_rx_jabber_frames += u32temp; | ||
724 | |||
725 | u32temp = nes_read_indexed(nesdev, | ||
726 | NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
727 | nesvnic->netstats.rx_dropped += u32temp; | ||
728 | nesvnic->nesdev->mac_rx_errors += u32temp; | ||
729 | nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp; | ||
730 | |||
731 | u32temp = nes_read_indexed(nesdev, | ||
732 | NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
733 | nesvnic->netstats.rx_length_errors += u32temp; | ||
734 | nesvnic->nesdev->mac_rx_errors += u32temp; | ||
735 | |||
736 | u32temp = nes_read_indexed(nesdev, | ||
737 | NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
738 | nesvnic->nesdev->mac_rx_errors += u32temp; | ||
739 | nesvnic->nesdev->mac_rx_crc_errors += u32temp; | ||
740 | nesvnic->netstats.rx_crc_errors += u32temp; | ||
741 | |||
742 | u32temp = nes_read_indexed(nesdev, | ||
743 | NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200)); | ||
744 | nesvnic->nesdev->mac_tx_errors += u32temp; | ||
745 | nesvnic->netstats.tx_errors += u32temp; | ||
746 | |||
747 | return &nesvnic->netstats; | ||
748 | } | ||
749 | |||
750 | |||
751 | /** | ||
752 | * nes_netdev_tx_timeout | ||
753 | */ | ||
754 | static void nes_netdev_tx_timeout(struct net_device *netdev) | ||
755 | { | ||
756 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
757 | |||
758 | if (netif_msg_timer(nesvnic)) | ||
759 | nes_debug(NES_DBG_NIC_TX, "%s: tx timeout\n", netdev->name); | ||
760 | } | ||
761 | |||
762 | |||
763 | /** | ||
764 | * nes_netdev_set_mac_address | ||
765 | */ | ||
766 | static int nes_netdev_set_mac_address(struct net_device *netdev, void *p) | ||
767 | { | ||
768 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
769 | struct nes_device *nesdev = nesvnic->nesdev; | ||
770 | struct sockaddr *mac_addr = p; | ||
771 | int i; | ||
772 | u32 macaddr_low; | ||
773 | u16 macaddr_high; | ||
774 | |||
775 | if (!is_valid_ether_addr(mac_addr->sa_data)) | ||
776 | return -EADDRNOTAVAIL; | ||
777 | |||
778 | memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len); | ||
779 | printk(KERN_INFO PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X\n", | ||
780 | __func__, netdev->addr_len, | ||
781 | mac_addr->sa_data[0], mac_addr->sa_data[1], | ||
782 | mac_addr->sa_data[2], mac_addr->sa_data[3], | ||
783 | mac_addr->sa_data[4], mac_addr->sa_data[5]); | ||
784 | macaddr_high = ((u16)netdev->dev_addr[0]) << 8; | ||
785 | macaddr_high += (u16)netdev->dev_addr[1]; | ||
786 | macaddr_low = ((u32)netdev->dev_addr[2]) << 24; | ||
787 | macaddr_low += ((u32)netdev->dev_addr[3]) << 16; | ||
788 | macaddr_low += ((u32)netdev->dev_addr[4]) << 8; | ||
789 | macaddr_low += (u32)netdev->dev_addr[5]; | ||
790 | |||
791 | for (i = 0; i < NES_MAX_PORT_COUNT; i++) { | ||
792 | if (nesvnic->qp_nic_index[i] == 0xf) { | ||
793 | break; | ||
794 | } | ||
795 | nes_write_indexed(nesdev, | ||
796 | NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8), | ||
797 | macaddr_low); | ||
798 | nes_write_indexed(nesdev, | ||
799 | NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8), | ||
800 | (u32)macaddr_high | NES_MAC_ADDR_VALID | | ||
801 | ((((u32)nesvnic->nic_index) << 16))); | ||
802 | } | ||
803 | return 0; | ||
804 | } | ||
805 | |||
806 | |||
807 | /** | ||
808 | * nes_netdev_set_multicast_list | ||
809 | */ | ||
810 | void nes_netdev_set_multicast_list(struct net_device *netdev) | ||
811 | { | ||
812 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
813 | struct nes_device *nesdev = nesvnic->nesdev; | ||
814 | struct dev_mc_list *multicast_addr; | ||
815 | u32 nic_active_bit; | ||
816 | u32 nic_active; | ||
817 | u32 perfect_filter_register_address; | ||
818 | u32 macaddr_low; | ||
819 | u16 macaddr_high; | ||
820 | u8 mc_all_on = 0; | ||
821 | u8 mc_index; | ||
822 | int mc_nic_index = -1; | ||
823 | |||
824 | nic_active_bit = 1 << nesvnic->nic_index; | ||
825 | |||
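| /* three receive modes: promiscuous enables both "multicast all" and "unicast all" for | ||
|  * this NIC; ALLMULTI (or more addresses than the perfect filter holds, or nic_index > 3) | ||
|  * enables "multicast all" only; otherwise the individual multicast addresses are | ||
|  * programmed into the perfect filter further down. */ | ||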
826 | if (netdev->flags & IFF_PROMISC) { | ||
827 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL); | ||
828 | nic_active |= nic_active_bit; | ||
829 | nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active); | ||
830 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL); | ||
831 | nic_active |= nic_active_bit; | ||
832 | nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active); | ||
833 | mc_all_on = 1; | ||
834 | } else if ((netdev->flags & IFF_ALLMULTI) || (netdev->mc_count > NES_MULTICAST_PF_MAX) || | ||
835 | (nesvnic->nic_index > 3)) { | ||
836 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL); | ||
837 | nic_active |= nic_active_bit; | ||
838 | nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active); | ||
839 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL); | ||
840 | nic_active &= ~nic_active_bit; | ||
841 | nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active); | ||
842 | mc_all_on = 1; | ||
843 | } else { | ||
844 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL); | ||
845 | nic_active &= ~nic_active_bit; | ||
846 | nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active); | ||
847 | nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL); | ||
848 | nic_active &= ~nic_active_bit; | ||
849 | nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active); | ||
850 | } | ||
851 | |||
852 | nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscuous = %d, All Multicast = %d.\n", | ||
853 | netdev->mc_count, (netdev->flags & IFF_PROMISC)?1:0, | ||
854 | (netdev->flags & IFF_ALLMULTI)?1:0); | ||
855 | if (!mc_all_on) { | ||
856 | multicast_addr = netdev->mc_list; | ||
857 | perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW + 0x80; | ||
858 | perfect_filter_register_address += nesvnic->nic_index*0x40; | ||
859 | for (mc_index=0; mc_index < NES_MULTICAST_PF_MAX; mc_index++) { | ||
860 | while (multicast_addr && nesvnic->mcrq_mcast_filter && ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic, multicast_addr->dmi_addr)) == 0)) | ||
861 | multicast_addr = multicast_addr->next; | ||
862 | |||
863 | if (mc_nic_index < 0) | ||
864 | mc_nic_index = nesvnic->nic_index; | ||
865 | if (multicast_addr) { | ||
866 | nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n", | ||
867 | multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1], | ||
868 | multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3], | ||
869 | multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5], | ||
870 | perfect_filter_register_address+(mc_index * 8), mc_nic_index); | ||
871 | macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8; | ||
872 | macaddr_high += (u16)multicast_addr->dmi_addr[1]; | ||
873 | macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24; | ||
874 | macaddr_low += ((u32)multicast_addr->dmi_addr[3]) << 16; | ||
875 | macaddr_low += ((u32)multicast_addr->dmi_addr[4]) << 8; | ||
876 | macaddr_low += (u32)multicast_addr->dmi_addr[5]; | ||
877 | nes_write_indexed(nesdev, | ||
878 | perfect_filter_register_address+(mc_index * 8), | ||
879 | macaddr_low); | ||
880 | nes_write_indexed(nesdev, | ||
881 | perfect_filter_register_address+4+(mc_index * 8), | ||
882 | (u32)macaddr_high | NES_MAC_ADDR_VALID | | ||
883 | ((((u32)(1<<mc_nic_index)) << 16))); | ||
884 | multicast_addr = multicast_addr->next; | ||
885 | } else { | ||
886 | nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n", | ||
887 | perfect_filter_register_address+(mc_index * 8)); | ||
888 | nes_write_indexed(nesdev, | ||
889 | perfect_filter_register_address+4+(mc_index * 8), | ||
890 | 0); | ||
891 | } | ||
892 | } | ||
893 | } | ||
894 | } | ||
895 | |||
896 | |||
897 | /** | ||
898 | * nes_netdev_change_mtu | ||
899 | */ | ||
900 | static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu) | ||
901 | { | ||
902 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
903 | struct nes_device *nesdev = nesvnic->nesdev; | ||
904 | int ret = 0; | ||
905 | u8 jumbomode=0; | ||
906 | |||
907 | if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu)) | ||
908 | return -EINVAL; | ||
909 | |||
910 | netdev->mtu = new_mtu; | ||
911 | nesvnic->max_frame_size = new_mtu+ETH_HLEN; | ||
912 | |||
913 | if (netdev->mtu > 1500) { | ||
914 | jumbomode=1; | ||
915 | } | ||
916 | nes_nic_init_timer_defaults(nesdev, jumbomode); | ||
917 | |||
918 | if (netif_running(netdev)) { | ||
919 | nes_netdev_stop(netdev); | ||
920 | nes_netdev_open(netdev); | ||
921 | } | ||
922 | |||
923 | return ret; | ||
924 | } | ||
925 | |||
926 | |||
927 | /** | ||
928 | * nes_netdev_exit - destroy network device | ||
929 | */ | ||
930 | void nes_netdev_exit(struct nes_vnic *nesvnic) | ||
931 | { | ||
932 | struct net_device *netdev = nesvnic->netdev; | ||
933 | struct nes_ib_device *nesibdev = nesvnic->nesibdev; | ||
934 | |||
935 | nes_debug(NES_DBG_SHUTDOWN, "\n"); | ||
936 | |||
937 | /* destroy the ibdevice if RDMA enabled */ | ||
938 | if ((nesvnic->rdma_enabled) && (nesvnic->of_device_registered)) { | ||
939 | nes_destroy_ofa_device(nesibdev); | ||
940 | nesvnic->of_device_registered = 0; | ||
941 | nesvnic->nesibdev = NULL; | ||
942 | } | ||
943 | unregister_netdev(netdev); | ||
944 | nes_debug(NES_DBG_SHUTDOWN, "\n"); | ||
945 | } | ||
946 | |||
947 | |||
948 | #define NES_ETHTOOL_STAT_COUNT 55 | ||
949 | static const char nes_ethtool_stringset[NES_ETHTOOL_STAT_COUNT][ETH_GSTRING_LEN] = { | ||
950 | "Link Change Interrupts", | ||
951 | "Linearized SKBs", | ||
952 | "T/GSO Requests", | ||
953 | "Pause Frames Sent", | ||
954 | "Pause Frames Received", | ||
955 | "Internal Routing Errors", | ||
956 | "SQ SW Dropped SKBs", | ||
957 | "SQ Locked", | ||
958 | "SQ Full", | ||
959 | "Segmented TSO Requests", | ||
960 | "Rx Symbol Errors", | ||
961 | "Rx Jabber Errors", | ||
962 | "Rx Oversized Frames", | ||
963 | "Rx Short Frames", | ||
964 | "Endnode Rx Discards", | ||
965 | "Endnode Rx Octets", | ||
966 | "Endnode Rx Frames", | ||
967 | "Endnode Tx Octets", | ||
968 | "Endnode Tx Frames", | ||
969 | "mh detected", | ||
970 | "mh pauses", | ||
971 | "Retransmission Count", | ||
972 | "CM Connects", | ||
973 | "CM Accepts", | ||
974 | "Disconnects", | ||
975 | "Connected Events", | ||
976 | "Connect Requests", | ||
977 | "CM Rejects", | ||
978 | "ModifyQP Timeouts", | ||
979 | "CreateQPs", | ||
980 | "SW DestroyQPs", | ||
981 | "DestroyQPs", | ||
982 | "CM Closes", | ||
983 | "CM Packets Sent", | ||
984 | "CM Packets Bounced", | ||
985 | "CM Packets Created", | ||
986 | "CM Packets Rcvd", | ||
987 | "CM Packets Dropped", | ||
988 | "CM Packets Retrans", | ||
989 | "CM Listens Created", | ||
990 | "CM Listens Destroyed", | ||
991 | "CM Backlog Drops", | ||
992 | "CM Loopbacks", | ||
993 | "CM Nodes Created", | ||
994 | "CM Nodes Destroyed", | ||
995 | "CM Accel Drops", | ||
996 | "CM Resets Received", | ||
997 | "Timer Inits", | ||
998 | "CQ Depth 1", | ||
999 | "CQ Depth 4", | ||
1000 | "CQ Depth 16", | ||
1001 | "CQ Depth 24", | ||
1002 | "CQ Depth 32", | ||
1003 | "CQ Depth 128", | ||
1004 | "CQ Depth 256", | ||
1005 | }; | ||
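| /* the order and count of these strings must match the target_stat_values[] indices | ||
|  * filled in by nes_netdev_get_ethtool_stats() below (NES_ETHTOOL_STAT_COUNT entries). */ | ||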
1006 | |||
1007 | |||
1008 | /** | ||
1009 | * nes_netdev_get_rx_csum | ||
1010 | */ | ||
1011 | static u32 nes_netdev_get_rx_csum (struct net_device *netdev) | ||
1012 | { | ||
1013 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
1014 | |||
1015 | if (nesvnic->rx_checksum_disabled) | ||
1016 | return 0; | ||
1017 | else | ||
1018 | return 1; | ||
1019 | } | ||
1020 | |||
1021 | |||
1022 | /** | ||
1023 | * nes_netdev_set_rx_csum | ||
1024 | */ | ||
1025 | static int nes_netdev_set_rx_csum(struct net_device *netdev, u32 enable) | ||
1026 | { | ||
1027 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
1028 | |||
1029 | if (enable) | ||
1030 | nesvnic->rx_checksum_disabled = 0; | ||
1031 | else | ||
1032 | nesvnic->rx_checksum_disabled = 1; | ||
1033 | return 0; | ||
1034 | } | ||
1035 | |||
1036 | |||
1037 | /** | ||
1038 | * nes_netdev_get_stats_count | ||
1039 | */ | ||
1040 | static int nes_netdev_get_stats_count(struct net_device *netdev) | ||
1041 | { | ||
1042 | return NES_ETHTOOL_STAT_COUNT; | ||
1043 | } | ||
1044 | |||
1045 | |||
1046 | /** | ||
1047 | * nes_netdev_get_strings | ||
1048 | */ | ||
1049 | static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset, | ||
1050 | u8 *ethtool_strings) | ||
1051 | { | ||
1052 | if (stringset == ETH_SS_STATS) | ||
1053 | memcpy(ethtool_strings, | ||
1054 | &nes_ethtool_stringset, | ||
1055 | sizeof(nes_ethtool_stringset)); | ||
1056 | } | ||
1057 | |||
1058 | |||
1059 | /** | ||
1060 | * nes_netdev_get_ethtool_stats | ||
1061 | */ | ||
1062 | static void nes_netdev_get_ethtool_stats(struct net_device *netdev, | ||
1063 | struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values) | ||
1064 | { | ||
1065 | u64 u64temp; | ||
1066 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
1067 | struct nes_device *nesdev = nesvnic->nesdev; | ||
1068 | u32 nic_count; | ||
1069 | u32 u32temp; | ||
1070 | |||
1071 | target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT; | ||
1072 | target_stat_values[0] = nesvnic->nesdev->link_status_interrupts; | ||
1073 | target_stat_values[1] = nesvnic->linearized_skbs; | ||
1074 | target_stat_values[2] = nesvnic->tso_requests; | ||
1075 | |||
1076 | u32temp = nes_read_indexed(nesdev, | ||
1077 | NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
1078 | nesvnic->nesdev->mac_pause_frames_sent += u32temp; | ||
1079 | target_stat_values[3] = nesvnic->nesdev->mac_pause_frames_sent; | ||
1080 | |||
1081 | u32temp = nes_read_indexed(nesdev, | ||
1082 | NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
1083 | nesvnic->nesdev->mac_pause_frames_received += u32temp; | ||
1084 | |||
1085 | u32temp = nes_read_indexed(nesdev, | ||
1086 | NES_IDX_PORT_RX_DISCARDS + (nesvnic->nesdev->mac_index*0x40)); | ||
1087 | nesvnic->nesdev->port_rx_discards += u32temp; | ||
1088 | nesvnic->netstats.rx_dropped += u32temp; | ||
1089 | |||
1090 | u32temp = nes_read_indexed(nesdev, | ||
1091 | NES_IDX_PORT_TX_DISCARDS + (nesvnic->nesdev->mac_index*0x40)); | ||
1092 | nesvnic->nesdev->port_tx_discards += u32temp; | ||
1093 | nesvnic->netstats.tx_dropped += u32temp; | ||
1094 | |||
1095 | for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) { | ||
1096 | if (nesvnic->qp_nic_index[nic_count] == 0xf) | ||
1097 | break; | ||
1098 | |||
1099 | u32temp = nes_read_indexed(nesdev, | ||
1100 | NES_IDX_ENDNODE0_NSTAT_RX_DISCARD + | ||
1101 | (nesvnic->qp_nic_index[nic_count]*0x200)); | ||
1102 | nesvnic->netstats.rx_dropped += u32temp; | ||
1103 | nesvnic->endnode_nstat_rx_discard += u32temp; | ||
1104 | |||
1105 | u64temp = (u64)nes_read_indexed(nesdev, | ||
1106 | NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO + | ||
1107 | (nesvnic->qp_nic_index[nic_count]*0x200)); | ||
1108 | u64temp += ((u64)nes_read_indexed(nesdev, | ||
1109 | NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI + | ||
1110 | (nesvnic->qp_nic_index[nic_count]*0x200))) << 32; | ||
1111 | |||
1112 | nesvnic->endnode_nstat_rx_octets += u64temp; | ||
1113 | nesvnic->netstats.rx_bytes += u64temp; | ||
1114 | |||
1115 | u64temp = (u64)nes_read_indexed(nesdev, | ||
1116 | NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO + | ||
1117 | (nesvnic->qp_nic_index[nic_count]*0x200)); | ||
1118 | u64temp += ((u64)nes_read_indexed(nesdev, | ||
1119 | NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI + | ||
1120 | (nesvnic->qp_nic_index[nic_count]*0x200))) << 32; | ||
1121 | |||
1122 | nesvnic->endnode_nstat_rx_frames += u64temp; | ||
1123 | nesvnic->netstats.rx_packets += u64temp; | ||
1124 | |||
1125 | u64temp = (u64)nes_read_indexed(nesdev, | ||
1126 | NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO + | ||
1127 | (nesvnic->qp_nic_index[nic_count]*0x200)); | ||
1128 | u64temp += ((u64)nes_read_indexed(nesdev, | ||
1129 | NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI + | ||
1130 | (nesvnic->qp_nic_index[nic_count]*0x200))) << 32; | ||
1131 | |||
1132 | nesvnic->endnode_nstat_tx_octets += u64temp; | ||
1133 | nesvnic->netstats.tx_bytes += u64temp; | ||
1134 | |||
1135 | u64temp = (u64)nes_read_indexed(nesdev, | ||
1136 | NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO + | ||
1137 | (nesvnic->qp_nic_index[nic_count]*0x200)); | ||
1138 | u64temp += ((u64)nes_read_indexed(nesdev, | ||
1139 | NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI + | ||
1140 | (nesvnic->qp_nic_index[nic_count]*0x200))) << 32; | ||
1141 | |||
1142 | nesvnic->endnode_nstat_tx_frames += u64temp; | ||
1143 | nesvnic->netstats.tx_packets += u64temp; | ||
1144 | |||
1145 | u32temp = nes_read_indexed(nesdev, | ||
1146 | NES_IDX_IPV4_TCP_REXMITS + (nesvnic->qp_nic_index[nic_count]*0x200)); | ||
1147 | nesvnic->endnode_ipv4_tcp_retransmits += u32temp; | ||
1148 | } | ||
1149 | |||
1150 | target_stat_values[4] = nesvnic->nesdev->mac_pause_frames_received; | ||
1151 | target_stat_values[5] = nesdev->nesadapter->nic_rx_eth_route_err; | ||
1152 | target_stat_values[6] = nesvnic->tx_sw_dropped; | ||
1153 | target_stat_values[7] = nesvnic->sq_locked; | ||
1154 | target_stat_values[8] = nesvnic->sq_full; | ||
1155 | target_stat_values[9] = nesvnic->segmented_tso_requests; | ||
1156 | target_stat_values[10] = nesvnic->nesdev->mac_rx_symbol_err_frames; | ||
1157 | target_stat_values[11] = nesvnic->nesdev->mac_rx_jabber_frames; | ||
1158 | target_stat_values[12] = nesvnic->nesdev->mac_rx_oversized_frames; | ||
1159 | target_stat_values[13] = nesvnic->nesdev->mac_rx_short_frames; | ||
1160 | target_stat_values[14] = nesvnic->endnode_nstat_rx_discard; | ||
1161 | target_stat_values[15] = nesvnic->endnode_nstat_rx_octets; | ||
1162 | target_stat_values[16] = nesvnic->endnode_nstat_rx_frames; | ||
1163 | target_stat_values[17] = nesvnic->endnode_nstat_tx_octets; | ||
1164 | target_stat_values[18] = nesvnic->endnode_nstat_tx_frames; | ||
1165 | target_stat_values[19] = mh_detected; | ||
1166 | target_stat_values[20] = mh_pauses_sent; | ||
1167 | target_stat_values[21] = nesvnic->endnode_ipv4_tcp_retransmits; | ||
1168 | target_stat_values[22] = atomic_read(&cm_connects); | ||
1169 | target_stat_values[23] = atomic_read(&cm_accepts); | ||
1170 | target_stat_values[24] = atomic_read(&cm_disconnects); | ||
1171 | target_stat_values[25] = atomic_read(&cm_connecteds); | ||
1172 | target_stat_values[26] = atomic_read(&cm_connect_reqs); | ||
1173 | target_stat_values[27] = atomic_read(&cm_rejects); | ||
1174 | target_stat_values[28] = atomic_read(&mod_qp_timouts); | ||
1175 | target_stat_values[29] = atomic_read(&qps_created); | ||
1176 | target_stat_values[30] = atomic_read(&sw_qps_destroyed); | ||
1177 | target_stat_values[31] = atomic_read(&qps_destroyed); | ||
1178 | target_stat_values[32] = atomic_read(&cm_closes); | ||
1179 | target_stat_values[33] = cm_packets_sent; | ||
1180 | target_stat_values[34] = cm_packets_bounced; | ||
1181 | target_stat_values[35] = cm_packets_created; | ||
1182 | target_stat_values[36] = cm_packets_received; | ||
1183 | target_stat_values[37] = cm_packets_dropped; | ||
1184 | target_stat_values[38] = cm_packets_retrans; | ||
1185 | target_stat_values[39] = cm_listens_created; | ||
1186 | target_stat_values[40] = cm_listens_destroyed; | ||
1187 | target_stat_values[41] = cm_backlog_drops; | ||
1188 | target_stat_values[42] = atomic_read(&cm_loopbacks); | ||
1189 | target_stat_values[43] = atomic_read(&cm_nodes_created); | ||
1190 | target_stat_values[44] = atomic_read(&cm_nodes_destroyed); | ||
1191 | target_stat_values[45] = atomic_read(&cm_accel_dropped_pkts); | ||
1192 | target_stat_values[46] = atomic_read(&cm_resets_recvd); | ||
1193 | target_stat_values[47] = int_mod_timer_init; | ||
1194 | target_stat_values[48] = int_mod_cq_depth_1; | ||
1195 | target_stat_values[49] = int_mod_cq_depth_4; | ||
1196 | target_stat_values[50] = int_mod_cq_depth_16; | ||
1197 | target_stat_values[51] = int_mod_cq_depth_24; | ||
1198 | target_stat_values[52] = int_mod_cq_depth_32; | ||
1199 | target_stat_values[53] = int_mod_cq_depth_128; | ||
1200 | target_stat_values[54] = int_mod_cq_depth_256; | ||
1201 | |||
1202 | } | ||
1203 | |||
1204 | |||
1205 | /** | ||
1206 | * nes_netdev_get_drvinfo | ||
1207 | */ | ||
1208 | static void nes_netdev_get_drvinfo(struct net_device *netdev, | ||
1209 | struct ethtool_drvinfo *drvinfo) | ||
1210 | { | ||
1211 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
1212 | |||
1213 | strcpy(drvinfo->driver, DRV_NAME); | ||
1214 | strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev)); | ||
1215 | strcpy(drvinfo->fw_version, "TBD"); | ||
1216 | strcpy(drvinfo->version, DRV_VERSION); | ||
1217 | drvinfo->n_stats = nes_netdev_get_stats_count(netdev); | ||
1218 | drvinfo->testinfo_len = 0; | ||
1219 | drvinfo->eedump_len = 0; | ||
1220 | drvinfo->regdump_len = 0; | ||
1221 | } | ||
1222 | |||
1223 | |||
1224 | /** | ||
1225 | * nes_netdev_set_coalesce | ||
1226 | */ | ||
1227 | static int nes_netdev_set_coalesce(struct net_device *netdev, | ||
1228 | struct ethtool_coalesce *et_coalesce) | ||
1229 | { | ||
1230 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
1231 | struct nes_device *nesdev = nesvnic->nesdev; | ||
1232 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1233 | struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; | ||
1234 | unsigned long flags; | ||
1235 | |||
1236 | spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); | ||
1237 | if (et_coalesce->rx_max_coalesced_frames_low) { | ||
1238 | shared_timer->threshold_low = et_coalesce->rx_max_coalesced_frames_low; | ||
1239 | } | ||
1240 | if (et_coalesce->rx_max_coalesced_frames_irq) { | ||
1241 | shared_timer->threshold_target = et_coalesce->rx_max_coalesced_frames_irq; | ||
1242 | } | ||
1243 | if (et_coalesce->rx_max_coalesced_frames_high) { | ||
1244 | shared_timer->threshold_high = et_coalesce->rx_max_coalesced_frames_high; | ||
1245 | } | ||
1246 | if (et_coalesce->rx_coalesce_usecs_low) { | ||
1247 | shared_timer->timer_in_use_min = et_coalesce->rx_coalesce_usecs_low; | ||
1248 | } | ||
1249 | if (et_coalesce->rx_coalesce_usecs_high) { | ||
1250 | shared_timer->timer_in_use_max = et_coalesce->rx_coalesce_usecs_high; | ||
1251 | } | ||
1252 | spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags); | ||
1253 | |||
1254 | /* using this to drive total interrupt moderation */ | ||
1255 | nesadapter->et_rx_coalesce_usecs_irq = et_coalesce->rx_coalesce_usecs_irq; | ||
1256 | if (et_coalesce->use_adaptive_rx_coalesce) { | ||
1257 | nesadapter->et_use_adaptive_rx_coalesce = 1; | ||
1258 | nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC; | ||
1259 | nesadapter->et_rx_coalesce_usecs_irq = 0; | ||
1260 | if (et_coalesce->pkt_rate_low) { | ||
1261 | nesadapter->et_pkt_rate_low = et_coalesce->pkt_rate_low; | ||
1262 | } | ||
1263 | } else { | ||
1264 | nesadapter->et_use_adaptive_rx_coalesce = 0; | ||
1265 | nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT; | ||
1266 | if (nesadapter->et_rx_coalesce_usecs_irq) { | ||
1267 | nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, | ||
1268 | 0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq*8))); | ||
1269 | } | ||
1270 | } | ||
1271 | return 0; | ||
1272 | } | ||
1273 | |||
1274 | |||
1275 | /** | ||
1276 | * nes_netdev_get_coalesce | ||
1277 | */ | ||
1278 | static int nes_netdev_get_coalesce(struct net_device *netdev, | ||
1279 | struct ethtool_coalesce *et_coalesce) | ||
1280 | { | ||
1281 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
1282 | struct nes_device *nesdev = nesvnic->nesdev; | ||
1283 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1284 | struct ethtool_coalesce temp_et_coalesce; | ||
1285 | struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; | ||
1286 | unsigned long flags; | ||
1287 | |||
1288 | memset(&temp_et_coalesce, 0, sizeof(temp_et_coalesce)); | ||
1289 | temp_et_coalesce.rx_coalesce_usecs_irq = nesadapter->et_rx_coalesce_usecs_irq; | ||
1290 | temp_et_coalesce.use_adaptive_rx_coalesce = nesadapter->et_use_adaptive_rx_coalesce; | ||
1291 | temp_et_coalesce.rate_sample_interval = nesadapter->et_rate_sample_interval; | ||
1292 | temp_et_coalesce.pkt_rate_low = nesadapter->et_pkt_rate_low; | ||
1293 | spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); | ||
1294 | temp_et_coalesce.rx_max_coalesced_frames_low = shared_timer->threshold_low; | ||
1295 | temp_et_coalesce.rx_max_coalesced_frames_irq = shared_timer->threshold_target; | ||
1296 | temp_et_coalesce.rx_max_coalesced_frames_high = shared_timer->threshold_high; | ||
1297 | temp_et_coalesce.rx_coalesce_usecs_low = shared_timer->timer_in_use_min; | ||
1298 | temp_et_coalesce.rx_coalesce_usecs_high = shared_timer->timer_in_use_max; | ||
1299 | if (nesadapter->et_use_adaptive_rx_coalesce) { | ||
1300 | temp_et_coalesce.rx_coalesce_usecs_irq = shared_timer->timer_in_use; | ||
1301 | } | ||
1302 | spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags); | ||
1303 | memcpy(et_coalesce, &temp_et_coalesce, sizeof(*et_coalesce)); | ||
1304 | return 0; | ||
1305 | } | ||
1306 | |||
1307 | |||
1308 | /** | ||
1309 | * nes_netdev_get_pauseparam | ||
1310 | */ | ||
1311 | static void nes_netdev_get_pauseparam(struct net_device *netdev, | ||
1312 | struct ethtool_pauseparam *et_pauseparam) | ||
1313 | { | ||
1314 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
1315 | |||
1316 | et_pauseparam->autoneg = 0; | ||
1317 | et_pauseparam->rx_pause = (nesvnic->nesdev->disable_rx_flow_control == 0) ? 1:0; | ||
1318 | et_pauseparam->tx_pause = (nesvnic->nesdev->disable_tx_flow_control == 0) ? 1:0; | ||
1319 | } | ||
1320 | |||
1321 | |||
1322 | /** | ||
1323 | * nes_netdev_set_pauseparam | ||
1324 | */ | ||
1325 | static int nes_netdev_set_pauseparam(struct net_device *netdev, | ||
1326 | struct ethtool_pauseparam *et_pauseparam) | ||
1327 | { | ||
1328 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
1329 | struct nes_device *nesdev = nesvnic->nesdev; | ||
1330 | u32 u32temp; | ||
1331 | |||
1332 | if (et_pauseparam->autoneg) { | ||
1333 | /* TODO: should return unsupported */ | ||
1334 | return 0; | ||
1335 | } | ||
1336 | if ((et_pauseparam->tx_pause == 1) && (nesdev->disable_tx_flow_control == 1)) { | ||
1337 | u32temp = nes_read_indexed(nesdev, | ||
1338 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); | ||
1339 | u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; | ||
1340 | nes_write_indexed(nesdev, | ||
1341 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp); | ||
1342 | nesdev->disable_tx_flow_control = 0; | ||
1343 | } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) { | ||
1344 | u32temp = nes_read_indexed(nesdev, | ||
1345 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); | ||
1346 | u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; | ||
1347 | nes_write_indexed(nesdev, | ||
1348 | NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp); | ||
1349 | nesdev->disable_tx_flow_control = 1; | ||
1350 | } | ||
1351 | if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) { | ||
1352 | u32temp = nes_read_indexed(nesdev, | ||
1353 | NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40)); | ||
1354 | u32temp &= ~NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE; | ||
1355 | nes_write_indexed(nesdev, | ||
1356 | NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp); | ||
1357 | nesdev->disable_rx_flow_control = 0; | ||
1358 | } else if ((et_pauseparam->rx_pause == 0) && (nesdev->disable_rx_flow_control == 0)) { | ||
1359 | u32temp = nes_read_indexed(nesdev, | ||
1360 | NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40)); | ||
1361 | u32temp |= NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE; | ||
1362 | nes_write_indexed(nesdev, | ||
1363 | NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp); | ||
1364 | nesdev->disable_rx_flow_control = 1; | ||
1365 | } | ||
1366 | |||
1367 | return 0; | ||
1368 | } | ||
1369 | |||
1370 | |||
1371 | /** | ||
1372 | * nes_netdev_get_settings | ||
1373 | */ | ||
1374 | static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd) | ||
1375 | { | ||
1376 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
1377 | struct nes_device *nesdev = nesvnic->nesdev; | ||
1378 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1379 | u16 phy_data; | ||
1380 | |||
1381 | et_cmd->duplex = DUPLEX_FULL; | ||
1382 | et_cmd->port = PORT_MII; | ||
1383 | if (nesadapter->OneG_Mode) { | ||
1384 | et_cmd->supported = SUPPORTED_1000baseT_Full|SUPPORTED_Autoneg; | ||
1385 | et_cmd->advertising = ADVERTISED_1000baseT_Full|ADVERTISED_Autoneg; | ||
1386 | et_cmd->speed = SPEED_1000; | ||
1387 | nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], | ||
1388 | &phy_data); | ||
1389 | if (phy_data&0x1000) { | ||
1390 | et_cmd->autoneg = AUTONEG_ENABLE; | ||
1391 | } else { | ||
1392 | et_cmd->autoneg = AUTONEG_DISABLE; | ||
1393 | } | ||
1394 | et_cmd->transceiver = XCVR_EXTERNAL; | ||
1395 | et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index]; | ||
1396 | } else { | ||
1397 | if (nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) { | ||
1398 | et_cmd->transceiver = XCVR_EXTERNAL; | ||
1399 | et_cmd->port = PORT_FIBRE; | ||
1400 | et_cmd->supported = SUPPORTED_FIBRE; | ||
1401 | et_cmd->advertising = ADVERTISED_FIBRE; | ||
1402 | et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index]; | ||
1403 | } else { | ||
1404 | et_cmd->transceiver = XCVR_INTERNAL; | ||
1405 | et_cmd->supported = SUPPORTED_10000baseT_Full; | ||
1406 | et_cmd->advertising = ADVERTISED_10000baseT_Full; | ||
1407 | et_cmd->phy_address = nesdev->mac_index; | ||
1408 | } | ||
1409 | et_cmd->speed = SPEED_10000; | ||
1410 | et_cmd->autoneg = AUTONEG_DISABLE; | ||
1411 | } | ||
1412 | et_cmd->maxtxpkt = 511; | ||
1413 | et_cmd->maxrxpkt = 511; | ||
1414 | return 0; | ||
1415 | } | ||
1416 | |||
1417 | |||
1418 | /** | ||
1419 | * nes_netdev_set_settings | ||
1420 | */ | ||
1421 | static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd) | ||
1422 | { | ||
1423 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
1424 | struct nes_device *nesdev = nesvnic->nesdev; | ||
1425 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1426 | u16 phy_data; | ||
1427 | |||
1428 | if (nesadapter->OneG_Mode) { | ||
1429 | nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], | ||
1430 | &phy_data); | ||
1431 | if (et_cmd->autoneg) { | ||
1432 | /* Turn on Full duplex, Autoneg, and restart autonegotiation */ | ||
1433 | phy_data |= 0x1300; | ||
1434 | } else { | ||
1435 | /* Turn off autoneg */ | ||
1436 | phy_data &= ~0x1000; | ||
1437 | } | ||
1438 | nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], | ||
1439 | phy_data); | ||
1440 | } | ||
1441 | |||
1442 | return 0; | ||
1443 | } | ||
1444 | |||
1445 | |||
1446 | static struct ethtool_ops nes_ethtool_ops = { | ||
1447 | .get_link = ethtool_op_get_link, | ||
1448 | .get_settings = nes_netdev_get_settings, | ||
1449 | .set_settings = nes_netdev_set_settings, | ||
1450 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
1451 | .get_rx_csum = nes_netdev_get_rx_csum, | ||
1452 | .get_sg = ethtool_op_get_sg, | ||
1453 | .get_strings = nes_netdev_get_strings, | ||
1454 | .get_stats_count = nes_netdev_get_stats_count, | ||
1455 | .get_ethtool_stats = nes_netdev_get_ethtool_stats, | ||
1456 | .get_drvinfo = nes_netdev_get_drvinfo, | ||
1457 | .get_coalesce = nes_netdev_get_coalesce, | ||
1458 | .set_coalesce = nes_netdev_set_coalesce, | ||
1459 | .get_pauseparam = nes_netdev_get_pauseparam, | ||
1460 | .set_pauseparam = nes_netdev_set_pauseparam, | ||
1461 | .set_tx_csum = ethtool_op_set_tx_csum, | ||
1462 | .set_rx_csum = nes_netdev_set_rx_csum, | ||
1463 | .set_sg = ethtool_op_set_sg, | ||
1464 | .get_tso = ethtool_op_get_tso, | ||
1465 | .set_tso = ethtool_op_set_tso, | ||
1466 | }; | ||
1467 | |||
1468 | |||
1469 | static void nes_netdev_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | ||
1470 | { | ||
1471 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
1472 | struct nes_device *nesdev = nesvnic->nesdev; | ||
1473 | u32 u32temp; | ||
1474 | |||
1475 | nesvnic->vlan_grp = grp; | ||
1476 | |||
1477 | /* Enable/Disable VLAN Stripping */ | ||
1478 | u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG); | ||
1479 | if (grp) | ||
1480 | u32temp &= 0xfdffffff; | ||
1481 | else | ||
1482 | u32temp |= 0x02000000; | ||
1483 | |||
1484 | nes_write_indexed(nesdev, NES_IDX_PCIX_DIAG, u32temp); | ||
1485 | } | ||
1486 | |||
1487 | |||
1488 | /** | ||
1489 | * nes_netdev_init - initialize network device | ||
1490 | */ | ||
1491 | struct net_device *nes_netdev_init(struct nes_device *nesdev, | ||
1492 | void __iomem *mmio_addr) | ||
1493 | { | ||
1494 | u64 u64temp; | ||
1495 | struct nes_vnic *nesvnic = NULL; | ||
1496 | struct net_device *netdev; | ||
1497 | struct nic_qp_map *curr_qp_map; | ||
1498 | u32 u32temp; | ||
1499 | u16 phy_data; | ||
1500 | u16 temp_phy_data; | ||
1501 | |||
1502 | netdev = alloc_etherdev(sizeof(struct nes_vnic)); | ||
1503 | if (!netdev) { | ||
1504 | printk(KERN_ERR PFX "nesvnic etherdev alloc failed\n"); | ||
1505 | return NULL; | ||
1506 | } | ||
1507 | |||
1508 | nes_debug(NES_DBG_INIT, "netdev = %p, %s\n", netdev, netdev->name); | ||
1509 | |||
1510 | SET_NETDEV_DEV(netdev, &nesdev->pcidev->dev); | ||
1511 | |||
1512 | nesvnic = netdev_priv(netdev); | ||
1513 | memset(nesvnic, 0, sizeof(*nesvnic)); | ||
1514 | |||
1515 | netdev->open = nes_netdev_open; | ||
1516 | netdev->stop = nes_netdev_stop; | ||
1517 | netdev->hard_start_xmit = nes_netdev_start_xmit; | ||
1518 | netdev->get_stats = nes_netdev_get_stats; | ||
1519 | netdev->tx_timeout = nes_netdev_tx_timeout; | ||
1520 | netdev->set_mac_address = nes_netdev_set_mac_address; | ||
1521 | netdev->set_multicast_list = nes_netdev_set_multicast_list; | ||
1522 | netdev->change_mtu = nes_netdev_change_mtu; | ||
1523 | netdev->watchdog_timeo = NES_TX_TIMEOUT; | ||
1524 | netdev->irq = nesdev->pcidev->irq; | ||
1525 | netdev->mtu = ETH_DATA_LEN; | ||
1526 | netdev->hard_header_len = ETH_HLEN; | ||
1527 | netdev->addr_len = ETH_ALEN; | ||
1528 | netdev->type = ARPHRD_ETHER; | ||
1529 | netdev->features = NETIF_F_HIGHDMA; | ||
1530 | netdev->ethtool_ops = &nes_ethtool_ops; | ||
1531 | netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128); | ||
1532 | nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n"); | ||
1533 | netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
1534 | netdev->vlan_rx_register = nes_netdev_vlan_rx_register; | ||
1535 | netdev->features |= NETIF_F_LLTX; | ||
1536 | |||
1537 | /* Fill in the port structure */ | ||
1538 | nesvnic->netdev = netdev; | ||
1539 | nesvnic->nesdev = nesdev; | ||
1540 | nesvnic->msg_enable = netif_msg_init(debug, default_msg); | ||
1541 | nesvnic->netdev_index = nesdev->netdev_count; | ||
1542 | nesvnic->perfect_filter_index = nesdev->nesadapter->netdev_count; | ||
1543 | nesvnic->max_frame_size = netdev->mtu+netdev->hard_header_len; | ||
1544 | |||
1545 | curr_qp_map = nic_qp_mapping_per_function[PCI_FUNC(nesdev->pcidev->devfn)]; | ||
1546 | nesvnic->nic.qp_id = curr_qp_map[nesdev->netdev_count].qpid; | ||
1547 | nesvnic->nic_index = curr_qp_map[nesdev->netdev_count].nic_index; | ||
1548 | nesvnic->logical_port = curr_qp_map[nesdev->netdev_count].logical_port; | ||
1549 | |||
1550 | /* Setup the burned in MAC address */ | ||
1551 | u64temp = (u64)nesdev->nesadapter->mac_addr_low; | ||
1552 | u64temp += ((u64)nesdev->nesadapter->mac_addr_high) << 32; | ||
1553 | u64temp += nesvnic->nic_index; | ||
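| /* each virtual NIC uses the adapter's burned-in base MAC plus its nic_index; the | ||
|  * 48-bit result is unpacked MSB-first into dev_addr[] below. */ | ||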
1554 | netdev->dev_addr[0] = (u8)(u64temp>>40); | ||
1555 | netdev->dev_addr[1] = (u8)(u64temp>>32); | ||
1556 | netdev->dev_addr[2] = (u8)(u64temp>>24); | ||
1557 | netdev->dev_addr[3] = (u8)(u64temp>>16); | ||
1558 | netdev->dev_addr[4] = (u8)(u64temp>>8); | ||
1559 | netdev->dev_addr[5] = (u8)u64temp; | ||
1560 | memcpy(netdev->perm_addr, netdev->dev_addr, 6); | ||
1561 | |||
1562 | if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) { | ||
1563 | netdev->features |= NETIF_F_GSO | NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM; | ||
1565 | } else { | ||
1566 | netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; | ||
1567 | } | ||
1568 | |||
1569 | nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d," | ||
1570 | " nic_index = %d, logical_port = %d, mac_index = %d.\n", | ||
1571 | nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id, | ||
1572 | nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index); | ||
1573 | |||
1574 | if (nesvnic->nesdev->nesadapter->port_count == 1) { | ||
1575 | nesvnic->qp_nic_index[0] = nesvnic->nic_index; | ||
1576 | nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1; | ||
1577 | if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) { | ||
1578 | nesvnic->qp_nic_index[2] = 0xf; | ||
1579 | nesvnic->qp_nic_index[3] = 0xf; | ||
1580 | } else { | ||
1581 | nesvnic->qp_nic_index[2] = nesvnic->nic_index + 2; | ||
1582 | nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3; | ||
1583 | } | ||
1584 | } else { | ||
1585 | if (nesvnic->nesdev->nesadapter->port_count == 2) { | ||
1586 | nesvnic->qp_nic_index[0] = nesvnic->nic_index; | ||
1587 | nesvnic->qp_nic_index[1] = nesvnic->nic_index + 2; | ||
1588 | nesvnic->qp_nic_index[2] = 0xf; | ||
1589 | nesvnic->qp_nic_index[3] = 0xf; | ||
1590 | } else { | ||
1591 | nesvnic->qp_nic_index[0] = nesvnic->nic_index; | ||
1592 | nesvnic->qp_nic_index[1] = 0xf; | ||
1593 | nesvnic->qp_nic_index[2] = 0xf; | ||
1594 | nesvnic->qp_nic_index[3] = 0xf; | ||
1595 | } | ||
1596 | } | ||
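Spelled out, the per-port_count branches above give the following NIC index sets for a vnic whose nic_index is n (0xf marking an unused slot); this only restates the assignments, it adds no behaviour:

    /* port_count == 1, default:                       {n, n+1, n+2, n+3} */
    /* port_count == 1, NES_DRV_OPT_DUAL_LOGICAL_PORT: {n, n+1, 0xf, 0xf} */
    /* port_count == 2:                                {n, n+2, 0xf, 0xf} */
    /* any other port count:                           {n, 0xf, 0xf, 0xf} */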
1597 | nesvnic->next_qp_nic_index = 0; | ||
1598 | |||
1599 | if (nesdev->netdev_count == 0) { | ||
1600 | nesvnic->rdma_enabled = 1; | ||
1601 | } else { | ||
1602 | nesvnic->rdma_enabled = 0; | ||
1603 | } | ||
1604 | nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id; | ||
1605 | spin_lock_init(&nesvnic->tx_lock); | ||
1606 | nesdev->netdev[nesdev->netdev_count] = netdev; | ||
1607 | |||
1608 | nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n", | ||
1609 | nesvnic, nesdev->mac_index); | ||
1610 | list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]); | ||
1611 | |||
1612 | if ((nesdev->netdev_count == 0) && | ||
1613 | (PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index)) { | ||
1614 | nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n", | ||
1615 | NES_IDX_PHY_PCS_CONTROL_STATUS0+(0x200*(nesvnic->logical_port&1))); | ||
1616 | u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + | ||
1617 | (0x200*(nesvnic->logical_port&1))); | ||
1618 | u32temp |= 0x00200000; | ||
1619 | nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + | ||
1620 | (0x200*(nesvnic->logical_port&1)), u32temp); | ||
1621 | u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + | ||
1622 | (0x200*(nesvnic->logical_port&1))); | ||
1623 | if ((u32temp&0x0f1f0000) == 0x0f0f0000) { | ||
1624 | if (nesdev->nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) { | ||
1625 | nes_init_phy(nesdev); | ||
1626 | nes_read_10G_phy_reg(nesdev, 1, | ||
1627 | nesdev->nesadapter->phy_index[nesvnic->logical_port]); | ||
1628 | temp_phy_data = (u16)nes_read_indexed(nesdev, | ||
1629 | NES_IDX_MAC_MDIO_CONTROL); | ||
1630 | u32temp = 20; | ||
1631 | do { | ||
1632 | nes_read_10G_phy_reg(nesdev, 1, | ||
1633 | nesdev->nesadapter->phy_index[nesvnic->logical_port]); | ||
1634 | phy_data = (u16)nes_read_indexed(nesdev, | ||
1635 | NES_IDX_MAC_MDIO_CONTROL); | ||
1636 | if ((phy_data == temp_phy_data) || (!(--u32temp))) | ||
1637 | break; | ||
1638 | temp_phy_data = phy_data; | ||
1639 | } while (1); | ||
1640 | if (phy_data & 4) { | ||
1641 | nes_debug(NES_DBG_INIT, "The Link is UP!!.\n"); | ||
1642 | nesvnic->linkup = 1; | ||
1643 | } else { | ||
1644 | nes_debug(NES_DBG_INIT, "The Link is DOWN!!.\n"); | ||
1645 | } | ||
1646 | } else { | ||
1647 | nes_debug(NES_DBG_INIT, "The Link is UP!!.\n"); | ||
1648 | nesvnic->linkup = 1; | ||
1649 | } | ||
1650 | } | ||
1651 | nes_debug(NES_DBG_INIT, "Setting up MAC interrupt mask.\n"); | ||
1652 | /* clear the MAC interrupt status, assumes direct logical to physical mapping */ | ||
1653 | u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS+(0x200*nesvnic->logical_port)); | ||
1654 | nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp); | ||
1655 | nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS+(0x200*nesvnic->logical_port), u32temp); | ||
1656 | |||
1657 | if (nesdev->nesadapter->phy_type[nesvnic->logical_port] != NES_PHY_TYPE_IRIS) | ||
1658 | nes_init_phy(nesdev); | ||
1659 | |||
1660 | nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesvnic->logical_port), | ||
1661 | ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT | | ||
1662 | NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR)); | ||
1663 | } | ||
1664 | |||
1665 | return netdev; | ||
1666 | } | ||
1667 | |||
1668 | |||
1669 | /** | ||
1670 | * nes_netdev_destroy - destroy network device structure | ||
1671 | */ | ||
1672 | void nes_netdev_destroy(struct net_device *netdev) | ||
1673 | { | ||
1674 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
1675 | |||
1676 | /* make sure 'stop' method is called by Linux stack */ | ||
1677 | /* nes_netdev_stop(netdev); */ | ||
1678 | |||
1679 | list_del(&nesvnic->list); | ||
1680 | |||
1681 | if (nesvnic->of_device_registered) { | ||
1682 | nes_destroy_ofa_device(nesvnic->nesibdev); | ||
1683 | } | ||
1684 | |||
1685 | free_netdev(netdev); | ||
1686 | } | ||
1687 | |||
1688 | |||
1689 | /** | ||
1690 | * nes_nic_cm_xmit -- the connection manager calls this to send out packets | ||
1691 | */ | ||
1692 | int nes_nic_cm_xmit(struct sk_buff *skb, struct net_device *netdev) | ||
1693 | { | ||
1694 | int ret; | ||
1695 | |||
1696 | skb->dev = netdev; | ||
1697 | ret = dev_queue_xmit(skb); | ||
1698 | if (ret) { | ||
1699 | nes_debug(NES_DBG_CM, "Bad return code from dev_queue_xmit %d\n", ret); | ||
1700 | } | ||
1701 | |||
1702 | return ret; | ||
1703 | } | ||
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h new file mode 100644 index 000000000000..e64306bce80b --- /dev/null +++ b/drivers/infiniband/hw/nes/nes_user.h | |||
@@ -0,0 +1,112 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 - 2008 NetEffect. All rights reserved. | ||
3 | * Copyright (c) 2005 Topspin Communications. All rights reserved. | ||
4 | * Copyright (c) 2005 Cisco Systems. All rights reserved. | ||
5 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
6 | * | ||
7 | * This software is available to you under a choice of one of two | ||
8 | * licenses. You may choose to be licensed under the terms of the GNU | ||
9 | * General Public License (GPL) Version 2, available from the file | ||
10 | * COPYING in the main directory of this source tree, or the | ||
11 | * OpenIB.org BSD license below: | ||
12 | * | ||
13 | * Redistribution and use in source and binary forms, with or | ||
14 | * without modification, are permitted provided that the following | ||
15 | * conditions are met: | ||
16 | * | ||
17 | * - Redistributions of source code must retain the above | ||
18 | * copyright notice, this list of conditions and the following | ||
19 | * disclaimer. | ||
20 | * | ||
21 | * - Redistributions in binary form must reproduce the above | ||
22 | * copyright notice, this list of conditions and the following | ||
23 | * disclaimer in the documentation and/or other materials | ||
24 | * provided with the distribution. | ||
25 | * | ||
26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
33 | * SOFTWARE. | ||
34 | * | ||
35 | */ | ||
36 | |||
37 | #ifndef NES_USER_H | ||
38 | #define NES_USER_H | ||
39 | |||
40 | #include <linux/types.h> | ||
41 | |||
42 | #define NES_ABI_USERSPACE_VER 1 | ||
43 | #define NES_ABI_KERNEL_VER 1 | ||
44 | |||
45 | /* | ||
46 | * Make sure that all structs defined in this file remain laid out so | ||
47 | * that they pack the same way on 32-bit and 64-bit architectures (to | ||
48 | * avoid incompatibility between 32-bit userspace and 64-bit kernels). | ||
49 | * In particular do not use pointer types -- pass pointers in __u64 | ||
50 | * instead. | ||
51 | */ | ||
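As a rough illustration of this rule (the struct below is hypothetical and not part of the nes ABI), a request carrying a userspace buffer pointer would widen the pointer to __u64 and pad explicitly, so the struct is the same 16 bytes on 32-bit and 64-bit builds:

    #include <linux/types.h>

    struct example_buf_req {                /* hypothetical, for illustration  */
            __u64 user_buffer;              /* userspace pointer cast to __u64 */
            __u32 length;
            __u32 reserved;                 /* explicit pad keeps sizes equal  */
    };

    /* userspace would fill it as:
     *         req.user_buffer = (__u64)(unsigned long)buf;
     */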
52 | |||
53 | struct nes_alloc_ucontext_req { | ||
54 | __u32 reserved32; | ||
55 | __u8 userspace_ver; | ||
56 | __u8 reserved8[3]; | ||
57 | }; | ||
58 | |||
59 | struct nes_alloc_ucontext_resp { | ||
60 | __u32 max_pds; /* maximum pds allowed for this user process */ | ||
61 | __u32 max_qps; /* maximum qps allowed for this user process */ | ||
62 | __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmapped area */ | ||
63 | __u8 virtwq; /* flag indicating whether virtual WQs are to be used */ | ||
64 | __u8 kernel_ver; | ||
65 | __u8 reserved[2]; | ||
66 | }; | ||
67 | |||
68 | struct nes_alloc_pd_resp { | ||
69 | __u32 pd_id; | ||
70 | __u32 mmap_db_index; | ||
71 | }; | ||
72 | |||
73 | struct nes_create_cq_req { | ||
74 | __u64 user_cq_buffer; | ||
75 | __u32 mcrqf; | ||
76 | __u8 reserved[4]; | ||
77 | }; | ||
78 | |||
79 | struct nes_create_qp_req { | ||
80 | __u64 user_wqe_buffers; | ||
81 | }; | ||
82 | |||
83 | enum iwnes_memreg_type { | ||
84 | IWNES_MEMREG_TYPE_MEM = 0x0000, | ||
85 | IWNES_MEMREG_TYPE_QP = 0x0001, | ||
86 | IWNES_MEMREG_TYPE_CQ = 0x0002, | ||
87 | IWNES_MEMREG_TYPE_MW = 0x0003, | ||
88 | IWNES_MEMREG_TYPE_FMR = 0x0004, | ||
89 | }; | ||
90 | |||
91 | struct nes_mem_reg_req { | ||
92 | __u32 reg_type; /* indicates if id is memory, QP or CQ */ | ||
93 | __u32 reserved; | ||
94 | }; | ||
95 | |||
96 | struct nes_create_cq_resp { | ||
97 | __u32 cq_id; | ||
98 | __u32 cq_size; | ||
99 | __u32 mmap_db_index; | ||
100 | __u32 reserved; | ||
101 | }; | ||
102 | |||
103 | struct nes_create_qp_resp { | ||
104 | __u32 qp_id; | ||
105 | __u32 actual_sq_size; | ||
106 | __u32 actual_rq_size; | ||
107 | __u32 mmap_sq_db_index; | ||
108 | __u32 mmap_rq_db_index; | ||
109 | __u32 nes_drv_opt; | ||
110 | }; | ||
111 | |||
112 | #endif /* NES_USER_H */ | ||
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c new file mode 100644 index 000000000000..c4ec6ac63461 --- /dev/null +++ b/drivers/infiniband/hw/nes/nes_utils.c | |||
@@ -0,0 +1,917 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/module.h> | ||
35 | #include <linux/moduleparam.h> | ||
36 | #include <linux/netdevice.h> | ||
37 | #include <linux/etherdevice.h> | ||
38 | #include <linux/ethtool.h> | ||
39 | #include <linux/mii.h> | ||
40 | #include <linux/if_vlan.h> | ||
41 | #include <linux/crc32.h> | ||
42 | #include <linux/in.h> | ||
43 | #include <linux/ip.h> | ||
44 | #include <linux/tcp.h> | ||
45 | #include <linux/init.h> | ||
46 | |||
47 | #include <asm/io.h> | ||
48 | #include <asm/irq.h> | ||
49 | #include <asm/byteorder.h> | ||
50 | |||
51 | #include "nes.h" | ||
52 | |||
53 | |||
54 | |||
55 | static u16 nes_read16_eeprom(void __iomem *addr, u16 offset); | ||
56 | |||
57 | u32 mh_detected; | ||
58 | u32 mh_pauses_sent; | ||
59 | |||
60 | /** | ||
61 | * nes_read_eeprom_values - read the adapter configuration values out of the EEPROM | ||
62 | */ | ||
63 | int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesadapter) | ||
64 | { | ||
65 | u32 mac_addr_low; | ||
66 | u16 mac_addr_high; | ||
67 | u16 eeprom_data; | ||
68 | u16 eeprom_offset; | ||
69 | u16 next_section_address; | ||
70 | u16 sw_section_ver; | ||
71 | u8 major_ver = 0; | ||
72 | u8 minor_ver = 0; | ||
73 | |||
74 | /* TODO: deal with EEPROM endian issues */ | ||
75 | if (nesadapter->firmware_eeprom_offset == 0) { | ||
76 | /* Read the EEPROM Parameters */ | ||
77 | eeprom_data = nes_read16_eeprom(nesdev->regs, 0); | ||
78 | nes_debug(NES_DBG_HW, "EEPROM Offset 0 = 0x%04X\n", eeprom_data); | ||
79 | eeprom_offset = 2 + (((eeprom_data & 0x007f) << 3) << | ||
80 | ((eeprom_data & 0x0080) >> 7)); | ||
81 | nes_debug(NES_DBG_HW, "Firmware Offset = 0x%04X\n", eeprom_offset); | ||
82 | nesadapter->firmware_eeprom_offset = eeprom_offset; | ||
83 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4); | ||
84 | if (eeprom_data != 0x5746) { | ||
85 | nes_debug(NES_DBG_HW, "Not a valid Firmware Image = 0x%04X\n", eeprom_data); | ||
86 | return -1; | ||
87 | } | ||
88 | |||
89 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); | ||
90 | nes_debug(NES_DBG_HW, "EEPROM Offset %u = 0x%04X\n", | ||
91 | eeprom_offset + 2, eeprom_data); | ||
92 | eeprom_offset += ((eeprom_data & 0x00ff) << 3) << ((eeprom_data & 0x0100) >> 8); | ||
93 | nes_debug(NES_DBG_HW, "Software Offset = 0x%04X\n", eeprom_offset); | ||
94 | nesadapter->software_eeprom_offset = eeprom_offset; | ||
95 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4); | ||
96 | if (eeprom_data != 0x5753) { | ||
97 | printk("Not a valid Software Image = 0x%04X\n", eeprom_data); | ||
98 | return -1; | ||
99 | } | ||
100 | sw_section_ver = nes_read16_eeprom(nesdev->regs, nesadapter->software_eeprom_offset + 6); | ||
101 | nes_debug(NES_DBG_HW, "Software section version number = 0x%04X\n", | ||
102 | sw_section_ver); | ||
103 | |||
104 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); | ||
105 | nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n", | ||
106 | eeprom_offset + 2, eeprom_data); | ||
107 | next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) << | ||
108 | ((eeprom_data & 0x0100) >> 8)); | ||
109 | eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4); | ||
110 | if (eeprom_data != 0x414d) { | ||
111 | nes_debug(NES_DBG_HW, "EEPROM section signature should be 0x414d but was 0x%04X\n", | ||
112 | eeprom_data); | ||
113 | goto no_fw_rev; | ||
114 | } | ||
115 | eeprom_offset = next_section_address; | ||
116 | |||
117 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); | ||
118 | nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n", | ||
119 | eeprom_offset + 2, eeprom_data); | ||
120 | next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) << | ||
121 | ((eeprom_data & 0x0100) >> 8)); | ||
122 | eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4); | ||
123 | if (eeprom_data != 0x4f52) { | ||
124 | nes_debug(NES_DBG_HW, "EEPROM section signature should be 0x4f52 but was 0x%04X\n", | ||
125 | eeprom_data); | ||
126 | goto no_fw_rev; | ||
127 | } | ||
128 | eeprom_offset = next_section_address; | ||
129 | |||
130 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); | ||
131 | nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n", | ||
132 | eeprom_offset + 2, eeprom_data); | ||
133 | next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3); | ||
134 | eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4); | ||
135 | if (eeprom_data != 0x5746) { | ||
136 | nes_debug(NES_DBG_HW, "EEPROM section signature should be 0x5746 but was 0x%04X\n", | ||
137 | eeprom_data); | ||
138 | goto no_fw_rev; | ||
139 | } | ||
140 | eeprom_offset = next_section_address; | ||
141 | |||
142 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); | ||
143 | nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n", | ||
144 | eeprom_offset + 2, eeprom_data); | ||
145 | next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3); | ||
146 | eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4); | ||
147 | if (eeprom_data != 0x5753) { | ||
148 | nes_debug(NES_DBG_HW, "EEPROM section signature should be 0x5753 but was 0x%04X\n", | ||
149 | eeprom_data); | ||
150 | goto no_fw_rev; | ||
151 | } | ||
152 | eeprom_offset = next_section_address; | ||
153 | |||
154 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); | ||
155 | nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n", | ||
156 | eeprom_offset + 2, eeprom_data); | ||
157 | next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3); | ||
158 | eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4); | ||
159 | if (eeprom_data != 0x414d) { | ||
160 | nes_debug(NES_DBG_HW, "EEPROM section signature should be 0x414d but was 0x%04X\n", | ||
161 | eeprom_data); | ||
162 | goto no_fw_rev; | ||
163 | } | ||
164 | eeprom_offset = next_section_address; | ||
165 | |||
166 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2); | ||
167 | nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n", | ||
168 | eeprom_offset + 2, eeprom_data); | ||
169 | next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3); | ||
170 | eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4); | ||
171 | if (eeprom_data != 0x464e) { | ||
172 | nes_debug(NES_DBG_HW, "EEPROM section signature should be 0x464e but was 0x%04X\n", | ||
173 | eeprom_data); | ||
174 | goto no_fw_rev; | ||
175 | } | ||
176 | eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 8); | ||
177 | printk(PFX "Firmware version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data); | ||
178 | major_ver = (u8)(eeprom_data >> 8); | ||
179 | minor_ver = (u8)(eeprom_data); | ||
180 | |||
181 | if (nes_drv_opt & NES_DRV_OPT_DISABLE_VIRT_WQ) { | ||
182 | nes_debug(NES_DBG_HW, "Virtual WQs have been disabled\n"); | ||
183 | } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) { | ||
184 | nesadapter->virtwq = 1; | ||
185 | } | ||
186 | nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) + | ||
187 | (u32)((u8)eeprom_data); | ||
188 | |||
189 | no_fw_rev: | ||
190 | /* eeprom is valid */ | ||
191 | eeprom_offset = nesadapter->software_eeprom_offset; | ||
192 | eeprom_offset += 8; | ||
193 | nesadapter->netdev_max = (u8)nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
194 | eeprom_offset += 2; | ||
195 | mac_addr_high = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
196 | eeprom_offset += 2; | ||
197 | mac_addr_low = (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
198 | eeprom_offset += 2; | ||
199 | mac_addr_low <<= 16; | ||
200 | mac_addr_low += (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
201 | nes_debug(NES_DBG_HW, "Base MAC Address = 0x%04X%08X\n", | ||
202 | mac_addr_high, mac_addr_low); | ||
203 | nes_debug(NES_DBG_HW, "MAC Address count = %u\n", nesadapter->netdev_max); | ||
204 | |||
205 | nesadapter->mac_addr_low = mac_addr_low; | ||
206 | nesadapter->mac_addr_high = mac_addr_high; | ||
207 | |||
208 | /* Read the Phy Type array */ | ||
209 | eeprom_offset += 10; | ||
210 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
211 | nesadapter->phy_type[0] = (u8)(eeprom_data >> 8); | ||
212 | nesadapter->phy_type[1] = (u8)eeprom_data; | ||
213 | |||
214 | /* Read the port array */ | ||
215 | eeprom_offset += 2; | ||
216 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
217 | nesadapter->phy_type[2] = (u8)(eeprom_data >> 8); | ||
218 | nesadapter->phy_type[3] = (u8)eeprom_data; | ||
219 | /* port_count is set by soft reset reg */ | ||
220 | nes_debug(NES_DBG_HW, "port_count = %u, port 0 -> %u, port 1 -> %u," | ||
221 | " port 2 -> %u, port 3 -> %u\n", | ||
222 | nesadapter->port_count, | ||
223 | nesadapter->phy_type[0], nesadapter->phy_type[1], | ||
224 | nesadapter->phy_type[2], nesadapter->phy_type[3]); | ||
225 | |||
226 | /* Read PD config array */ | ||
227 | eeprom_offset += 10; | ||
228 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
229 | nesadapter->pd_config_size[0] = eeprom_data; | ||
230 | eeprom_offset += 2; | ||
231 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
232 | nesadapter->pd_config_base[0] = eeprom_data; | ||
233 | nes_debug(NES_DBG_HW, "PD0 config, size=0x%04x, base=0x%04x\n", | ||
234 | nesadapter->pd_config_size[0], nesadapter->pd_config_base[0]); | ||
235 | |||
236 | eeprom_offset += 2; | ||
237 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
238 | nesadapter->pd_config_size[1] = eeprom_data; | ||
239 | eeprom_offset += 2; | ||
240 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
241 | nesadapter->pd_config_base[1] = eeprom_data; | ||
242 | nes_debug(NES_DBG_HW, "PD1 config, size=0x%04x, base=0x%04x\n", | ||
243 | nesadapter->pd_config_size[1], nesadapter->pd_config_base[1]); | ||
244 | |||
245 | eeprom_offset += 2; | ||
246 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
247 | nesadapter->pd_config_size[2] = eeprom_data; | ||
248 | eeprom_offset += 2; | ||
249 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
250 | nesadapter->pd_config_base[2] = eeprom_data; | ||
251 | nes_debug(NES_DBG_HW, "PD2 config, size=0x%04x, base=0x%04x\n", | ||
252 | nesadapter->pd_config_size[2], nesadapter->pd_config_base[2]); | ||
253 | |||
254 | eeprom_offset += 2; | ||
255 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
256 | nesadapter->pd_config_size[3] = eeprom_data; | ||
257 | eeprom_offset += 2; | ||
258 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
259 | nesadapter->pd_config_base[3] = eeprom_data; | ||
260 | nes_debug(NES_DBG_HW, "PD3 config, size=0x%04x, base=0x%04x\n", | ||
261 | nesadapter->pd_config_size[3], nesadapter->pd_config_base[3]); | ||
262 | |||
263 | /* Read Rx Pool Size */ | ||
264 | eeprom_offset += 22; /* 46 */ | ||
265 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
266 | eeprom_offset += 2; | ||
267 | nesadapter->rx_pool_size = (((u32)eeprom_data) << 16) + | ||
268 | nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
269 | nes_debug(NES_DBG_HW, "rx_pool_size = 0x%08X\n", nesadapter->rx_pool_size); | ||
270 | |||
271 | eeprom_offset += 2; | ||
272 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
273 | eeprom_offset += 2; | ||
274 | nesadapter->tx_pool_size = (((u32)eeprom_data) << 16) + | ||
275 | nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
276 | nes_debug(NES_DBG_HW, "tx_pool_size = 0x%08X\n", nesadapter->tx_pool_size); | ||
277 | |||
278 | eeprom_offset += 2; | ||
279 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
280 | eeprom_offset += 2; | ||
281 | nesadapter->rx_threshold = (((u32)eeprom_data) << 16) + | ||
282 | nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
283 | nes_debug(NES_DBG_HW, "rx_threshold = 0x%08X\n", nesadapter->rx_threshold); | ||
284 | |||
285 | eeprom_offset += 2; | ||
286 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
287 | eeprom_offset += 2; | ||
288 | nesadapter->tcp_timer_core_clk_divisor = (((u32)eeprom_data) << 16) + | ||
289 | nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
290 | nes_debug(NES_DBG_HW, "tcp_timer_core_clk_divisor = 0x%08X\n", | ||
291 | nesadapter->tcp_timer_core_clk_divisor); | ||
292 | |||
293 | eeprom_offset += 2; | ||
294 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
295 | eeprom_offset += 2; | ||
296 | nesadapter->iwarp_config = (((u32)eeprom_data) << 16) + | ||
297 | nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
298 | nes_debug(NES_DBG_HW, "iwarp_config = 0x%08X\n", nesadapter->iwarp_config); | ||
299 | |||
300 | eeprom_offset += 2; | ||
301 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
302 | eeprom_offset += 2; | ||
303 | nesadapter->cm_config = (((u32)eeprom_data) << 16) + | ||
304 | nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
305 | nes_debug(NES_DBG_HW, "cm_config = 0x%08X\n", nesadapter->cm_config); | ||
306 | |||
307 | eeprom_offset += 2; | ||
308 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
309 | eeprom_offset += 2; | ||
310 | nesadapter->sws_timer_config = (((u32)eeprom_data) << 16) + | ||
311 | nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
312 | nes_debug(NES_DBG_HW, "sws_timer_config = 0x%08X\n", nesadapter->sws_timer_config); | ||
313 | |||
314 | eeprom_offset += 2; | ||
315 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
316 | eeprom_offset += 2; | ||
317 | nesadapter->tcp_config1 = (((u32)eeprom_data) << 16) + | ||
318 | nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
319 | nes_debug(NES_DBG_HW, "tcp_config1 = 0x%08X\n", nesadapter->tcp_config1); | ||
320 | |||
321 | eeprom_offset += 2; | ||
322 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
323 | eeprom_offset += 2; | ||
324 | nesadapter->wqm_wat = (((u32)eeprom_data) << 16) + | ||
325 | nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
326 | nes_debug(NES_DBG_HW, "wqm_wat = 0x%08X\n", nesadapter->wqm_wat); | ||
327 | |||
328 | eeprom_offset += 2; | ||
329 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
330 | eeprom_offset += 2; | ||
331 | nesadapter->core_clock = (((u32)eeprom_data) << 16) + | ||
332 | nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
333 | nes_debug(NES_DBG_HW, "core_clock = 0x%08X\n", nesadapter->core_clock); | ||
334 | |||
335 | if ((sw_section_ver) && (nesadapter->hw_rev != NE020_REV)) { | ||
336 | eeprom_offset += 2; | ||
337 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
338 | nesadapter->phy_index[0] = (eeprom_data & 0xff00)>>8; | ||
339 | nesadapter->phy_index[1] = eeprom_data & 0x00ff; | ||
340 | eeprom_offset += 2; | ||
341 | eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset); | ||
342 | nesadapter->phy_index[2] = (eeprom_data & 0xff00)>>8; | ||
343 | nesadapter->phy_index[3] = eeprom_data & 0x00ff; | ||
344 | } else { | ||
345 | nesadapter->phy_index[0] = 4; | ||
346 | nesadapter->phy_index[1] = 5; | ||
347 | nesadapter->phy_index[2] = 6; | ||
348 | nesadapter->phy_index[3] = 7; | ||
349 | } | ||
350 | nes_debug(NES_DBG_HW, "Phy address map = 0 > %u, 1 > %u, 2 > %u, 3 > %u\n", | ||
351 | nesadapter->phy_index[0], nesadapter->phy_index[1], | ||
352 | nesadapter->phy_index[2], nesadapter->phy_index[3]); | ||
353 | } | ||
354 | |||
355 | return 0; | ||
356 | } | ||
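Most of the section walks above decode the "next section" word the same way: the low byte is a length in 8-byte units and one flag bit doubles it (the very first firmware-offset read uses a 7-bit field with bit 7 as the flag, and the later sections drop the doubling). A standalone sketch of the common form:

    #include <stdint.h>
    #include <stdio.h>

    /* low 8 bits = length in 8-byte units, bit 8 doubles the length */
    static uint32_t nes_section_length(uint16_t word)
    {
            return ((uint32_t)(word & 0x00ff) << 3) << ((word & 0x0100) >> 8);
    }

    int main(void)
    {
            printf("%u\n", nes_section_length(0x0014));  /* 20 * 8     = 160 */
            printf("%u\n", nes_section_length(0x0114));  /* 20 * 8 * 2 = 320 */
            return 0;
    }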
357 | |||
358 | |||
359 | /** | ||
360 | * nes_read16_eeprom - read one 16-bit word from the EEPROM at the given byte offset | ||
361 | */ | ||
362 | static u16 nes_read16_eeprom(void __iomem *addr, u16 offset) | ||
363 | { | ||
364 | writel(NES_EEPROM_READ_REQUEST + (offset >> 1), | ||
365 | (void __iomem *)addr + NES_EEPROM_COMMAND); | ||
366 | |||
367 | do { | ||
368 | } while (readl((void __iomem *)addr + NES_EEPROM_COMMAND) & | ||
369 | NES_EEPROM_READ_REQUEST); | ||
370 | |||
371 | return readw((void __iomem *)addr + NES_EEPROM_DATA); | ||
372 | } | ||
373 | |||
374 | |||
375 | /** | ||
376 | * nes_write_1G_phy_reg - write a 1G PHY register through the MDIO interface | ||
377 | */ | ||
378 | void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data) | ||
379 | { | ||
380 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
381 | u32 u32temp; | ||
382 | u32 counter; | ||
383 | unsigned long flags; | ||
384 | |||
385 | spin_lock_irqsave(&nesadapter->phy_lock, flags); | ||
386 | |||
387 | nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, | ||
388 | 0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23)); | ||
389 | for (counter = 0; counter < 100 ; counter++) { | ||
390 | udelay(30); | ||
391 | u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS); | ||
392 | if (u32temp & 1) { | ||
393 | /* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */ | ||
394 | nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1); | ||
395 | break; | ||
396 | } | ||
397 | } | ||
398 | if (!(u32temp & 1)) | ||
399 | nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n", | ||
400 | u32temp); | ||
401 | |||
402 | spin_unlock_irqrestore(&nesadapter->phy_lock, flags); | ||
403 | } | ||
404 | |||
405 | |||
406 | /** | ||
407 | * nes_read_1G_phy_reg | ||
408 | * Reads a 1G PHY register through the MDIO interface and returns the | ||
409 | * value in *data (0xffff if the PHY does not respond). | ||
410 | */ | ||
411 | void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data) | ||
412 | { | ||
413 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
414 | u32 u32temp; | ||
415 | u32 counter; | ||
416 | unsigned long flags; | ||
417 | |||
418 | /* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n", | ||
419 | phy_addr, nesdev->mac_index); */ | ||
420 | spin_lock_irqsave(&nesadapter->phy_lock, flags); | ||
421 | |||
422 | nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, | ||
423 | 0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23)); | ||
424 | for (counter = 0; counter < 100 ; counter++) { | ||
425 | udelay(30); | ||
426 | u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS); | ||
427 | if (u32temp & 1) { | ||
428 | /* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */ | ||
429 | nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1); | ||
430 | break; | ||
431 | } | ||
432 | } | ||
433 | if (!(u32temp & 1)) { | ||
434 | nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n", | ||
435 | u32temp); | ||
436 | *data = 0xffff; | ||
437 | } else { | ||
438 | *data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); | ||
439 | } | ||
440 | spin_unlock_irqrestore(&nesadapter->phy_lock, flags); | ||
441 | } | ||
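Both 1G helpers build a single 32-bit command word for NES_IDX_MAC_MDIO_CONTROL. The field layout sketched below is inferred from the shifts and constants used above (the meaning of the fixed 0x00020000 bit is an assumption), so treat it as a reading aid rather than a register specification:

    #include <stdint.h>

    /*   bits 31..28  operation nibble (0x5 = write, 0x6 = issue read)
     *   bits 27..23  PHY address
     *   bits 22..18  register number
     *   bit  17      always set here (the 0x00020000 constant)
     *   bits 15..0   write data (write case only)
     */
    static uint32_t nes_mdio_cmd(uint8_t op, uint8_t phy_addr, uint8_t phy_reg,
                                 uint16_t data)
    {
            return ((uint32_t)op << 28) | 0x00020000 |
                   ((uint32_t)phy_addr << 23) | ((uint32_t)phy_reg << 18) | data;
    }

    /* nes_mdio_cmd(0x5, addr, reg, data) == 0x50020000 | ...   (write)
     * nes_mdio_cmd(0x6, addr, reg, 0)    == 0x60020000 | ...   (read)
     */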
442 | |||
443 | |||
444 | /** | ||
445 | * nes_write_10G_phy_reg - write a 10G PHY register (address cycle, then data cycle) | ||
446 | */ | ||
447 | void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg, | ||
448 | u8 phy_addr, u16 data) | ||
449 | { | ||
450 | u32 dev_addr; | ||
451 | u32 port_addr; | ||
452 | u32 u32temp; | ||
453 | u32 counter; | ||
454 | |||
455 | dev_addr = 1; | ||
456 | port_addr = phy_addr; | ||
457 | |||
458 | /* set address */ | ||
459 | nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, | ||
460 | 0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23)); | ||
461 | for (counter = 0; counter < 100 ; counter++) { | ||
462 | udelay(30); | ||
463 | u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS); | ||
464 | if (u32temp & 1) { | ||
465 | nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1); | ||
466 | break; | ||
467 | } | ||
468 | } | ||
469 | if (!(u32temp & 1)) | ||
470 | nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n", | ||
471 | u32temp); | ||
472 | |||
473 | /* set data */ | ||
474 | nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, | ||
475 | 0x10020000 | (u32)data | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23)); | ||
476 | for (counter = 0; counter < 100 ; counter++) { | ||
477 | udelay(30); | ||
478 | u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS); | ||
479 | if (u32temp & 1) { | ||
480 | nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1); | ||
481 | break; | ||
482 | } | ||
483 | } | ||
484 | if (!(u32temp & 1)) | ||
485 | nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n", | ||
486 | u32temp); | ||
487 | } | ||
488 | |||
489 | |||
490 | /** | ||
491 | * nes_read_10G_phy_reg | ||
492 | * This routine only issues the read, the data must be read | ||
493 | * separately from NES_IDX_MAC_MDIO_CONTROL. | ||
494 | */ | ||
495 | void nes_read_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg, u8 phy_addr) | ||
496 | { | ||
497 | u32 dev_addr; | ||
498 | u32 port_addr; | ||
499 | u32 u32temp; | ||
500 | u32 counter; | ||
501 | |||
502 | dev_addr = 1; | ||
503 | port_addr = phy_addr; | ||
504 | |||
505 | /* set address */ | ||
506 | nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, | ||
507 | 0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23)); | ||
508 | for (counter = 0; counter < 100 ; counter++) { | ||
509 | udelay(30); | ||
510 | u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS); | ||
511 | if (u32temp & 1) { | ||
512 | nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1); | ||
513 | break; | ||
514 | } | ||
515 | } | ||
516 | if (!(u32temp & 1)) | ||
517 | nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n", | ||
518 | u32temp); | ||
519 | |||
520 | /* issue read */ | ||
521 | nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, | ||
522 | 0x30020000 | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23)); | ||
523 | for (counter = 0; counter < 100 ; counter++) { | ||
524 | udelay(30); | ||
525 | u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS); | ||
526 | if (u32temp & 1) { | ||
527 | nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1); | ||
528 | break; | ||
529 | } | ||
530 | } | ||
531 | if (!(u32temp & 1)) | ||
532 | nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n", | ||
533 | u32temp); | ||
534 | } | ||
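Because the 10G read helper only posts the read, callers fetch the result from the MDIO data register themselves; the link-polling loop in nes_netdev_init() above does exactly this, so a caller-side sketch is simply:

    /* sketch mirroring nes_netdev_init(): issue the read, then pick up the data */
    nes_read_10G_phy_reg(nesdev, 1,
                    nesdev->nesadapter->phy_index[nesvnic->logical_port]);
    phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
    if (phy_data & 4)                       /* bit 2 is treated as link up */
            nesvnic->linkup = 1;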
535 | |||
536 | |||
537 | /** | ||
538 | * nes_get_cqp_request | ||
539 | */ | ||
540 | struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev) | ||
541 | { | ||
542 | unsigned long flags; | ||
543 | struct nes_cqp_request *cqp_request = NULL; | ||
544 | |||
545 | if (!list_empty(&nesdev->cqp_avail_reqs)) { | ||
546 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
547 | cqp_request = list_entry(nesdev->cqp_avail_reqs.next, | ||
548 | struct nes_cqp_request, list); | ||
549 | list_del_init(&cqp_request->list); | ||
550 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
551 | } else { | ||
552 | cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL); | ||
553 | if (cqp_request) { | ||
554 | cqp_request->dynamic = 1; | ||
555 | INIT_LIST_HEAD(&cqp_request->list); | ||
556 | } | ||
557 | } | ||
558 | |||
559 | if (cqp_request) { | ||
560 | init_waitqueue_head(&cqp_request->waitq); | ||
561 | cqp_request->waiting = 0; | ||
562 | cqp_request->request_done = 0; | ||
563 | cqp_request->callback = 0; | ||
564 | /* major_code and minor_code are filled in when the CQP operation completes */ | ||
565 | nes_debug(NES_DBG_CQP, "Got cqp request %p from the available list \n", | ||
566 | cqp_request); | ||
567 | } else | ||
568 | printk(KERN_ERR PFX "%s: Could not allocate a CQP request.\n", | ||
569 | __func__); | ||
570 | |||
571 | return cqp_request; | ||
572 | } | ||
573 | |||
574 | |||
575 | /** | ||
576 | * nes_post_cqp_request - put a request on the CQP SQ, or queue it if the SQ is full | ||
577 | */ | ||
578 | void nes_post_cqp_request(struct nes_device *nesdev, | ||
579 | struct nes_cqp_request *cqp_request, int ring_doorbell) | ||
580 | { | ||
581 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
582 | unsigned long flags; | ||
583 | u32 cqp_head; | ||
584 | u64 u64temp; | ||
585 | |||
586 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
587 | |||
588 | if (((((nesdev->cqp.sq_tail+(nesdev->cqp.sq_size*2))-nesdev->cqp.sq_head) & | ||
589 | (nesdev->cqp.sq_size - 1)) != 1) | ||
590 | && (list_empty(&nesdev->cqp_pending_reqs))) { | ||
591 | cqp_head = nesdev->cqp.sq_head++; | ||
592 | nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1; | ||
593 | cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; | ||
594 | memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe)); | ||
595 | barrier(); | ||
596 | u64temp = (unsigned long)cqp_request; | ||
597 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_SCRATCH_LOW_IDX, | ||
598 | u64temp); | ||
599 | nes_debug(NES_DBG_CQP, "CQP request (opcode 0x%02X), line 1 = 0x%08X put on CQPs SQ," | ||
600 | " request = %p, cqp_head = %u, cqp_tail = %u, cqp_size = %u," | ||
601 | " waiting = %d, refcount = %d.\n", | ||
602 | le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, | ||
603 | le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX]), cqp_request, | ||
604 | nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size, | ||
605 | cqp_request->waiting, atomic_read(&cqp_request->refcount)); | ||
606 | barrier(); | ||
607 | if (ring_doorbell) { | ||
608 | /* Ring doorbell (1 WQE) */ | ||
609 | nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id); | ||
610 | } | ||
611 | |||
612 | barrier(); | ||
613 | } else { | ||
614 | nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X), line 1 = 0x%08X" | ||
615 | " put on the pending queue.\n", | ||
616 | cqp_request, | ||
617 | le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, | ||
618 | le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_ID_IDX])); | ||
619 | list_add_tail(&cqp_request->list, &nesdev->cqp_pending_reqs); | ||
620 | } | ||
621 | |||
622 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
623 | |||
624 | return; | ||
625 | } | ||
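Together with nes_get_cqp_request(), this gives the synchronous CQP pattern used throughout the driver (nes_alloc_mw() and nes_dealloc_mw() in nes_verbs.c below follow it); roughly:

    /* sketch of the usual caller-side sequence */
    cqp_request = nes_get_cqp_request(nesdev);
    if (cqp_request == NULL)
            return -ENOMEM;
    cqp_request->waiting = 1;
    cqp_wqe = &cqp_request->cqp_wqe;
    nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
    /* ... set the opcode and operands with set_wqe_32bit_value() ... */
    atomic_set(&cqp_request->refcount, 2);          /* caller + completion */
    nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
    ret = wait_event_timeout(cqp_request->waitq,
                    cqp_request->request_done != 0, NES_EVENT_TIMEOUT);
    /* check ret and cqp_request->major_code, then drop the reference */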
626 | |||
627 | |||
628 | /** | ||
629 | * nes_arp_table - add, resolve or delete an entry in the driver's ARP table | ||
630 | */ | ||
631 | int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 action) | ||
632 | { | ||
633 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
634 | int arp_index; | ||
635 | int err = 0; | ||
636 | |||
637 | for (arp_index = 0; (u32) arp_index < nesadapter->arp_table_size; arp_index++) { | ||
638 | if (nesadapter->arp_table[arp_index].ip_addr == ip_addr) | ||
639 | break; | ||
640 | } | ||
641 | |||
642 | if (action == NES_ARP_ADD) { | ||
643 | if (arp_index != nesadapter->arp_table_size) { | ||
644 | return -1; | ||
645 | } | ||
646 | |||
647 | arp_index = 0; | ||
648 | err = nes_alloc_resource(nesadapter, nesadapter->allocated_arps, | ||
649 | nesadapter->arp_table_size, (u32 *)&arp_index, &nesadapter->next_arp_index); | ||
650 | if (err) { | ||
651 | nes_debug(NES_DBG_NETDEV, "nes_alloc_resource returned error = %u\n", err); | ||
652 | return err; | ||
653 | } | ||
654 | nes_debug(NES_DBG_NETDEV, "ADD, arp_index=%d\n", arp_index); | ||
655 | |||
656 | nesadapter->arp_table[arp_index].ip_addr = ip_addr; | ||
657 | memcpy(nesadapter->arp_table[arp_index].mac_addr, mac_addr, ETH_ALEN); | ||
658 | return arp_index; | ||
659 | } | ||
660 | |||
661 | /* DELETE or RESOLVE */ | ||
662 | if (arp_index == nesadapter->arp_table_size) { | ||
663 | nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n"); | ||
664 | return -1; | ||
665 | } | ||
666 | |||
667 | if (action == NES_ARP_RESOLVE) { | ||
668 | nes_debug(NES_DBG_NETDEV, "RESOLVE, arp_index=%d\n", arp_index); | ||
669 | return arp_index; | ||
670 | } | ||
671 | |||
672 | if (action == NES_ARP_DELETE) { | ||
673 | nes_debug(NES_DBG_NETDEV, "DELETE, arp_index=%d\n", arp_index); | ||
674 | nesadapter->arp_table[arp_index].ip_addr = 0; | ||
675 | memset(nesadapter->arp_table[arp_index].mac_addr, 0x00, ETH_ALEN); | ||
676 | nes_free_resource(nesadapter, nesadapter->allocated_arps, arp_index); | ||
677 | return arp_index; | ||
678 | } | ||
679 | |||
680 | return -1; | ||
681 | } | ||
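One entry point covers all three operations, selected by the action argument; a rough usage sketch (the RESOLVE and DELETE paths above never dereference mac_addr, so NULL is shown there):

    int idx;

    idx = nes_arp_table(nesdev, ip_addr, mac_addr, NES_ARP_ADD);
    if (idx < 0)
            return idx;                     /* table full or entry already present */
    idx = nes_arp_table(nesdev, ip_addr, NULL, NES_ARP_RESOLVE);
    /* ... use nesadapter->arp_table[idx].mac_addr ... */
    nes_arp_table(nesdev, ip_addr, NULL, NES_ARP_DELETE);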
682 | |||
683 | |||
684 | /** | ||
685 | * nes_mh_fix - periodic timer that checks for a stalled MAC transmitter and resets the MAC/SerDes | ||
686 | */ | ||
687 | void nes_mh_fix(unsigned long parm) | ||
688 | { | ||
689 | unsigned long flags; | ||
690 | struct nes_device *nesdev = (struct nes_device *)parm; | ||
691 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
692 | struct nes_vnic *nesvnic; | ||
693 | u32 used_chunks_tx; | ||
694 | u32 temp_used_chunks_tx; | ||
695 | u32 temp_last_used_chunks_tx; | ||
696 | u32 used_chunks_mask; | ||
697 | u32 mac_tx_frames_low; | ||
698 | u32 mac_tx_frames_high; | ||
699 | u32 mac_tx_pauses; | ||
700 | u32 serdes_status; | ||
701 | u32 reset_value; | ||
702 | u32 tx_control; | ||
703 | u32 tx_config; | ||
704 | u32 tx_pause_quanta; | ||
705 | u32 rx_control; | ||
706 | u32 rx_config; | ||
707 | u32 mac_exact_match; | ||
708 | u32 mpp_debug; | ||
709 | u32 i=0; | ||
710 | u32 chunks_tx_progress = 0; | ||
711 | |||
712 | spin_lock_irqsave(&nesadapter->phy_lock, flags); | ||
713 | if ((nesadapter->mac_sw_state[0] != NES_MAC_SW_IDLE) || (nesadapter->mac_link_down[0])) { | ||
714 | spin_unlock_irqrestore(&nesadapter->phy_lock, flags); | ||
715 | goto no_mh_work; | ||
716 | } | ||
717 | nesadapter->mac_sw_state[0] = NES_MAC_SW_MH; | ||
718 | spin_unlock_irqrestore(&nesadapter->phy_lock, flags); | ||
719 | do { | ||
720 | mac_tx_frames_low = nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_LOW); | ||
721 | mac_tx_frames_high = nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_HIGH); | ||
722 | mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES); | ||
723 | used_chunks_tx = nes_read_indexed(nesdev, NES_IDX_USED_CHUNKS_TX); | ||
724 | nesdev->mac_pause_frames_sent += mac_tx_pauses; | ||
725 | used_chunks_mask = 0; | ||
726 | temp_used_chunks_tx = used_chunks_tx; | ||
727 | temp_last_used_chunks_tx = nesdev->last_used_chunks_tx; | ||
728 | |||
729 | if (nesdev->netdev[0]) { | ||
730 | nesvnic = netdev_priv(nesdev->netdev[0]); | ||
731 | } else { | ||
732 | break; | ||
733 | } | ||
734 | |||
735 | for (i=0; i<4; i++) { | ||
736 | used_chunks_mask <<= 8; | ||
737 | if (nesvnic->qp_nic_index[i] != 0xff) { | ||
738 | used_chunks_mask |= 0xff; | ||
739 | if ((temp_used_chunks_tx&0xff)<(temp_last_used_chunks_tx&0xff)) { | ||
740 | chunks_tx_progress = 1; | ||
741 | } | ||
742 | } | ||
743 | temp_used_chunks_tx >>= 8; | ||
744 | temp_last_used_chunks_tx >>= 8; | ||
745 | } | ||
746 | if ((mac_tx_frames_low) || (mac_tx_frames_high) || | ||
747 | (!(used_chunks_tx&used_chunks_mask)) || | ||
748 | (!(nesdev->last_used_chunks_tx&used_chunks_mask)) || | ||
749 | (chunks_tx_progress) ) { | ||
750 | nesdev->last_used_chunks_tx = used_chunks_tx; | ||
751 | break; | ||
752 | } | ||
753 | nesdev->last_used_chunks_tx = used_chunks_tx; | ||
754 | barrier(); | ||
755 | |||
756 | nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, 0x00000005); | ||
757 | mh_pauses_sent++; | ||
758 | mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES); | ||
759 | if (mac_tx_pauses) { | ||
760 | nesdev->mac_pause_frames_sent += mac_tx_pauses; | ||
761 | break; | ||
762 | } | ||
763 | |||
764 | tx_control = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONTROL); | ||
765 | tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); | ||
766 | tx_pause_quanta = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA); | ||
767 | rx_control = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONTROL); | ||
768 | rx_config = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONFIG); | ||
769 | mac_exact_match = nes_read_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM); | ||
770 | mpp_debug = nes_read_indexed(nesdev, NES_IDX_MPP_DEBUG); | ||
771 | |||
772 | /* one last ditch effort to avoid a false positive */ | ||
773 | mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES); | ||
774 | if (mac_tx_pauses) { | ||
775 | nesdev->last_mac_tx_pauses = nesdev->mac_pause_frames_sent; | ||
776 | nes_debug(NES_DBG_HW, "failsafe caught slow outbound pause\n"); | ||
777 | break; | ||
778 | } | ||
779 | mh_detected++; | ||
780 | |||
781 | nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, 0x00000000); | ||
782 | nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, 0x00000000); | ||
783 | reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); | ||
784 | |||
785 | nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value | 0x0000001d); | ||
786 | |||
787 | while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) | ||
788 | & 0x00000040) != 0x00000040) && (i++ < 5000)) { | ||
789 | /* mdelay(1); */ | ||
790 | } | ||
791 | |||
792 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008); | ||
793 | serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0); | ||
794 | |||
795 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7); | ||
796 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000); | ||
797 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE0, 0x0ff00000); | ||
798 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET0, 0x00000000); | ||
799 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS0, 0x00000000); | ||
800 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0, 0x00000000); | ||
801 | if (nesadapter->OneG_Mode) { | ||
802 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0182222); | ||
803 | } else { | ||
804 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222); | ||
805 | } | ||
806 | serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_STATUS0); | ||
807 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff); | ||
808 | |||
809 | nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, tx_control); | ||
810 | nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); | ||
811 | nes_write_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA, tx_pause_quanta); | ||
812 | nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONTROL, rx_control); | ||
813 | nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONFIG, rx_config); | ||
814 | nes_write_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM, mac_exact_match); | ||
815 | nes_write_indexed(nesdev, NES_IDX_MPP_DEBUG, mpp_debug); | ||
816 | |||
817 | } while (0); | ||
818 | |||
819 | nesadapter->mac_sw_state[0] = NES_MAC_SW_IDLE; | ||
820 | no_mh_work: | ||
821 | nesdev->nesadapter->mh_timer.expires = jiffies + (HZ/5); | ||
822 | add_timer(&nesdev->nesadapter->mh_timer); | ||
823 | } | ||
824 | |||
825 | /** | ||
826 | * nes_clc - hourly timer that clears the per-port link interrupt counters | ||
827 | */ | ||
828 | void nes_clc(unsigned long parm) | ||
829 | { | ||
830 | unsigned long flags; | ||
831 | struct nes_device *nesdev = (struct nes_device *)parm; | ||
832 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
833 | |||
834 | spin_lock_irqsave(&nesadapter->phy_lock, flags); | ||
835 | nesadapter->link_interrupt_count[0] = 0; | ||
836 | nesadapter->link_interrupt_count[1] = 0; | ||
837 | nesadapter->link_interrupt_count[2] = 0; | ||
838 | nesadapter->link_interrupt_count[3] = 0; | ||
839 | spin_unlock_irqrestore(&nesadapter->phy_lock, flags); | ||
840 | |||
841 | nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */ | ||
842 | add_timer(&nesadapter->lc_timer); | ||
843 | } | ||
844 | |||
845 | |||
846 | /** | ||
847 | * nes_dump_mem - dump a buffer as hex and ASCII at the given debug level | ||
848 | */ | ||
849 | void nes_dump_mem(unsigned int dump_debug_level, void *addr, int length) | ||
850 | { | ||
851 | char xlate[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', | ||
852 | 'a', 'b', 'c', 'd', 'e', 'f'}; | ||
853 | char *ptr; | ||
854 | char hex_buf[80]; | ||
855 | char ascii_buf[20]; | ||
856 | int num_char; | ||
857 | int num_ascii; | ||
858 | int num_hex; | ||
859 | |||
860 | if (!(nes_debug_level & dump_debug_level)) { | ||
861 | return; | ||
862 | } | ||
863 | |||
864 | ptr = addr; | ||
865 | if (length > 0x100) { | ||
866 | nes_debug(dump_debug_level, "Length truncated from %x to %x\n", length, 0x100); | ||
867 | length = 0x100; | ||
868 | } | ||
869 | nes_debug(dump_debug_level, "Address=0x%p, length=0x%x (%d)\n", ptr, length, length); | ||
870 | |||
871 | memset(ascii_buf, 0, 20); | ||
872 | memset(hex_buf, 0, 80); | ||
873 | |||
874 | num_ascii = 0; | ||
875 | num_hex = 0; | ||
876 | for (num_char = 0; num_char < length; num_char++) { | ||
877 | if (num_ascii == 8) { | ||
878 | ascii_buf[num_ascii++] = ' '; | ||
879 | hex_buf[num_hex++] = '-'; | ||
880 | hex_buf[num_hex++] = ' '; | ||
881 | } | ||
882 | |||
883 | if (*ptr < 0x20 || *ptr > 0x7e) | ||
884 | ascii_buf[num_ascii++] = '.'; | ||
885 | else | ||
886 | ascii_buf[num_ascii++] = *ptr; | ||
887 | hex_buf[num_hex++] = xlate[((*ptr & 0xf0) >> 4)]; | ||
888 | hex_buf[num_hex++] = xlate[*ptr & 0x0f]; | ||
889 | hex_buf[num_hex++] = ' '; | ||
890 | ptr++; | ||
891 | |||
892 | if (num_ascii >= 17) { | ||
893 | /* output line and reset */ | ||
894 | nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf); | ||
895 | memset(ascii_buf, 0, 20); | ||
896 | memset(hex_buf, 0, 80); | ||
897 | num_ascii = 0; | ||
898 | num_hex = 0; | ||
899 | } | ||
900 | } | ||
901 | |||
902 | /* output the rest */ | ||
903 | if (num_ascii) { | ||
904 | while (num_ascii < 17) { | ||
905 | if (num_ascii == 8) { | ||
906 | hex_buf[num_hex++] = ' '; | ||
907 | hex_buf[num_hex++] = ' '; | ||
908 | } | ||
909 | hex_buf[num_hex++] = ' '; | ||
910 | hex_buf[num_hex++] = ' '; | ||
911 | hex_buf[num_hex++] = ' '; | ||
912 | num_ascii++; | ||
913 | } | ||
914 | |||
915 | nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf); | ||
916 | } | ||
917 | } | ||
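With the 8+1 grouping above, each output line covers 16 bytes: eight hex pairs, a dash, eight more, then the ASCII view. For a hypothetical 16-byte input holding the string "NetEffect RNIC.." the line would come out roughly as:

     4e 65 74 45 66 66 65 63 - 74 20 52 4e 49 43 2e 2e  | NetEffec t RNIC..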
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c new file mode 100644 index 000000000000..ffd4b425567f --- /dev/null +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -0,0 +1,3917 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/module.h> | ||
35 | #include <linux/moduleparam.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/highmem.h> | ||
38 | #include <asm/byteorder.h> | ||
39 | |||
40 | #include <rdma/ib_verbs.h> | ||
41 | #include <rdma/iw_cm.h> | ||
42 | #include <rdma/ib_user_verbs.h> | ||
43 | |||
44 | #include "nes.h" | ||
45 | |||
46 | #include <rdma/ib_umem.h> | ||
47 | |||
48 | atomic_t mod_qp_timouts; | ||
49 | atomic_t qps_created; | ||
50 | atomic_t sw_qps_destroyed; | ||
51 | |||
52 | |||
53 | /** | ||
54 | * nes_alloc_mw - allocate a memory window (an STag with remote access enabled) | ||
55 | */ | ||
56 | static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) { | ||
57 | unsigned long flags; | ||
58 | struct nes_pd *nespd = to_nespd(ibpd); | ||
59 | struct nes_vnic *nesvnic = to_nesvnic(ibpd->device); | ||
60 | struct nes_device *nesdev = nesvnic->nesdev; | ||
61 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
62 | struct nes_cqp_request *cqp_request; | ||
63 | struct nes_mr *nesmr; | ||
64 | struct ib_mw *ibmw; | ||
65 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
66 | int ret; | ||
67 | u32 stag; | ||
68 | u32 stag_index = 0; | ||
69 | u32 next_stag_index = 0; | ||
70 | u32 driver_key = 0; | ||
71 | u8 stag_key = 0; | ||
72 | |||
73 | get_random_bytes(&next_stag_index, sizeof(next_stag_index)); | ||
74 | stag_key = (u8)next_stag_index; | ||
75 | |||
76 | driver_key = 0; | ||
77 | |||
78 | next_stag_index >>= 8; | ||
79 | next_stag_index %= nesadapter->max_mr; | ||
80 | |||
81 | ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs, | ||
82 | nesadapter->max_mr, &stag_index, &next_stag_index); | ||
83 | if (ret) { | ||
84 | return ERR_PTR(ret); | ||
85 | } | ||
86 | |||
87 | nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL); | ||
88 | if (!nesmr) { | ||
89 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | ||
90 | return ERR_PTR(-ENOMEM); | ||
91 | } | ||
92 | |||
93 | stag = stag_index << 8; | ||
94 | stag |= driver_key; | ||
95 | stag += (u32)stag_key; | ||
96 | |||
97 | nes_debug(NES_DBG_MR, "Registering STag 0x%08X, index = 0x%08X\n", | ||
98 | stag, stag_index); | ||
99 | |||
100 | /* Register the region with the adapter */ | ||
101 | cqp_request = nes_get_cqp_request(nesdev); | ||
102 | if (cqp_request == NULL) { | ||
103 | kfree(nesmr); | ||
104 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | ||
105 | return ERR_PTR(-ENOMEM); | ||
106 | } | ||
107 | |||
108 | cqp_request->waiting = 1; | ||
109 | cqp_wqe = &cqp_request->cqp_wqe; | ||
110 | |||
111 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = | ||
112 | cpu_to_le32( NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_RIGHTS_REMOTE_READ | | ||
113 | NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_VA_TO | | ||
114 | NES_CQP_STAG_REM_ACC_EN); | ||
115 | |||
116 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
117 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX, (nespd->pd_id & 0x00007fff)); | ||
118 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag); | ||
119 | |||
120 | atomic_set(&cqp_request->refcount, 2); | ||
121 | nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL); | ||
122 | |||
123 | /* Wait for CQP */ | ||
124 | ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0), | ||
125 | NES_EVENT_TIMEOUT); | ||
126 | nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u," | ||
127 | " CQP Major:Minor codes = 0x%04X:0x%04X.\n", | ||
128 | stag, ret, cqp_request->major_code, cqp_request->minor_code); | ||
129 | if ((!ret) || (cqp_request->major_code)) { | ||
130 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
131 | if (cqp_request->dynamic) { | ||
132 | kfree(cqp_request); | ||
133 | } else { | ||
134 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
135 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
136 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
137 | } | ||
138 | } | ||
139 | kfree(nesmr); | ||
140 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | ||
141 | if (!ret) { | ||
142 | return ERR_PTR(-ETIME); | ||
143 | } else { | ||
144 | return ERR_PTR(-ENOMEM); | ||
145 | } | ||
146 | } else { | ||
147 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
148 | if (cqp_request->dynamic) { | ||
149 | kfree(cqp_request); | ||
150 | } else { | ||
151 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
152 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
153 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
154 | } | ||
155 | } | ||
156 | } | ||
157 | |||
158 | nesmr->ibmw.rkey = stag; | ||
159 | nesmr->mode = IWNES_MEMREG_TYPE_MW; | ||
160 | ibmw = &nesmr->ibmw; | ||
161 | nesmr->pbl_4k = 0; | ||
162 | nesmr->pbls_used = 0; | ||
163 | |||
164 | return ibmw; | ||
165 | } | ||
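The rkey returned here is simply the allocated resource index shifted up one byte with a random key in the low byte; nes_dealloc_mw() below inverts it with (rkey & 0x0fffff00) >> 8. A standalone sketch of the round trip (index and key values are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t stag_index = 0x000123;  /* hypothetical allocator result */
            uint8_t  stag_key   = 0xab;      /* random low byte               */
            uint32_t driver_key = 0;

            uint32_t stag = (stag_index << 8) | driver_key;
            stag += (uint32_t)stag_key;

            uint32_t index_back = (stag & 0x0fffff00) >> 8;

            /* prints: stag = 0x000123ab, index back = 0x000123 */
            printf("stag = 0x%08x, index back = 0x%06x\n", stag, index_back);
            return 0;
    }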
166 | |||
167 | |||
168 | /** | ||
169 | * nes_dealloc_mw - deallocate the memory window's STag and free the nes_mr | ||
170 | */ | ||
171 | static int nes_dealloc_mw(struct ib_mw *ibmw) | ||
172 | { | ||
173 | struct nes_mr *nesmr = to_nesmw(ibmw); | ||
174 | struct nes_vnic *nesvnic = to_nesvnic(ibmw->device); | ||
175 | struct nes_device *nesdev = nesvnic->nesdev; | ||
176 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
177 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
178 | struct nes_cqp_request *cqp_request; | ||
179 | int err = 0; | ||
180 | unsigned long flags; | ||
181 | int ret; | ||
182 | |||
183 | /* Deallocate the window with the adapter */ | ||
184 | cqp_request = nes_get_cqp_request(nesdev); | ||
185 | if (cqp_request == NULL) { | ||
186 | nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n"); | ||
187 | return -ENOMEM; | ||
188 | } | ||
189 | cqp_request->waiting = 1; | ||
190 | cqp_wqe = &cqp_request->cqp_wqe; | ||
191 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
192 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, NES_CQP_DEALLOCATE_STAG); | ||
193 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, ibmw->rkey); | ||
194 | |||
195 | atomic_set(&cqp_request->refcount, 2); | ||
196 | nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL); | ||
197 | |||
198 | /* Wait for CQP */ | ||
199 | nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X to complete.\n", | ||
200 | ibmw->rkey); | ||
201 | ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done), | ||
202 | NES_EVENT_TIMEOUT); | ||
203 | nes_debug(NES_DBG_MR, "Deallocate STag completed, wait_event_timeout ret = %u," | ||
204 | " CQP Major:Minor codes = 0x%04X:0x%04X.\n", | ||
205 | ret, cqp_request->major_code, cqp_request->minor_code); | ||
206 | if ((!ret) || (cqp_request->major_code)) { | ||
207 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
208 | if (cqp_request->dynamic) { | ||
209 | kfree(cqp_request); | ||
210 | } else { | ||
211 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
212 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
213 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
214 | } | ||
215 | } | ||
216 | if (!ret) { | ||
217 | err = -ETIME; | ||
218 | } else { | ||
219 | err = -EIO; | ||
220 | } | ||
221 | } else { | ||
222 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
223 | if (cqp_request->dynamic) { | ||
224 | kfree(cqp_request); | ||
225 | } else { | ||
226 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
227 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
228 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
229 | } | ||
230 | } | ||
231 | } | ||
232 | |||
233 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, | ||
234 | (ibmw->rkey & 0x0fffff00) >> 8); | ||
235 | kfree(nesmr); | ||
236 | |||
237 | return err; | ||
238 | } | ||
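/*
 * Editorial sketch (not part of this patch), showing the STag layout these
 * routines rely on: the allocated resource index shifted left by 8 bits, a
 * driver key OR'd in (zero throughout this driver), and a random 8-bit key
 * added in the low byte.  nes_dealloc_mw above recovers the index with
 * (rkey & 0x0fffff00) >> 8.  Values below are made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t stag_index = 0x1234;	/* as returned by nes_alloc_resource() */
	uint32_t driver_key = 0;
	uint8_t  stag_key   = 0xab;	/* random byte, as with get_random_bytes() */

	uint32_t stag = (stag_index << 8) | driver_key;
	stag += stag_key;

	printf("stag = 0x%08x\n", (unsigned)stag);
	printf("recovered index = 0x%x\n", (unsigned)((stag & 0x0fffff00) >> 8));
	return 0;
}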
239 | |||
240 | |||
241 | /** | ||
242 | * nes_bind_mw | ||
243 | */ | ||
244 | static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw, | ||
245 | struct ib_mw_bind *ibmw_bind) | ||
246 | { | ||
247 | u64 u64temp; | ||
248 | struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); | ||
249 | struct nes_device *nesdev = nesvnic->nesdev; | ||
250 | /* struct nes_mr *nesmr = to_nesmw(ibmw); */ | ||
251 | struct nes_qp *nesqp = to_nesqp(ibqp); | ||
252 | struct nes_hw_qp_wqe *wqe; | ||
253 | unsigned long flags = 0; | ||
254 | u32 head; | ||
255 | u32 wqe_misc = 0; | ||
256 | u32 qsize; | ||
257 | |||
258 | if (nesqp->ibqp_state > IB_QPS_RTS) | ||
259 | return -EINVAL; | ||
260 | |||
261 | spin_lock_irqsave(&nesqp->lock, flags); | ||
262 | |||
263 | head = nesqp->hwqp.sq_head; | ||
264 | qsize = nesqp->hwqp.sq_size; | ||
265 | |||
266 | /* Check for SQ overflow */ | ||
267 | if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { | ||
268 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
269 | return -EINVAL; | ||
270 | } | ||
271 | |||
272 | wqe = &nesqp->hwqp.sq_vbase[head]; | ||
273 | /* nes_debug(NES_DBG_MR, "processing sq wqe at %p, head = %u.\n", wqe, head); */ | ||
274 | nes_fill_init_qp_wqe(wqe, nesqp, head); | ||
275 | u64temp = ibmw_bind->wr_id; | ||
276 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, u64temp); | ||
277 | wqe_misc = NES_IWARP_SQ_OP_BIND; | ||
278 | |||
279 | wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE; | ||
280 | |||
281 | if (ibmw_bind->send_flags & IB_SEND_SIGNALED) | ||
282 | wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL; | ||
283 | |||
284 | if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_WRITE) { | ||
285 | wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE; | ||
286 | } | ||
287 | if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_READ) { | ||
288 | wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_READ; | ||
289 | } | ||
290 | |||
291 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc); | ||
292 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, ibmw_bind->mr->lkey); | ||
293 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey); | ||
294 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX, | ||
295 | ibmw_bind->length); | ||
296 | wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0; | ||
297 | u64temp = (u64)ibmw_bind->addr; | ||
298 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp); | ||
299 | |||
300 | head++; | ||
301 | if (head >= qsize) | ||
302 | head = 0; | ||
303 | |||
304 | nesqp->hwqp.sq_head = head; | ||
305 | barrier(); | ||
306 | |||
307 | nes_write32(nesdev->regs+NES_WQE_ALLOC, | ||
308 | (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id); | ||
309 | |||
310 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
311 | |||
312 | return 0; | ||
313 | } | ||
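/*
 * Editorial sketch (not part of this patch) of the wrap-safe "SQ full"
 * test used in nes_bind_mw above: with one slot always left unused, the
 * ring is full when the head index sits immediately behind the tail index
 * modulo the queue size; adding 2 * qsize keeps the unsigned subtraction
 * from wrapping when head < tail.  Standalone, with made-up indices.
 */
#include <stdio.h>
#include <stdint.h>

static int sq_is_full(uint32_t head, uint32_t tail, uint32_t qsize)
{
	return ((head + (2 * qsize) - tail) % qsize) == (qsize - 1);
}

int main(void)
{
	printf("%d\n", sq_is_full(6, 7, 8));	/* 1: head one behind tail */
	printf("%d\n", sq_is_full(7, 0, 8));	/* 1: full across the wrap  */
	printf("%d\n", sq_is_full(3, 7, 8));	/* 0: room for another WQE  */
	return 0;
}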
314 | |||
315 | |||
316 | /** | ||
317 | * nes_alloc_fmr | ||
318 | */ | ||
319 | static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd, | ||
320 | int ibmr_access_flags, | ||
321 | struct ib_fmr_attr *ibfmr_attr) | ||
322 | { | ||
323 | unsigned long flags; | ||
324 | struct nes_pd *nespd = to_nespd(ibpd); | ||
325 | struct nes_vnic *nesvnic = to_nesvnic(ibpd->device); | ||
326 | struct nes_device *nesdev = nesvnic->nesdev; | ||
327 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
328 | struct nes_fmr *nesfmr; | ||
329 | struct nes_cqp_request *cqp_request; | ||
330 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
331 | int ret; | ||
332 | u32 stag; | ||
333 | u32 stag_index = 0; | ||
334 | u32 next_stag_index = 0; | ||
335 | u32 driver_key = 0; | ||
336 | u32 opcode = 0; | ||
337 | u8 stag_key = 0; | ||
338 | int i=0; | ||
339 | struct nes_vpbl vpbl; | ||
340 | |||
341 | get_random_bytes(&next_stag_index, sizeof(next_stag_index)); | ||
342 | stag_key = (u8)next_stag_index; | ||
343 | |||
344 | driver_key = 0; | ||
345 | |||
346 | next_stag_index >>= 8; | ||
347 | next_stag_index %= nesadapter->max_mr; | ||
348 | |||
349 | ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs, | ||
350 | nesadapter->max_mr, &stag_index, &next_stag_index); | ||
351 | if (ret) { | ||
352 | goto failed_resource_alloc; | ||
353 | } | ||
354 | |||
355 | nesfmr = kzalloc(sizeof(*nesfmr), GFP_KERNEL); | ||
356 | if (!nesfmr) { | ||
357 | ret = -ENOMEM; | ||
358 | goto failed_fmr_alloc; | ||
359 | } | ||
360 | |||
361 | nesfmr->nesmr.mode = IWNES_MEMREG_TYPE_FMR; | ||
362 | if (ibfmr_attr->max_pages == 1) { | ||
363 | /* use zero length PBL */ | ||
364 | nesfmr->nesmr.pbl_4k = 0; | ||
365 | nesfmr->nesmr.pbls_used = 0; | ||
366 | } else if (ibfmr_attr->max_pages <= 32) { | ||
367 | /* use PBL 256 */ | ||
368 | nesfmr->nesmr.pbl_4k = 0; | ||
369 | nesfmr->nesmr.pbls_used = 1; | ||
370 | } else if (ibfmr_attr->max_pages <= 512) { | ||
371 | /* use 4K PBLs */ | ||
372 | nesfmr->nesmr.pbl_4k = 1; | ||
373 | nesfmr->nesmr.pbls_used = 1; | ||
374 | } else { | ||
375 | /* use two level 4K PBLs */ | ||
376 | /* add support for two level 256B PBLs */ | ||
377 | nesfmr->nesmr.pbl_4k = 1; | ||
378 | nesfmr->nesmr.pbls_used = 1 + (ibfmr_attr->max_pages >> 9) + | ||
379 | ((ibfmr_attr->max_pages & 511) ? 1 : 0); | ||
380 | } | ||
381 | /* Register the region with the adapter */ | ||
382 | spin_lock_irqsave(&nesadapter->pbl_lock, flags); | ||
383 | |||
384 | /* track PBL resources */ | ||
385 | if (nesfmr->nesmr.pbls_used != 0) { | ||
386 | if (nesfmr->nesmr.pbl_4k) { | ||
387 | if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) { | ||
388 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
389 | ret = -ENOMEM; | ||
390 | goto failed_vpbl_alloc; | ||
391 | } else { | ||
392 | nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used; | ||
393 | } | ||
394 | } else { | ||
395 | if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) { | ||
396 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
397 | ret = -ENOMEM; | ||
398 | goto failed_vpbl_alloc; | ||
399 | } else { | ||
400 | nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used; | ||
401 | } | ||
402 | } | ||
403 | } | ||
404 | |||
405 | /* one level pbl */ | ||
406 | if (nesfmr->nesmr.pbls_used == 0) { | ||
407 | nesfmr->root_vpbl.pbl_vbase = NULL; | ||
408 | nes_debug(NES_DBG_MR, "zero level pbl \n"); | ||
409 | } else if (nesfmr->nesmr.pbls_used == 1) { | ||
410 | /* can change it to kmalloc & dma_map_single */ | ||
411 | nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096, | ||
412 | &nesfmr->root_vpbl.pbl_pbase); | ||
413 | if (!nesfmr->root_vpbl.pbl_vbase) { | ||
414 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
415 | ret = -ENOMEM; | ||
416 | goto failed_vpbl_alloc; | ||
417 | } | ||
418 | nesfmr->leaf_pbl_cnt = 0; | ||
419 | nes_debug(NES_DBG_MR, "one level pbl, root_vpbl.pbl_vbase=%p \n", | ||
420 | nesfmr->root_vpbl.pbl_vbase); | ||
421 | } | ||
422 | /* two level pbl */ | ||
423 | else { | ||
424 | nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192, | ||
425 | &nesfmr->root_vpbl.pbl_pbase); | ||
426 | if (!nesfmr->root_vpbl.pbl_vbase) { | ||
427 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
428 | ret = -ENOMEM; | ||
429 | goto failed_vpbl_alloc; | ||
430 | } | ||
431 | |||
432 | nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_KERNEL); | ||
433 | if (!nesfmr->root_vpbl.leaf_vpbl) { | ||
434 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
435 | ret = -ENOMEM; | ||
436 | goto failed_leaf_vpbl_alloc; | ||
437 | } | ||
438 | |||
439 | nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1; | ||
440 | nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p" | ||
441 | " leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n", | ||
442 | nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl); | ||
443 | |||
444 | for (i=0; i<nesfmr->leaf_pbl_cnt; i++) | ||
445 | nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase = NULL; | ||
446 | |||
447 | for (i=0; i<nesfmr->leaf_pbl_cnt; i++) { | ||
448 | vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096, | ||
449 | &vpbl.pbl_pbase); | ||
450 | |||
451 | if (!vpbl.pbl_vbase) { | ||
452 | ret = -ENOMEM; | ||
453 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
454 | goto failed_leaf_vpbl_pages_alloc; | ||
455 | } | ||
456 | |||
457 | nesfmr->root_vpbl.pbl_vbase[i].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase); | ||
458 | nesfmr->root_vpbl.pbl_vbase[i].pa_high = cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32))); | ||
459 | nesfmr->root_vpbl.leaf_vpbl[i] = vpbl; | ||
460 | |||
461 | nes_debug(NES_DBG_MR, "pbase_low=0x%x, pbase_high=0x%x, vpbl=%p\n", | ||
462 | nesfmr->root_vpbl.pbl_vbase[i].pa_low, | ||
463 | nesfmr->root_vpbl.pbl_vbase[i].pa_high, | ||
464 | &nesfmr->root_vpbl.leaf_vpbl[i]); | ||
465 | } | ||
466 | } | ||
467 | nesfmr->ib_qp = NULL; | ||
468 | nesfmr->access_rights =0; | ||
469 | |||
470 | stag = stag_index << 8; | ||
471 | stag |= driver_key; | ||
472 | stag += (u32)stag_key; | ||
473 | |||
474 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
475 | cqp_request = nes_get_cqp_request(nesdev); | ||
476 | if (cqp_request == NULL) { | ||
477 | nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n"); | ||
478 | ret = -ENOMEM; | ||
479 | goto failed_leaf_vpbl_pages_alloc; | ||
480 | } | ||
481 | cqp_request->waiting = 1; | ||
482 | cqp_wqe = &cqp_request->cqp_wqe; | ||
483 | |||
484 | nes_debug(NES_DBG_MR, "Registering STag 0x%08X, index = 0x%08X\n", | ||
485 | stag, stag_index); | ||
486 | |||
487 | opcode = NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR; | ||
488 | |||
489 | if (nesfmr->nesmr.pbl_4k == 1) | ||
490 | opcode |= NES_CQP_STAG_PBL_BLK_SIZE; | ||
491 | |||
492 | if (ibmr_access_flags & IB_ACCESS_REMOTE_WRITE) { | ||
493 | opcode |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE | | ||
494 | NES_CQP_STAG_RIGHTS_LOCAL_WRITE | NES_CQP_STAG_REM_ACC_EN; | ||
495 | nesfmr->access_rights |= | ||
496 | NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_RIGHTS_LOCAL_WRITE | | ||
497 | NES_CQP_STAG_REM_ACC_EN; | ||
498 | } | ||
499 | |||
500 | if (ibmr_access_flags & IB_ACCESS_REMOTE_READ) { | ||
501 | opcode |= NES_CQP_STAG_RIGHTS_REMOTE_READ | | ||
502 | NES_CQP_STAG_RIGHTS_LOCAL_READ | NES_CQP_STAG_REM_ACC_EN; | ||
503 | nesfmr->access_rights |= | ||
504 | NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_RIGHTS_LOCAL_READ | | ||
505 | NES_CQP_STAG_REM_ACC_EN; | ||
506 | } | ||
507 | |||
508 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
509 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode); | ||
510 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX, (nespd->pd_id & 0x00007fff)); | ||
511 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag); | ||
512 | |||
513 | cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] = | ||
514 | cpu_to_le32((nesfmr->nesmr.pbls_used>1) ? | ||
515 | (nesfmr->nesmr.pbls_used-1) : nesfmr->nesmr.pbls_used); | ||
516 | |||
517 | atomic_set(&cqp_request->refcount, 2); | ||
518 | nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL); | ||
519 | |||
520 | /* Wait for CQP */ | ||
521 | ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0), | ||
522 | NES_EVENT_TIMEOUT); | ||
523 | nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u," | ||
524 | " CQP Major:Minor codes = 0x%04X:0x%04X.\n", | ||
525 | stag, ret, cqp_request->major_code, cqp_request->minor_code); | ||
526 | |||
527 | if ((!ret) || (cqp_request->major_code)) { | ||
528 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
529 | if (cqp_request->dynamic) { | ||
530 | kfree(cqp_request); | ||
531 | } else { | ||
532 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
533 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
534 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
535 | } | ||
536 | } | ||
537 | ret = (!ret) ? -ETIME : -EIO; | ||
538 | goto failed_leaf_vpbl_pages_alloc; | ||
539 | } else { | ||
540 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
541 | if (cqp_request->dynamic) { | ||
542 | kfree(cqp_request); | ||
543 | } else { | ||
544 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
545 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
546 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
547 | } | ||
548 | } | ||
549 | } | ||
550 | |||
551 | nesfmr->nesmr.ibfmr.lkey = stag; | ||
552 | nesfmr->nesmr.ibfmr.rkey = stag; | ||
553 | nesfmr->attr = *ibfmr_attr; | ||
554 | |||
555 | return &nesfmr->nesmr.ibfmr; | ||
556 | |||
557 | failed_leaf_vpbl_pages_alloc: | ||
558 | /* unroll all allocated pages */ | ||
559 | for (i=0; i<nesfmr->leaf_pbl_cnt; i++) { | ||
560 | if (nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase) { | ||
561 | pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase, | ||
562 | nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase); | ||
563 | } | ||
564 | } | ||
565 | if (nesfmr->root_vpbl.leaf_vpbl) | ||
566 | kfree(nesfmr->root_vpbl.leaf_vpbl); | ||
567 | |||
568 | failed_leaf_vpbl_alloc: | ||
569 | if (nesfmr->leaf_pbl_cnt == 0) { | ||
570 | if (nesfmr->root_vpbl.pbl_vbase) | ||
571 | pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase, | ||
572 | nesfmr->root_vpbl.pbl_pbase); | ||
573 | } else | ||
574 | pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase, | ||
575 | nesfmr->root_vpbl.pbl_pbase); | ||
576 | |||
577 | failed_vpbl_alloc: | ||
578 | kfree(nesfmr); | ||
579 | |||
580 | failed_fmr_alloc: | ||
581 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | ||
582 | |||
583 | failed_resource_alloc: | ||
584 | return ERR_PTR(ret); | ||
585 | } | ||
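/*
 * Editorial sketch (not part of this patch) of how nes_alloc_fmr above
 * sizes its page-buffer lists: a 256-byte PBL holds 32 eight-byte entries,
 * a 4 KB PBL holds 512, and anything larger needs a root PBL plus one 4 KB
 * leaf per 512 pages.  The struct and function names here are placeholders.
 */
#include <stdio.h>

struct pbl_plan { int use_4k; int pbls_used; };

static struct pbl_plan plan_pbls(unsigned int max_pages)
{
	struct pbl_plan p = { 0, 0 };

	if (max_pages == 1) {			/* zero-length PBL */
		p.use_4k = 0;  p.pbls_used = 0;
	} else if (max_pages <= 32) {		/* one 256-byte PBL */
		p.use_4k = 0;  p.pbls_used = 1;
	} else if (max_pages <= 512) {		/* one 4 KB PBL */
		p.use_4k = 1;  p.pbls_used = 1;
	} else {				/* root + ceil(max_pages / 512) leaves */
		p.use_4k = 1;
		p.pbls_used = 1 + (max_pages >> 9) + ((max_pages & 511) ? 1 : 0);
	}
	return p;
}

int main(void)
{
	unsigned int cases[] = { 1, 32, 512, 513, 1024 };

	for (int i = 0; i < 5; i++) {
		struct pbl_plan p = plan_pbls(cases[i]);
		printf("max_pages=%u -> pbl_4k=%d pbls_used=%d\n",
		       cases[i], p.use_4k, p.pbls_used);
	}
	return 0;
}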
586 | |||
587 | |||
588 | /** | ||
589 | * nes_dealloc_fmr | ||
590 | */ | ||
591 | static int nes_dealloc_fmr(struct ib_fmr *ibfmr) | ||
592 | { | ||
593 | struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr); | ||
594 | struct nes_fmr *nesfmr = to_nesfmr(nesmr); | ||
595 | struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device); | ||
596 | struct nes_device *nesdev = nesvnic->nesdev; | ||
597 | struct nes_mr temp_nesmr = *nesmr; | ||
598 | int i = 0; | ||
599 | |||
600 | temp_nesmr.ibmw.device = ibfmr->device; | ||
601 | temp_nesmr.ibmw.pd = ibfmr->pd; | ||
602 | temp_nesmr.ibmw.rkey = ibfmr->rkey; | ||
603 | temp_nesmr.ibmw.uobject = NULL; | ||
604 | |||
605 | /* free the resources */ | ||
606 | if (nesfmr->leaf_pbl_cnt == 0) { | ||
607 | /* single PBL case */ | ||
608 | if (nesfmr->root_vpbl.pbl_vbase) | ||
609 | pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase, | ||
610 | nesfmr->root_vpbl.pbl_pbase); | ||
611 | } else { | ||
612 | for (i = 0; i < nesfmr->leaf_pbl_cnt; i++) { | ||
613 | pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase, | ||
614 | nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase); | ||
615 | } | ||
616 | kfree(nesfmr->root_vpbl.leaf_vpbl); | ||
617 | pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase, | ||
618 | nesfmr->root_vpbl.pbl_pbase); | ||
619 | } | ||
620 | |||
621 | return nes_dealloc_mw(&temp_nesmr.ibmw); | ||
622 | } | ||
623 | |||
624 | |||
625 | /** | ||
626 | * nes_map_phys_fmr | ||
627 | */ | ||
628 | static int nes_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, | ||
629 | int list_len, u64 iova) | ||
630 | { | ||
631 | return 0; | ||
632 | } | ||
633 | |||
634 | |||
635 | /** | ||
636 | * nes_unmap_fmr | ||
637 | */ | ||
638 | static int nes_unmap_fmr(struct list_head *ibfmr_list) | ||
639 | { | ||
640 | return 0; | ||
641 | } | ||
642 | |||
643 | |||
644 | |||
645 | /** | ||
646 | * nes_query_device | ||
647 | */ | ||
648 | static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *props) | ||
649 | { | ||
650 | struct nes_vnic *nesvnic = to_nesvnic(ibdev); | ||
651 | struct nes_device *nesdev = nesvnic->nesdev; | ||
652 | struct nes_ib_device *nesibdev = nesvnic->nesibdev; | ||
653 | |||
654 | memset(props, 0, sizeof(*props)); | ||
655 | memcpy(&props->sys_image_guid, nesvnic->netdev->dev_addr, 6); | ||
656 | |||
657 | props->fw_ver = nesdev->nesadapter->fw_ver; | ||
658 | props->device_cap_flags = nesdev->nesadapter->device_cap_flags; | ||
659 | props->vendor_id = nesdev->nesadapter->vendor_id; | ||
660 | props->vendor_part_id = nesdev->nesadapter->vendor_part_id; | ||
661 | props->hw_ver = nesdev->nesadapter->hw_rev; | ||
662 | props->max_mr_size = 0x80000000; | ||
663 | props->max_qp = nesibdev->max_qp; | ||
664 | props->max_qp_wr = nesdev->nesadapter->max_qp_wr - 2; | ||
665 | props->max_sge = nesdev->nesadapter->max_sge; | ||
666 | props->max_cq = nesibdev->max_cq; | ||
667 | props->max_cqe = nesdev->nesadapter->max_cqe - 1; | ||
668 | props->max_mr = nesibdev->max_mr; | ||
669 | props->max_mw = nesibdev->max_mr; | ||
670 | props->max_pd = nesibdev->max_pd; | ||
671 | props->max_sge_rd = 1; | ||
672 | switch (nesdev->nesadapter->max_irrq_wr) { | ||
673 | case 0: | ||
674 | props->max_qp_rd_atom = 1; | ||
675 | break; | ||
676 | case 1: | ||
677 | props->max_qp_rd_atom = 4; | ||
678 | break; | ||
679 | case 2: | ||
680 | props->max_qp_rd_atom = 16; | ||
681 | break; | ||
682 | case 3: | ||
683 | props->max_qp_rd_atom = 32; | ||
684 | break; | ||
685 | default: | ||
686 | props->max_qp_rd_atom = 0; | ||
687 | } | ||
688 | props->max_qp_init_rd_atom = props->max_qp_wr; | ||
689 | props->atomic_cap = IB_ATOMIC_NONE; | ||
690 | props->max_map_per_fmr = 1; | ||
691 | |||
692 | return 0; | ||
693 | } | ||
694 | |||
695 | |||
696 | /** | ||
697 | * nes_query_port | ||
698 | */ | ||
699 | static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) | ||
700 | { | ||
701 | memset(props, 0, sizeof(*props)); | ||
702 | |||
703 | props->max_mtu = IB_MTU_2048; | ||
704 | props->active_mtu = IB_MTU_2048; | ||
705 | props->lid = 1; | ||
706 | props->lmc = 0; | ||
707 | props->sm_lid = 0; | ||
708 | props->sm_sl = 0; | ||
709 | props->state = IB_PORT_ACTIVE; | ||
710 | props->phys_state = 0; | ||
711 | props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | | ||
712 | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; | ||
713 | props->gid_tbl_len = 1; | ||
714 | props->pkey_tbl_len = 1; | ||
715 | props->qkey_viol_cntr = 0; | ||
716 | props->active_width = IB_WIDTH_4X; | ||
717 | props->active_speed = 1; | ||
718 | props->max_msg_sz = 0x80000000; | ||
719 | |||
720 | return 0; | ||
721 | } | ||
722 | |||
723 | |||
724 | /** | ||
725 | * nes_modify_port | ||
726 | */ | ||
727 | static int nes_modify_port(struct ib_device *ibdev, u8 port, | ||
728 | int port_modify_mask, struct ib_port_modify *props) | ||
729 | { | ||
730 | return 0; | ||
731 | } | ||
732 | |||
733 | |||
734 | /** | ||
735 | * nes_query_pkey | ||
736 | */ | ||
737 | static int nes_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) | ||
738 | { | ||
739 | *pkey = 0; | ||
740 | return 0; | ||
741 | } | ||
742 | |||
743 | |||
744 | /** | ||
745 | * nes_query_gid | ||
746 | */ | ||
747 | static int nes_query_gid(struct ib_device *ibdev, u8 port, | ||
748 | int index, union ib_gid *gid) | ||
749 | { | ||
750 | struct nes_vnic *nesvnic = to_nesvnic(ibdev); | ||
751 | |||
752 | memset(&(gid->raw[0]), 0, sizeof(gid->raw)); | ||
753 | memcpy(&(gid->raw[0]), nesvnic->netdev->dev_addr, 6); | ||
754 | |||
755 | return 0; | ||
756 | } | ||
757 | |||
758 | |||
759 | /** | ||
760 | * nes_alloc_ucontext - Allocate the user context data structure. This keeps track | ||
761 | * of all objects associated with a particular user-mode client. | ||
762 | */ | ||
763 | static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev, | ||
764 | struct ib_udata *udata) | ||
765 | { | ||
766 | struct nes_vnic *nesvnic = to_nesvnic(ibdev); | ||
767 | struct nes_device *nesdev = nesvnic->nesdev; | ||
768 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
769 | struct nes_alloc_ucontext_req req; | ||
770 | struct nes_alloc_ucontext_resp uresp; | ||
771 | struct nes_ucontext *nes_ucontext; | ||
772 | struct nes_ib_device *nesibdev = nesvnic->nesibdev; | ||
773 | |||
774 | |||
775 | if (ib_copy_from_udata(&req, udata, sizeof(struct nes_alloc_ucontext_req))) { | ||
776 | printk(KERN_ERR PFX "Invalid structure size on allocate user context.\n"); | ||
777 | return ERR_PTR(-EINVAL); | ||
778 | } | ||
779 | |||
780 | if (req.userspace_ver != NES_ABI_USERSPACE_VER) { | ||
781 | printk(KERN_ERR PFX "Invalid userspace driver version detected. Detected version %d, should be %d\n", | ||
782 | req.userspace_ver, NES_ABI_USERSPACE_VER); | ||
783 | return ERR_PTR(-EINVAL); | ||
784 | } | ||
785 | |||
786 | |||
787 | memset(&uresp, 0, sizeof uresp); | ||
788 | |||
789 | uresp.max_qps = nesibdev->max_qp; | ||
790 | uresp.max_pds = nesibdev->max_pd; | ||
791 | uresp.wq_size = nesdev->nesadapter->max_qp_wr * 2; | ||
792 | uresp.virtwq = nesadapter->virtwq; | ||
793 | uresp.kernel_ver = NES_ABI_KERNEL_VER; | ||
794 | |||
795 | nes_ucontext = kzalloc(sizeof *nes_ucontext, GFP_KERNEL); | ||
796 | if (!nes_ucontext) | ||
797 | return ERR_PTR(-ENOMEM); | ||
798 | |||
799 | nes_ucontext->nesdev = nesdev; | ||
800 | nes_ucontext->mmap_wq_offset = uresp.max_pds; | ||
801 | nes_ucontext->mmap_cq_offset = nes_ucontext->mmap_wq_offset + | ||
802 | ((sizeof(struct nes_hw_qp_wqe) * uresp.max_qps * 2) + PAGE_SIZE-1) / | ||
803 | PAGE_SIZE; | ||
804 | |||
805 | |||
806 | if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) { | ||
807 | kfree(nes_ucontext); | ||
808 | return ERR_PTR(-EFAULT); | ||
809 | } | ||
810 | |||
811 | INIT_LIST_HEAD(&nes_ucontext->cq_reg_mem_list); | ||
812 | INIT_LIST_HEAD(&nes_ucontext->qp_reg_mem_list); | ||
813 | atomic_set(&nes_ucontext->usecnt, 1); | ||
814 | return &nes_ucontext->ibucontext; | ||
815 | } | ||
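/*
 * Editorial sketch (not part of this patch) of the mmap page-offset layout
 * set up in nes_alloc_ucontext above: doorbell pages first (one per PD),
 * then the work-queue region, then completion queues.  The WQE size and
 * the limits below are illustrative stand-ins, not the adapter's values.
 */
#include <stdio.h>

#define PAGE_SIZE_EX	4096UL
#define WQE_SIZE_EX	128UL	/* stand-in for sizeof(struct nes_hw_qp_wqe) */

int main(void)
{
	unsigned long max_pds = 32, max_qps = 64;

	unsigned long wq_offset = max_pds;	/* doorbells occupy pgoff 0 .. max_pds-1 */
	unsigned long wq_pages  = (WQE_SIZE_EX * max_qps * 2 + PAGE_SIZE_EX - 1) /
				  PAGE_SIZE_EX;	/* round the WQ region up to whole pages */
	unsigned long cq_offset = wq_offset + wq_pages;

	printf("doorbells:   pgoff 0 .. %lu\n", wq_offset - 1);
	printf("work queues: pgoff %lu .. %lu\n", wq_offset, cq_offset - 1);
	printf("CQs start at pgoff %lu\n", cq_offset);
	return 0;
}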
816 | |||
817 | |||
818 | /** | ||
819 | * nes_dealloc_ucontext | ||
820 | */ | ||
821 | static int nes_dealloc_ucontext(struct ib_ucontext *context) | ||
822 | { | ||
823 | /* struct nes_vnic *nesvnic = to_nesvnic(context->device); */ | ||
824 | /* struct nes_device *nesdev = nesvnic->nesdev; */ | ||
825 | struct nes_ucontext *nes_ucontext = to_nesucontext(context); | ||
826 | |||
827 | if (!atomic_dec_and_test(&nes_ucontext->usecnt)) | ||
828 | return 0; | ||
829 | kfree(nes_ucontext); | ||
830 | return 0; | ||
831 | } | ||
832 | |||
833 | |||
834 | /** | ||
835 | * nes_mmap | ||
836 | */ | ||
837 | static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) | ||
838 | { | ||
839 | unsigned long index; | ||
840 | struct nes_vnic *nesvnic = to_nesvnic(context->device); | ||
841 | struct nes_device *nesdev = nesvnic->nesdev; | ||
842 | /* struct nes_adapter *nesadapter = nesdev->nesadapter; */ | ||
843 | struct nes_ucontext *nes_ucontext; | ||
844 | struct nes_qp *nesqp; | ||
845 | |||
846 | nes_ucontext = to_nesucontext(context); | ||
847 | |||
848 | |||
849 | if (vma->vm_pgoff >= nes_ucontext->mmap_wq_offset) { | ||
850 | index = (vma->vm_pgoff - nes_ucontext->mmap_wq_offset) * PAGE_SIZE; | ||
851 | index /= ((sizeof(struct nes_hw_qp_wqe) * nesdev->nesadapter->max_qp_wr * 2) + | ||
852 | PAGE_SIZE-1) & (~(PAGE_SIZE-1)); | ||
853 | if (!test_bit(index, nes_ucontext->allocated_wqs)) { | ||
854 | nes_debug(NES_DBG_MMAP, "wq %lu not allocated\n", index); | ||
855 | return -EFAULT; | ||
856 | } | ||
857 | nesqp = nes_ucontext->mmap_nesqp[index]; | ||
858 | if (nesqp == NULL) { | ||
859 | nes_debug(NES_DBG_MMAP, "wq %lu has a NULL QP base.\n", index); | ||
860 | return -EFAULT; | ||
861 | } | ||
862 | if (remap_pfn_range(vma, vma->vm_start, | ||
863 | virt_to_phys(nesqp->hwqp.sq_vbase) >> PAGE_SHIFT, | ||
864 | vma->vm_end - vma->vm_start, | ||
865 | vma->vm_page_prot)) { | ||
866 | nes_debug(NES_DBG_MMAP, "remap_pfn_range failed.\n"); | ||
867 | return -EAGAIN; | ||
868 | } | ||
869 | vma->vm_private_data = nesqp; | ||
870 | return 0; | ||
871 | } else { | ||
872 | index = vma->vm_pgoff; | ||
873 | if (!test_bit(index, nes_ucontext->allocated_doorbells)) | ||
874 | return -EFAULT; | ||
875 | |||
876 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
877 | if (io_remap_pfn_range(vma, vma->vm_start, | ||
878 | (nesdev->doorbell_start + | ||
879 | ((nes_ucontext->mmap_db_index[index] - nesdev->base_doorbell_index) * 4096)) | ||
880 | >> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot)) | ||
881 | return -EAGAIN; | ||
882 | vma->vm_private_data = nes_ucontext; | ||
883 | return 0; | ||
884 | } | ||
885 | |||
886 | return -ENOSYS; | ||
887 | } | ||
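/*
 * Editorial sketch (not part of this patch) of how nes_mmap above turns a
 * userspace page offset back into a work-queue index: offsets past the
 * doorbell region are converted to bytes and divided by the page-rounded
 * size of one QP's work-queue area.  Constants here are illustrative.
 */
#include <stdio.h>

#define PAGE_SZ		4096UL
#define WQE_SZ		128UL	/* stand-in for sizeof(struct nes_hw_qp_wqe) */
#define MAX_QP_WR	512UL

int main(void)
{
	unsigned long wq_offset = 32;	/* pgoff where the WQ region begins */
	unsigned long per_qp = (WQE_SZ * MAX_QP_WR * 2 + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
	unsigned long pgoff = wq_offset + 2 * (per_qp / PAGE_SZ);	/* third QP's pages */
	unsigned long index = (pgoff - wq_offset) * PAGE_SZ / per_qp;

	printf("per-QP WQ region: %lu bytes; pgoff %lu maps to wq index %lu\n",
	       per_qp, pgoff, index);
	return 0;
}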
888 | |||
889 | |||
890 | /** | ||
891 | * nes_alloc_pd | ||
892 | */ | ||
893 | static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev, | ||
894 | struct ib_ucontext *context, struct ib_udata *udata) | ||
895 | { | ||
896 | struct nes_pd *nespd; | ||
897 | struct nes_vnic *nesvnic = to_nesvnic(ibdev); | ||
898 | struct nes_device *nesdev = nesvnic->nesdev; | ||
899 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
900 | struct nes_ucontext *nesucontext; | ||
901 | struct nes_alloc_pd_resp uresp; | ||
902 | u32 pd_num = 0; | ||
903 | int err; | ||
904 | |||
905 | nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n", | ||
906 | nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context, | ||
907 | atomic_read(&nesvnic->netdev->refcnt)); | ||
908 | |||
909 | err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds, | ||
910 | nesadapter->max_pd, &pd_num, &nesadapter->next_pd); | ||
911 | if (err) { | ||
912 | return ERR_PTR(err); | ||
913 | } | ||
914 | |||
915 | nespd = kzalloc(sizeof (struct nes_pd), GFP_KERNEL); | ||
916 | if (!nespd) { | ||
917 | nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); | ||
918 | return ERR_PTR(-ENOMEM); | ||
919 | } | ||
920 | |||
921 | nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n", | ||
922 | nespd, nesvnic->nesibdev->ibdev.name); | ||
923 | |||
924 | nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd; | ||
925 | |||
926 | if (context) { | ||
927 | nesucontext = to_nesucontext(context); | ||
928 | nespd->mmap_db_index = find_next_zero_bit(nesucontext->allocated_doorbells, | ||
929 | NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db); | ||
930 | nes_debug(NES_DBG_PD, "find_first_zero_bit on doorbells returned %u, mapping pd_id %u.\n", | ||
931 | nespd->mmap_db_index, nespd->pd_id); | ||
932 | if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) { | ||
933 | nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n"); | ||
934 | nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); | ||
935 | kfree(nespd); | ||
936 | return ERR_PTR(-ENOMEM); | ||
937 | } | ||
938 | |||
939 | uresp.pd_id = nespd->pd_id; | ||
940 | uresp.mmap_db_index = nespd->mmap_db_index; | ||
941 | if (ib_copy_to_udata(udata, &uresp, sizeof (struct nes_alloc_pd_resp))) { | ||
942 | nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); | ||
943 | kfree(nespd); | ||
944 | return ERR_PTR(-EFAULT); | ||
945 | } | ||
946 | |||
947 | set_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells); | ||
948 | nesucontext->mmap_db_index[nespd->mmap_db_index] = nespd->pd_id; | ||
949 | nesucontext->first_free_db = nespd->mmap_db_index + 1; | ||
950 | } | ||
951 | |||
952 | nes_debug(NES_DBG_PD, "PD%u structure located @%p.\n", nespd->pd_id, nespd); | ||
953 | return &nespd->ibpd; | ||
954 | } | ||
955 | |||
956 | |||
957 | /** | ||
958 | * nes_dealloc_pd | ||
959 | */ | ||
960 | static int nes_dealloc_pd(struct ib_pd *ibpd) | ||
961 | { | ||
962 | struct nes_ucontext *nesucontext; | ||
963 | struct nes_pd *nespd = to_nespd(ibpd); | ||
964 | struct nes_vnic *nesvnic = to_nesvnic(ibpd->device); | ||
965 | struct nes_device *nesdev = nesvnic->nesdev; | ||
966 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
967 | |||
968 | if ((ibpd->uobject) && (ibpd->uobject->context)) { | ||
969 | nesucontext = to_nesucontext(ibpd->uobject->context); | ||
970 | nes_debug(NES_DBG_PD, "Clearing bit %u from allocated doorbells\n", | ||
971 | nespd->mmap_db_index); | ||
972 | clear_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells); | ||
973 | nesucontext->mmap_db_index[nespd->mmap_db_index] = 0; | ||
974 | if (nesucontext->first_free_db > nespd->mmap_db_index) { | ||
975 | nesucontext->first_free_db = nespd->mmap_db_index; | ||
976 | } | ||
977 | } | ||
978 | |||
979 | nes_debug(NES_DBG_PD, "Deallocating PD%u structure located @%p.\n", | ||
980 | nespd->pd_id, nespd); | ||
981 | nes_free_resource(nesadapter, nesadapter->allocated_pds, | ||
982 | (nespd->pd_id-nesadapter->base_pd)>>(PAGE_SHIFT-12)); | ||
983 | kfree(nespd); | ||
984 | |||
985 | return 0; | ||
986 | } | ||
987 | |||
988 | |||
989 | /** | ||
990 | * nes_create_ah | ||
991 | */ | ||
992 | static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) | ||
993 | { | ||
994 | return ERR_PTR(-ENOSYS); | ||
995 | } | ||
996 | |||
997 | |||
998 | /** | ||
999 | * nes_destroy_ah | ||
1000 | */ | ||
1001 | static int nes_destroy_ah(struct ib_ah *ah) | ||
1002 | { | ||
1003 | return -ENOSYS; | ||
1004 | } | ||
1005 | |||
1006 | |||
1007 | /** | ||
1008 | * nes_get_encoded_size | ||
1009 | */ | ||
1010 | static inline u8 nes_get_encoded_size(int *size) | ||
1011 | { | ||
1012 | u8 encoded_size = 0; | ||
1013 | if (*size <= 32) { | ||
1014 | *size = 32; | ||
1015 | encoded_size = 1; | ||
1016 | } else if (*size <= 128) { | ||
1017 | *size = 128; | ||
1018 | encoded_size = 2; | ||
1019 | } else if (*size <= 512) { | ||
1020 | *size = 512; | ||
1021 | encoded_size = 3; | ||
1022 | } | ||
1023 | return (encoded_size); | ||
1024 | } | ||
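/*
 * Editorial sketch (not part of this patch) of nes_get_encoded_size above:
 * requested queue depths are rounded up to the next supported ring size
 * (32, 128 or 512 WQEs) and mapped to the 2-bit code the hardware expects;
 * a return of 0 means the request cannot fit a single ring.
 */
#include <stdio.h>

static unsigned char encode_ring_size(int *size)
{
	if (*size <= 32)  { *size = 32;  return 1; }
	if (*size <= 128) { *size = 128; return 2; }
	if (*size <= 512) { *size = 512; return 3; }
	return 0;	/* nes_create_qp treats this as -EINVAL */
}

int main(void)
{
	int requests[] = { 5, 100, 512, 600 };

	for (int i = 0; i < 4; i++) {
		int s = requests[i];
		unsigned char code = encode_ring_size(&s);
		printf("requested %d -> ring of %d WQEs, encoded size %u\n",
		       requests[i], s, code);
	}
	return 0;
}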
1025 | |||
1026 | |||
1027 | |||
1028 | /** | ||
1029 | * nes_setup_virt_qp | ||
1030 | */ | ||
1031 | static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl, | ||
1032 | struct nes_vnic *nesvnic, int sq_size, int rq_size) | ||
1033 | { | ||
1034 | unsigned long flags; | ||
1035 | void *mem; | ||
1036 | __le64 *pbl = NULL; | ||
1037 | __le64 *tpbl; | ||
1038 | __le64 *pblbuffer; | ||
1039 | struct nes_device *nesdev = nesvnic->nesdev; | ||
1040 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1041 | u32 pbl_entries; | ||
1042 | u8 rq_pbl_entries; | ||
1043 | u8 sq_pbl_entries; | ||
1044 | |||
1045 | pbl_entries = nespbl->pbl_size >> 3; | ||
1046 | nes_debug(NES_DBG_QP, "Userspace PBL, pbl_size=%u, pbl_entries = %d pbl_vbase=%p, pbl_pbase=%p\n", | ||
1047 | nespbl->pbl_size, pbl_entries, | ||
1048 | (void *)nespbl->pbl_vbase, | ||
1049 | (void *)nespbl->pbl_pbase); | ||
1050 | pbl = (__le64 *) nespbl->pbl_vbase; /* points to first pbl entry */ | ||
1051 | /* now let's set the sq_vbase as well as rq_vbase addrs we will assign */ | ||
1052 | /* the first pbl to be for the rq_vbase... */ | ||
1053 | rq_pbl_entries = (rq_size * sizeof(struct nes_hw_qp_wqe)) >> 12; | ||
1054 | sq_pbl_entries = (sq_size * sizeof(struct nes_hw_qp_wqe)) >> 12; | ||
1055 | nesqp->hwqp.sq_pbase = (le32_to_cpu(((__le32 *)pbl)[0])) | ((u64)((le32_to_cpu(((__le32 *)pbl)[1]))) << 32); | ||
1056 | if (!nespbl->page) { | ||
1057 | nes_debug(NES_DBG_QP, "QP nespbl->page is NULL \n"); | ||
1058 | kfree(nespbl); | ||
1059 | return -ENOMEM; | ||
1060 | } | ||
1061 | |||
1062 | nesqp->hwqp.sq_vbase = kmap(nespbl->page); | ||
1063 | nesqp->page = nespbl->page; | ||
1064 | if (!nesqp->hwqp.sq_vbase) { | ||
1065 | nes_debug(NES_DBG_QP, "QP sq_vbase kmap failed\n"); | ||
1066 | kfree(nespbl); | ||
1067 | return -ENOMEM; | ||
1068 | } | ||
1069 | |||
1070 | /* Now to get to sq.. we need to calculate how many */ | ||
1071 | /* PBL entries were used by the rq.. */ | ||
1072 | pbl += sq_pbl_entries; | ||
1073 | nesqp->hwqp.rq_pbase = (le32_to_cpu(((__le32 *)pbl)[0])) | ((u64)((le32_to_cpu(((__le32 *)pbl)[1]))) << 32); | ||
1074 | /* nesqp->hwqp.rq_vbase = bus_to_virt(*pbl); */ | ||
1075 | /*nesqp->hwqp.rq_vbase = phys_to_virt(*pbl); */ | ||
1076 | |||
1077 | nes_debug(NES_DBG_QP, "QP sq_vbase= %p sq_pbase=%p rq_vbase=%p rq_pbase=%p\n", | ||
1078 | nesqp->hwqp.sq_vbase, (void *)nesqp->hwqp.sq_pbase, | ||
1079 | nesqp->hwqp.rq_vbase, (void *)nesqp->hwqp.rq_pbase); | ||
1080 | spin_lock_irqsave(&nesadapter->pbl_lock, flags); | ||
1081 | if (!nesadapter->free_256pbl) { | ||
1082 | pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase, | ||
1083 | nespbl->pbl_pbase); | ||
1084 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1085 | kunmap(nesqp->page); | ||
1086 | kfree(nespbl); | ||
1087 | return -ENOMEM; | ||
1088 | } | ||
1089 | nesadapter->free_256pbl--; | ||
1090 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1091 | |||
1092 | nesqp->pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 256, &nesqp->pbl_pbase); | ||
1093 | pblbuffer = nesqp->pbl_vbase; | ||
1094 | if (!nesqp->pbl_vbase) { | ||
1095 | /* memory allocated during nes_reg_user_mr() */ | ||
1096 | pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase, | ||
1097 | nespbl->pbl_pbase); | ||
1098 | kfree(nespbl); | ||
1099 | spin_lock_irqsave(&nesadapter->pbl_lock, flags); | ||
1100 | nesadapter->free_256pbl++; | ||
1101 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1102 | kunmap(nesqp->page); | ||
1103 | return -ENOMEM; | ||
1104 | } | ||
1105 | memset(nesqp->pbl_vbase, 0, 256); | ||
1106 | /* fill in the page address in the pbl buffer.. */ | ||
1107 | tpbl = pblbuffer + 16; | ||
1108 | pbl = (__le64 *)nespbl->pbl_vbase; | ||
1109 | while (sq_pbl_entries--) | ||
1110 | *tpbl++ = *pbl++; | ||
1111 | tpbl = pblbuffer; | ||
1112 | while (rq_pbl_entries--) | ||
1113 | *tpbl++ = *pbl++; | ||
1114 | |||
1115 | /* done with memory allocated during nes_reg_user_mr() */ | ||
1116 | pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase, | ||
1117 | nespbl->pbl_pbase); | ||
1118 | kfree(nespbl); | ||
1119 | |||
1120 | nesqp->qp_mem_size = | ||
1121 | max((u32)sizeof(struct nes_qp_context), ((u32)256)) + 256; /* this is Q2 */ | ||
1122 | /* Round up to a multiple of a page */ | ||
1123 | nesqp->qp_mem_size += PAGE_SIZE - 1; | ||
1124 | nesqp->qp_mem_size &= ~(PAGE_SIZE - 1); | ||
1125 | |||
1126 | mem = pci_alloc_consistent(nesdev->pcidev, nesqp->qp_mem_size, | ||
1127 | &nesqp->hwqp.q2_pbase); | ||
1128 | |||
1129 | if (!mem) { | ||
1130 | pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase); | ||
1131 | nesqp->pbl_vbase = NULL; | ||
1132 | spin_lock_irqsave(&nesadapter->pbl_lock, flags); | ||
1133 | nesadapter->free_256pbl++; | ||
1134 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1135 | kunmap(nesqp->page); | ||
1136 | return -ENOMEM; | ||
1137 | } | ||
1138 | nesqp->hwqp.q2_vbase = mem; | ||
1139 | mem += 256; | ||
1140 | memset(nesqp->hwqp.q2_vbase, 0, 256); | ||
1141 | nesqp->nesqp_context = mem; | ||
1142 | memset(nesqp->nesqp_context, 0, sizeof(*nesqp->nesqp_context)); | ||
1143 | nesqp->nesqp_context_pbase = nesqp->hwqp.q2_pbase + 256; | ||
1144 | |||
1145 | return 0; | ||
1146 | } | ||
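/*
 * Editorial sketch (not part of this patch) of the 256-byte hardware PBL
 * that nes_setup_virt_qp above builds for a userspace QP: the user's PBL
 * lists SQ pages first and RQ pages second, but the driver copies the RQ
 * entries to the start of the new PBL and the SQ entries to entry 16
 * (byte offset 128).  Entry counts and addresses here are made up.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint64_t user_pbl[8];		/* SQ page addresses, then RQ page addresses */
	uint64_t hw_pbl[32] = { 0 };	/* 256-byte PBL = 32 eight-byte entries */
	unsigned int sq_entries = 2, rq_entries = 3;

	for (unsigned int i = 0; i < sq_entries + rq_entries; i++)
		user_pbl[i] = 0x1000 * (i + 1);	/* fake page addresses */

	memcpy(&hw_pbl[16], &user_pbl[0], sq_entries * sizeof(uint64_t));	   /* SQ at entry 16 */
	memcpy(&hw_pbl[0], &user_pbl[sq_entries], rq_entries * sizeof(uint64_t)); /* RQ at entry 0 */

	printf("hw_pbl[0]  = 0x%llx (first RQ page)\n", (unsigned long long)hw_pbl[0]);
	printf("hw_pbl[16] = 0x%llx (first SQ page)\n", (unsigned long long)hw_pbl[16]);
	return 0;
}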
1147 | |||
1148 | |||
1149 | /** | ||
1150 | * nes_setup_mmap_qp | ||
1151 | */ | ||
1152 | static int nes_setup_mmap_qp(struct nes_qp *nesqp, struct nes_vnic *nesvnic, | ||
1153 | int sq_size, int rq_size) | ||
1154 | { | ||
1155 | void *mem; | ||
1156 | struct nes_device *nesdev = nesvnic->nesdev; | ||
1157 | |||
1158 | nesqp->qp_mem_size = (sizeof(struct nes_hw_qp_wqe) * sq_size) + | ||
1159 | (sizeof(struct nes_hw_qp_wqe) * rq_size) + | ||
1160 | max((u32)sizeof(struct nes_qp_context), ((u32)256)) + | ||
1161 | 256; /* this is Q2 */ | ||
1162 | /* Round up to a multiple of a page */ | ||
1163 | nesqp->qp_mem_size += PAGE_SIZE - 1; | ||
1164 | nesqp->qp_mem_size &= ~(PAGE_SIZE - 1); | ||
1165 | |||
1166 | mem = pci_alloc_consistent(nesdev->pcidev, nesqp->qp_mem_size, | ||
1167 | &nesqp->hwqp.sq_pbase); | ||
1168 | if (!mem) | ||
1169 | return -ENOMEM; | ||
1170 | nes_debug(NES_DBG_QP, "PCI consistent memory for " | ||
1171 | "host descriptor rings located @ %p (pa = 0x%08lX.) size = %u.\n", | ||
1172 | mem, (unsigned long)nesqp->hwqp.sq_pbase, nesqp->qp_mem_size); | ||
1173 | |||
1174 | memset(mem, 0, nesqp->qp_mem_size); | ||
1175 | |||
1176 | nesqp->hwqp.sq_vbase = mem; | ||
1177 | mem += sizeof(struct nes_hw_qp_wqe) * sq_size; | ||
1178 | |||
1179 | nesqp->hwqp.rq_vbase = mem; | ||
1180 | nesqp->hwqp.rq_pbase = nesqp->hwqp.sq_pbase + | ||
1181 | sizeof(struct nes_hw_qp_wqe) * sq_size; | ||
1182 | mem += sizeof(struct nes_hw_qp_wqe) * rq_size; | ||
1183 | |||
1184 | nesqp->hwqp.q2_vbase = mem; | ||
1185 | nesqp->hwqp.q2_pbase = nesqp->hwqp.rq_pbase + | ||
1186 | sizeof(struct nes_hw_qp_wqe) * rq_size; | ||
1187 | mem += 256; | ||
1188 | memset(nesqp->hwqp.q2_vbase, 0, 256); | ||
1189 | |||
1190 | nesqp->nesqp_context = mem; | ||
1191 | nesqp->nesqp_context_pbase = nesqp->hwqp.q2_pbase + 256; | ||
1192 | memset(nesqp->nesqp_context, 0, sizeof(*nesqp->nesqp_context)); | ||
1193 | return 0; | ||
1194 | } | ||
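/*
 * Editorial sketch (not part of this patch) of the single-allocation QP
 * layout built by nes_setup_mmap_qp above: SQ WQEs, then RQ WQEs, then 256
 * bytes of Q2, then the QP context, all in one DMA-coherent buffer rounded
 * up to a whole page.  The WQE and context sizes below are illustrative
 * stand-ins for sizeof(struct nes_hw_qp_wqe) and sizeof(struct nes_qp_context).
 */
#include <stdio.h>

#define PAGE_SZ	4096UL
#define WQE_SZ	128UL
#define CTX_SZ	1024UL

int main(void)
{
	unsigned long sq_size = 128, rq_size = 128;	/* in WQEs */
	unsigned long size = WQE_SZ * sq_size + WQE_SZ * rq_size +
			     (CTX_SZ > 256 ? CTX_SZ : 256) + 256;	/* 256 bytes of Q2 plus the context */

	size = (size + PAGE_SZ - 1) & ~(PAGE_SZ - 1);	/* round up to a page multiple */

	unsigned long sq_off  = 0;
	unsigned long rq_off  = sq_off + WQE_SZ * sq_size;
	unsigned long q2_off  = rq_off + WQE_SZ * rq_size;
	unsigned long ctx_off = q2_off + 256;

	printf("total %lu bytes: sq@%lu rq@%lu q2@%lu context@%lu\n",
	       size, sq_off, rq_off, q2_off, ctx_off);
	return 0;
}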
1195 | |||
1196 | |||
1197 | /** | ||
1198 | * nes_free_qp_mem() is to free up the qp's pci_alloc_consistent() memory. | ||
1199 | */ | ||
1200 | static inline void nes_free_qp_mem(struct nes_device *nesdev, | ||
1201 | struct nes_qp *nesqp, int virt_wqs) | ||
1202 | { | ||
1203 | unsigned long flags; | ||
1204 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1205 | if (!virt_wqs) { | ||
1206 | pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, | ||
1207 | nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase); | ||
1208 | } else { | ||
1209 | spin_lock_irqsave(&nesadapter->pbl_lock, flags); | ||
1210 | nesadapter->free_256pbl++; | ||
1211 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1212 | pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase); | ||
1213 | pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase ); | ||
1214 | nesqp->pbl_vbase = NULL; | ||
1215 | kunmap(nesqp->page); | ||
1216 | } | ||
1217 | } | ||
1218 | |||
1219 | |||
1220 | /** | ||
1221 | * nes_create_qp | ||
1222 | */ | ||
1223 | static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, | ||
1224 | struct ib_qp_init_attr *init_attr, struct ib_udata *udata) | ||
1225 | { | ||
1226 | u64 u64temp= 0; | ||
1227 | u64 u64nesqp = 0; | ||
1228 | struct nes_pd *nespd = to_nespd(ibpd); | ||
1229 | struct nes_vnic *nesvnic = to_nesvnic(ibpd->device); | ||
1230 | struct nes_device *nesdev = nesvnic->nesdev; | ||
1231 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1232 | struct nes_qp *nesqp; | ||
1233 | struct nes_cq *nescq; | ||
1234 | struct nes_ucontext *nes_ucontext; | ||
1235 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
1236 | struct nes_cqp_request *cqp_request; | ||
1237 | struct nes_create_qp_req req; | ||
1238 | struct nes_create_qp_resp uresp; | ||
1239 | struct nes_pbl *nespbl = NULL; | ||
1240 | u32 qp_num = 0; | ||
1241 | u32 opcode = 0; | ||
1242 | /* u32 counter = 0; */ | ||
1243 | void *mem; | ||
1244 | unsigned long flags; | ||
1245 | int ret; | ||
1246 | int err; | ||
1247 | int virt_wqs = 0; | ||
1248 | int sq_size; | ||
1249 | int rq_size; | ||
1250 | u8 sq_encoded_size; | ||
1251 | u8 rq_encoded_size; | ||
1252 | /* int counter; */ | ||
1253 | |||
1254 | atomic_inc(&qps_created); | ||
1255 | switch (init_attr->qp_type) { | ||
1256 | case IB_QPT_RC: | ||
1257 | if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) { | ||
1258 | init_attr->cap.max_inline_data = 0; | ||
1259 | } else { | ||
1260 | init_attr->cap.max_inline_data = 64; | ||
1261 | } | ||
1262 | sq_size = init_attr->cap.max_send_wr; | ||
1263 | rq_size = init_attr->cap.max_recv_wr; | ||
1264 | |||
1265 | /* check if the encoded sizes are OK or not... */ | ||
1266 | sq_encoded_size = nes_get_encoded_size(&sq_size); | ||
1267 | rq_encoded_size = nes_get_encoded_size(&rq_size); | ||
1268 | |||
1269 | if ((!sq_encoded_size) || (!rq_encoded_size)) { | ||
1270 | nes_debug(NES_DBG_QP, "ERROR bad rq (%u) or sq (%u) size\n", | ||
1271 | rq_size, sq_size); | ||
1272 | return ERR_PTR(-EINVAL); | ||
1273 | } | ||
1274 | |||
1275 | init_attr->cap.max_send_wr = sq_size -2; | ||
1276 | init_attr->cap.max_recv_wr = rq_size -1; | ||
1277 | nes_debug(NES_DBG_QP, "RQ size=%u, SQ Size=%u\n", rq_size, sq_size); | ||
1278 | |||
1279 | ret = nes_alloc_resource(nesadapter, nesadapter->allocated_qps, | ||
1280 | nesadapter->max_qp, &qp_num, &nesadapter->next_qp); | ||
1281 | if (ret) { | ||
1282 | return ERR_PTR(ret); | ||
1283 | } | ||
1284 | |||
1285 | /* Need 512 (actually now 1024) byte alignment on this structure */ | ||
1286 | mem = kzalloc(sizeof(*nesqp)+NES_SW_CONTEXT_ALIGN-1, GFP_KERNEL); | ||
1287 | if (!mem) { | ||
1288 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | ||
1289 | nes_debug(NES_DBG_QP, "Unable to allocate QP\n"); | ||
1290 | return ERR_PTR(-ENOMEM); | ||
1291 | } | ||
1292 | u64nesqp = (unsigned long)mem; | ||
1293 | u64nesqp += ((u64)NES_SW_CONTEXT_ALIGN) - 1; | ||
1294 | u64temp = ((u64)NES_SW_CONTEXT_ALIGN) - 1; | ||
1295 | u64nesqp &= ~u64temp; | ||
1296 | nesqp = (struct nes_qp *)(unsigned long)u64nesqp; | ||
1297 | /* nes_debug(NES_DBG_QP, "nesqp=%p, allocated buffer=%p. Rounded to closest %u\n", | ||
1298 | nesqp, mem, NES_SW_CONTEXT_ALIGN); */ | ||
1299 | nesqp->allocated_buffer = mem; | ||
1300 | |||
1301 | if (udata) { | ||
1302 | if (ib_copy_from_udata(&req, udata, sizeof(struct nes_create_qp_req))) { | ||
1303 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | ||
1304 | kfree(nesqp->allocated_buffer); | ||
1305 | nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n"); | ||
1306 | return ERR_PTR(-EFAULT); | ||
1307 | } | ||
1308 | if (req.user_wqe_buffers) { | ||
1309 | virt_wqs = 1; | ||
1310 | } | ||
1311 | if ((ibpd->uobject) && (ibpd->uobject->context)) { | ||
1312 | nesqp->user_mode = 1; | ||
1313 | nes_ucontext = to_nesucontext(ibpd->uobject->context); | ||
1314 | if (virt_wqs) { | ||
1315 | err = 1; | ||
1316 | list_for_each_entry(nespbl, &nes_ucontext->qp_reg_mem_list, list) { | ||
1317 | if (nespbl->user_base == (unsigned long )req.user_wqe_buffers) { | ||
1318 | list_del(&nespbl->list); | ||
1319 | err = 0; | ||
1320 | nes_debug(NES_DBG_QP, "Found PBL for virtual QP. nespbl=%p. user_base=0x%lx\n", | ||
1321 | nespbl, nespbl->user_base); | ||
1322 | break; | ||
1323 | } | ||
1324 | } | ||
1325 | if (err) { | ||
1326 | nes_debug(NES_DBG_QP, "Didn't Find PBL for virtual QP. address = %llx.\n", | ||
1327 | (long long unsigned int)req.user_wqe_buffers); | ||
1328 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | ||
1329 | kfree(nesqp->allocated_buffer); | ||
1330 | return ERR_PTR(-ENOMEM); | ||
1331 | } | ||
1332 | } | ||
1333 | |||
1334 | nes_ucontext = to_nesucontext(ibpd->uobject->context); | ||
1335 | nesqp->mmap_sq_db_index = | ||
1336 | find_next_zero_bit(nes_ucontext->allocated_wqs, | ||
1337 | NES_MAX_USER_WQ_REGIONS, nes_ucontext->first_free_wq); | ||
1338 | /* nes_debug(NES_DBG_QP, "find_first_zero_bit on wqs returned %u\n", | ||
1339 | nespd->mmap_db_index); */ | ||
1340 | if (nesqp->mmap_sq_db_index >= NES_MAX_USER_WQ_REGIONS) { | ||
1341 | nes_debug(NES_DBG_QP, | ||
1342 | "db index > max user regions, failing create QP\n"); | ||
1343 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | ||
1344 | if (virt_wqs) { | ||
1345 | pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase, | ||
1346 | nespbl->pbl_pbase); | ||
1347 | kfree(nespbl); | ||
1348 | } | ||
1349 | kfree(nesqp->allocated_buffer); | ||
1350 | return ERR_PTR(-ENOMEM); | ||
1351 | } | ||
1352 | set_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs); | ||
1353 | nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = nesqp; | ||
1354 | nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index + 1; | ||
1355 | } else { | ||
1356 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | ||
1357 | kfree(nesqp->allocated_buffer); | ||
1358 | return ERR_PTR(-EFAULT); | ||
1359 | } | ||
1360 | } | ||
1361 | err = (!virt_wqs) ? nes_setup_mmap_qp(nesqp, nesvnic, sq_size, rq_size) : | ||
1362 | nes_setup_virt_qp(nesqp, nespbl, nesvnic, sq_size, rq_size); | ||
1363 | if (err) { | ||
1364 | nes_debug(NES_DBG_QP, | ||
1365 | "error geting qp mem code = %d\n", err); | ||
1366 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | ||
1367 | kfree(nesqp->allocated_buffer); | ||
1368 | return ERR_PTR(-ENOMEM); | ||
1369 | } | ||
1370 | |||
1371 | nesqp->hwqp.sq_size = sq_size; | ||
1372 | nesqp->hwqp.sq_encoded_size = sq_encoded_size; | ||
1373 | nesqp->hwqp.sq_head = 1; | ||
1374 | nesqp->hwqp.rq_size = rq_size; | ||
1375 | nesqp->hwqp.rq_encoded_size = rq_encoded_size; | ||
1376 | /* nes_debug(NES_DBG_QP, "nesqp->nesqp_context_pbase = %p\n", | ||
1377 | (void *)nesqp->nesqp_context_pbase); | ||
1378 | */ | ||
1379 | nesqp->hwqp.qp_id = qp_num; | ||
1380 | nesqp->ibqp.qp_num = nesqp->hwqp.qp_id; | ||
1381 | nesqp->nespd = nespd; | ||
1382 | |||
1383 | nescq = to_nescq(init_attr->send_cq); | ||
1384 | nesqp->nesscq = nescq; | ||
1385 | nescq = to_nescq(init_attr->recv_cq); | ||
1386 | nesqp->nesrcq = nescq; | ||
1387 | |||
1388 | nesqp->nesqp_context->misc |= cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << | ||
1389 | NES_QPCONTEXT_MISC_PCI_FCN_SHIFT); | ||
1390 | nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.rq_encoded_size << | ||
1391 | NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT); | ||
1392 | nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.sq_encoded_size << | ||
1393 | NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT); | ||
1394 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_PRIV_EN); | ||
1395 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_FAST_REGISTER_EN); | ||
1396 | nesqp->nesqp_context->cqs = cpu_to_le32(nesqp->nesscq->hw_cq.cq_number + | ||
1397 | ((u32)nesqp->nesrcq->hw_cq.cq_number << 16)); | ||
1398 | u64temp = (u64)nesqp->hwqp.sq_pbase; | ||
1399 | nesqp->nesqp_context->sq_addr_low = cpu_to_le32((u32)u64temp); | ||
1400 | nesqp->nesqp_context->sq_addr_high = cpu_to_le32((u32)(u64temp >> 32)); | ||
1401 | |||
1402 | |||
1403 | if (!virt_wqs) { | ||
1404 | u64temp = (u64)nesqp->hwqp.sq_pbase; | ||
1405 | nesqp->nesqp_context->sq_addr_low = cpu_to_le32((u32)u64temp); | ||
1406 | nesqp->nesqp_context->sq_addr_high = cpu_to_le32((u32)(u64temp >> 32)); | ||
1407 | u64temp = (u64)nesqp->hwqp.rq_pbase; | ||
1408 | nesqp->nesqp_context->rq_addr_low = cpu_to_le32((u32)u64temp); | ||
1409 | nesqp->nesqp_context->rq_addr_high = cpu_to_le32((u32)(u64temp >> 32)); | ||
1410 | } else { | ||
1411 | u64temp = (u64)nesqp->pbl_pbase; | ||
1412 | nesqp->nesqp_context->rq_addr_low = cpu_to_le32((u32)u64temp); | ||
1413 | nesqp->nesqp_context->rq_addr_high = cpu_to_le32((u32)(u64temp >> 32)); | ||
1414 | } | ||
1415 | |||
1416 | /* nes_debug(NES_DBG_QP, "next_qp_nic_index=%u, using nic_index=%d\n", | ||
1417 | nesvnic->next_qp_nic_index, | ||
1418 | nesvnic->qp_nic_index[nesvnic->next_qp_nic_index]); */ | ||
1419 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
1420 | nesqp->nesqp_context->misc2 |= cpu_to_le32( | ||
1421 | (u32)nesvnic->qp_nic_index[nesvnic->next_qp_nic_index] << | ||
1422 | NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT); | ||
1423 | nesvnic->next_qp_nic_index++; | ||
1424 | if ((nesvnic->next_qp_nic_index > 3) || | ||
1425 | (nesvnic->qp_nic_index[nesvnic->next_qp_nic_index] == 0xf)) { | ||
1426 | nesvnic->next_qp_nic_index = 0; | ||
1427 | } | ||
1428 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
1429 | |||
1430 | nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32((u32)nesqp->nespd->pd_id << 16); | ||
1431 | u64temp = (u64)nesqp->hwqp.q2_pbase; | ||
1432 | nesqp->nesqp_context->q2_addr_low = cpu_to_le32((u32)u64temp); | ||
1433 | nesqp->nesqp_context->q2_addr_high = cpu_to_le32((u32)(u64temp >> 32)); | ||
1434 | nesqp->nesqp_context->aeq_token_low = cpu_to_le32((u32)((unsigned long)(nesqp))); | ||
1435 | nesqp->nesqp_context->aeq_token_high = cpu_to_le32((u32)(upper_32_bits((unsigned long)(nesqp)))); | ||
1436 | nesqp->nesqp_context->ird_ord_sizes = cpu_to_le32(NES_QPCONTEXT_ORDIRD_ALSMM | | ||
1437 | ((((u32)nesadapter->max_irrq_wr) << | ||
1438 | NES_QPCONTEXT_ORDIRD_IRDSIZE_SHIFT) & NES_QPCONTEXT_ORDIRD_IRDSIZE_MASK)); | ||
1439 | if (disable_mpa_crc) { | ||
1440 | nes_debug(NES_DBG_QP, "Disabling MPA crc checking due to module option.\n"); | ||
1441 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(NES_QPCONTEXT_ORDIRD_RNMC); | ||
1442 | } | ||
1443 | |||
1444 | |||
1445 | /* Create the QP */ | ||
1446 | cqp_request = nes_get_cqp_request(nesdev); | ||
1447 | if (cqp_request == NULL) { | ||
1448 | nes_debug(NES_DBG_QP, "Failed to get a cqp_request\n"); | ||
1449 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | ||
1450 | nes_free_qp_mem(nesdev, nesqp,virt_wqs); | ||
1451 | kfree(nesqp->allocated_buffer); | ||
1452 | return ERR_PTR(-ENOMEM); | ||
1453 | } | ||
1454 | cqp_request->waiting = 1; | ||
1455 | cqp_wqe = &cqp_request->cqp_wqe; | ||
1456 | |||
1457 | if (!virt_wqs) { | ||
1458 | opcode = NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_IWARP | | ||
1459 | NES_CQP_QP_IWARP_STATE_IDLE; | ||
1460 | } else { | ||
1461 | opcode = NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_VIRT_WQS | | ||
1462 | NES_CQP_QP_IWARP_STATE_IDLE; | ||
1463 | } | ||
1464 | opcode |= NES_CQP_QP_CQS_VALID; | ||
1465 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
1466 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode); | ||
1467 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); | ||
1468 | |||
1469 | u64temp = (u64)nesqp->nesqp_context_pbase; | ||
1470 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp); | ||
1471 | |||
1472 | atomic_set(&cqp_request->refcount, 2); | ||
1473 | nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL); | ||
1474 | |||
1475 | /* Wait for CQP */ | ||
1476 | nes_debug(NES_DBG_QP, "Waiting for create iWARP QP%u to complete.\n", | ||
1477 | nesqp->hwqp.qp_id); | ||
1478 | ret = wait_event_timeout(cqp_request->waitq, | ||
1479 | (cqp_request->request_done != 0), NES_EVENT_TIMEOUT); | ||
1480 | nes_debug(NES_DBG_QP, "Create iwarp QP%u completed, wait_event_timeout ret=%u," | ||
1481 | " nesdev->cqp_head = %u, nesdev->cqp.sq_tail = %u," | ||
1482 | " CQP Major:Minor codes = 0x%04X:0x%04X.\n", | ||
1483 | nesqp->hwqp.qp_id, ret, nesdev->cqp.sq_head, nesdev->cqp.sq_tail, | ||
1484 | cqp_request->major_code, cqp_request->minor_code); | ||
1485 | if ((!ret) || (cqp_request->major_code)) { | ||
1486 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
1487 | if (cqp_request->dynamic) { | ||
1488 | kfree(cqp_request); | ||
1489 | } else { | ||
1490 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
1491 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
1492 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
1493 | } | ||
1494 | } | ||
1495 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | ||
1496 | nes_free_qp_mem(nesdev, nesqp,virt_wqs); | ||
1497 | kfree(nesqp->allocated_buffer); | ||
1498 | if (!ret) { | ||
1499 | return ERR_PTR(-ETIME); | ||
1500 | } else { | ||
1501 | return ERR_PTR(-EIO); | ||
1502 | } | ||
1503 | } else { | ||
1504 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
1505 | if (cqp_request->dynamic) { | ||
1506 | kfree(cqp_request); | ||
1507 | } else { | ||
1508 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
1509 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
1510 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
1511 | } | ||
1512 | } | ||
1513 | } | ||
1514 | |||
1515 | if (ibpd->uobject) { | ||
1516 | uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index; | ||
1517 | uresp.actual_sq_size = sq_size; | ||
1518 | uresp.actual_rq_size = rq_size; | ||
1519 | uresp.qp_id = nesqp->hwqp.qp_id; | ||
1520 | uresp.nes_drv_opt = nes_drv_opt; | ||
1521 | if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) { | ||
1522 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | ||
1523 | nes_free_qp_mem(nesdev, nesqp,virt_wqs); | ||
1524 | kfree(nesqp->allocated_buffer); | ||
1525 | return ERR_PTR(-EFAULT); | ||
1526 | } | ||
1527 | } | ||
1528 | |||
1529 | nes_debug(NES_DBG_QP, "QP%u structure located @%p.Size = %u.\n", | ||
1530 | nesqp->hwqp.qp_id, nesqp, (u32)sizeof(*nesqp)); | ||
1531 | spin_lock_init(&nesqp->lock); | ||
1532 | init_waitqueue_head(&nesqp->state_waitq); | ||
1533 | init_waitqueue_head(&nesqp->kick_waitq); | ||
1534 | nes_add_ref(&nesqp->ibqp); | ||
1535 | break; | ||
1536 | default: | ||
1537 | nes_debug(NES_DBG_QP, "Invalid QP type: %d\n", init_attr->qp_type); | ||
1538 | return ERR_PTR(-EINVAL); | ||
1539 | break; | ||
1540 | } | ||
1541 | |||
1542 | /* update the QP table */ | ||
1543 | nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp; | ||
1544 | nes_debug(NES_DBG_QP, "netdev refcnt=%u\n", | ||
1545 | atomic_read(&nesvnic->netdev->refcnt)); | ||
1546 | |||
1547 | return &nesqp->ibqp; | ||
1548 | } | ||
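/*
 * Editorial sketch (not part of this patch) of the over-allocate-and-align
 * trick nes_create_qp above uses to get a NES_SW_CONTEXT_ALIGN-aligned
 * struct nes_qp out of kzalloc(): allocate align-1 extra bytes, round the
 * pointer up, and keep the original pointer so it can be freed later.
 * The 1024-byte alignment and the object size below are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define SW_CONTEXT_ALIGN 1024UL

int main(void)
{
	size_t obj_size = 700;	/* stand-in for sizeof(struct nes_qp) */
	void *raw = calloc(1, obj_size + SW_CONTEXT_ALIGN - 1);
	if (!raw)
		return 1;

	uintptr_t p = (uintptr_t)raw;
	p = (p + SW_CONTEXT_ALIGN - 1) & ~(SW_CONTEXT_ALIGN - 1);

	printf("raw=%p aligned=%p (offset %lu)\n",
	       raw, (void *)p, (unsigned long)(p - (uintptr_t)raw));
	/* the driver keeps 'raw' in nesqp->allocated_buffer and later kfree()s it */
	free(raw);
	return 0;
}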
1549 | |||
1550 | |||
1551 | /** | ||
1552 | * nes_destroy_qp | ||
1553 | */ | ||
1554 | static int nes_destroy_qp(struct ib_qp *ibqp) | ||
1555 | { | ||
1556 | struct nes_qp *nesqp = to_nesqp(ibqp); | ||
1557 | /* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */ | ||
1558 | struct nes_ucontext *nes_ucontext; | ||
1559 | struct ib_qp_attr attr; | ||
1560 | struct iw_cm_id *cm_id; | ||
1561 | struct iw_cm_event cm_event; | ||
1562 | int ret; | ||
1563 | |||
1564 | atomic_inc(&sw_qps_destroyed); | ||
1565 | nesqp->destroyed = 1; | ||
1566 | |||
1567 | /* Blow away the connection if it exists. */ | ||
1568 | if (nesqp->ibqp_state >= IB_QPS_INIT && nesqp->ibqp_state <= IB_QPS_RTS) { | ||
1569 | /* if (nesqp->ibqp_state == IB_QPS_RTS) { */ | ||
1570 | attr.qp_state = IB_QPS_ERR; | ||
1571 | nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); | ||
1572 | } | ||
1573 | |||
1574 | if (((nesqp->ibqp_state == IB_QPS_INIT) || | ||
1575 | (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) { | ||
1576 | cm_id = nesqp->cm_id; | ||
1577 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; | ||
1578 | cm_event.status = IW_CM_EVENT_STATUS_TIMEOUT; | ||
1579 | cm_event.local_addr = cm_id->local_addr; | ||
1580 | cm_event.remote_addr = cm_id->remote_addr; | ||
1581 | cm_event.private_data = NULL; | ||
1582 | cm_event.private_data_len = 0; | ||
1583 | |||
1584 | nes_debug(NES_DBG_QP, "Generating a CM Timeout Event for " | ||
1585 | "QP%u. cm_id = %p, refcount = %u. \n", | ||
1586 | nesqp->hwqp.qp_id, cm_id, atomic_read(&nesqp->refcount)); | ||
1587 | |||
1588 | cm_id->rem_ref(cm_id); | ||
1589 | ret = cm_id->event_handler(cm_id, &cm_event); | ||
1590 | if (ret) | ||
1591 | nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret); | ||
1592 | } | ||
1593 | |||
1594 | |||
1595 | if (nesqp->user_mode) { | ||
1596 | if ((ibqp->uobject)&&(ibqp->uobject->context)) { | ||
1597 | nes_ucontext = to_nesucontext(ibqp->uobject->context); | ||
1598 | clear_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs); | ||
1599 | nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = NULL; | ||
1600 | if (nes_ucontext->first_free_wq > nesqp->mmap_sq_db_index) { | ||
1601 | nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index; | ||
1602 | } | ||
1603 | } | ||
1604 | if (nesqp->pbl_pbase) | ||
1605 | kunmap(nesqp->page); | ||
1606 | } | ||
1607 | |||
1608 | nes_rem_ref(&nesqp->ibqp); | ||
1609 | return 0; | ||
1610 | } | ||
1611 | |||
1612 | |||
1613 | /** | ||
1614 | * nes_create_cq - allocate a completion queue and issue a CreateCQ request to the CQP | ||
1615 | */ | ||
1616 | static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | ||
1617 | int comp_vector, | ||
1618 | struct ib_ucontext *context, struct ib_udata *udata) | ||
1619 | { | ||
1620 | u64 u64temp; | ||
1621 | struct nes_vnic *nesvnic = to_nesvnic(ibdev); | ||
1622 | struct nes_device *nesdev = nesvnic->nesdev; | ||
1623 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1624 | struct nes_cq *nescq; | ||
1625 | struct nes_ucontext *nes_ucontext = NULL; | ||
1626 | struct nes_cqp_request *cqp_request; | ||
1627 | void *mem = NULL; | ||
1628 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
1629 | struct nes_pbl *nespbl = NULL; | ||
1630 | struct nes_create_cq_req req; | ||
1631 | struct nes_create_cq_resp resp; | ||
1632 | u32 cq_num = 0; | ||
1633 | u32 opcode = 0; | ||
1634 | u32 pbl_entries = 1; | ||
1635 | int err; | ||
1636 | unsigned long flags; | ||
1637 | int ret; | ||
1638 | |||
1639 | err = nes_alloc_resource(nesadapter, nesadapter->allocated_cqs, | ||
1640 | nesadapter->max_cq, &cq_num, &nesadapter->next_cq); | ||
1641 | if (err) { | ||
1642 | return ERR_PTR(err); | ||
1643 | } | ||
1644 | |||
1645 | nescq = kzalloc(sizeof(struct nes_cq), GFP_KERNEL); | ||
1646 | if (!nescq) { | ||
1647 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | ||
1648 | nes_debug(NES_DBG_CQ, "Unable to allocate nes_cq struct\n"); | ||
1649 | return ERR_PTR(-ENOMEM); | ||
1650 | } | ||
1651 | |||
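| /* Reserve one CQE beyond the requested depth and enforce a minimum hardware CQ size of 5 */ | ||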
1652 | nescq->hw_cq.cq_size = max(entries + 1, 5); | ||
1653 | nescq->hw_cq.cq_number = cq_num; | ||
1654 | nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1; | ||
1655 | |||
1656 | |||
1657 | if (context) { | ||
1658 | nes_ucontext = to_nesucontext(context); | ||
1659 | if (ib_copy_from_udata(&req, udata, sizeof (struct nes_create_cq_req))) { | ||
1660 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | ||
1661 | kfree(nescq); | ||
1662 | return ERR_PTR(-EFAULT); | ||
1663 | } | ||
1664 | nesvnic->mcrq_ucontext = nes_ucontext; | ||
1665 | nes_ucontext->mcrqf = req.mcrqf; | ||
1666 | if (nes_ucontext->mcrqf) { | ||
1667 | if (nes_ucontext->mcrqf & 0x80000000) | ||
1668 | nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 12 + (nes_ucontext->mcrqf & 0xf) - 1; | ||
1669 | else if (nes_ucontext->mcrqf & 0x40000000) | ||
1670 | nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff; | ||
1671 | else | ||
1672 | nescq->hw_cq.cq_number = nesvnic->mcrq_qp_id + nes_ucontext->mcrqf-1; | ||
1673 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | ||
1674 | } | ||
1675 | nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n", | ||
1676 | (unsigned long)req.user_cq_buffer, entries); | ||
1677 | list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) { | ||
1678 | if (nespbl->user_base == (unsigned long)req.user_cq_buffer) { | ||
1679 | list_del(&nespbl->list); | ||
1680 | err = 0; | ||
1681 | nes_debug(NES_DBG_CQ, "Found PBL for virtual CQ. nespbl=%p.\n", | ||
1682 | nespbl); | ||
1683 | break; | ||
1684 | } | ||
1685 | } | ||
1686 | if (err) { | ||
1687 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | ||
1688 | kfree(nescq); | ||
1689 | return ERR_PTR(err); | ||
1690 | } | ||
1691 | |||
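| /* Each PBL entry is 8 bytes, so the entry count is the PBL size divided by 8 */ | ||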
1692 | pbl_entries = nespbl->pbl_size >> 3; | ||
1693 | nescq->cq_mem_size = 0; | ||
1694 | } else { | ||
1695 | nescq->cq_mem_size = nescq->hw_cq.cq_size * sizeof(struct nes_hw_cqe); | ||
1696 | nes_debug(NES_DBG_CQ, "Attempting to allocate pci memory (%u entries, %u bytes) for CQ%u.\n", | ||
1697 | entries, nescq->cq_mem_size, nescq->hw_cq.cq_number); | ||
1698 | |||
1699 | /* allocate the physical buffer space */ | ||
1700 | mem = pci_alloc_consistent(nesdev->pcidev, nescq->cq_mem_size, | ||
1701 | &nescq->hw_cq.cq_pbase); | ||
1702 | if (!mem) { | ||
1703 | printk(KERN_ERR PFX "Unable to allocate pci memory for cq\n"); | ||
1704 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | ||
1705 | kfree(nescq); | ||
1706 | return ERR_PTR(-ENOMEM); | ||
1707 | } | ||
1708 | |||
1709 | memset(mem, 0, nescq->cq_mem_size); | ||
1710 | nescq->hw_cq.cq_vbase = mem; | ||
1711 | nescq->hw_cq.cq_head = 0; | ||
1712 | nes_debug(NES_DBG_CQ, "CQ%u virtual address @ %p, phys = 0x%08X\n", | ||
1713 | nescq->hw_cq.cq_number, nescq->hw_cq.cq_vbase, | ||
1714 | (u32)nescq->hw_cq.cq_pbase); | ||
1715 | } | ||
1716 | |||
1717 | nescq->hw_cq.ce_handler = nes_iwarp_ce_handler; | ||
1718 | spin_lock_init(&nescq->lock); | ||
1719 | |||
1720 | /* send CreateCQ request to CQP */ | ||
1721 | cqp_request = nes_get_cqp_request(nesdev); | ||
1722 | if (cqp_request == NULL) { | ||
1723 | nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n"); | ||
1724 | if (!context) | ||
1725 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, | ||
1726 | nescq->hw_cq.cq_pbase); | ||
1727 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | ||
1728 | kfree(nescq); | ||
1729 | return ERR_PTR(-ENOMEM); | ||
1730 | } | ||
1731 | cqp_request->waiting = 1; | ||
1732 | cqp_wqe = &cqp_request->cqp_wqe; | ||
1733 | |||
1734 | opcode = NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID | | ||
1735 | NES_CQP_CQ_CHK_OVERFLOW | | ||
1736 | NES_CQP_CQ_CEQE_MASK | ((u32)nescq->hw_cq.cq_size << 16); | ||
1737 | |||
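| /* PBL accounting: more than 32 entries requires a 4KB PBL, otherwise a 256-byte PBL; the free counts are protected by pbl_lock */ | ||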
1738 | spin_lock_irqsave(&nesadapter->pbl_lock, flags); | ||
1739 | |||
1740 | if (pbl_entries != 1) { | ||
1741 | if (pbl_entries > 32) { | ||
1742 | /* use 4k pbl */ | ||
1743 | nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 4k PBL\n", pbl_entries); | ||
1744 | if (nesadapter->free_4kpbl == 0) { | ||
1745 | if (cqp_request->dynamic) { | ||
1746 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1747 | kfree(cqp_request); | ||
1748 | } else { | ||
1749 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
1750 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1751 | } | ||
1752 | if (!context) | ||
1753 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, | ||
1754 | nescq->hw_cq.cq_pbase); | ||
1755 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | ||
1756 | kfree(nescq); | ||
1757 | return ERR_PTR(-ENOMEM); | ||
1758 | } else { | ||
1759 | opcode |= (NES_CQP_CQ_VIRT | NES_CQP_CQ_4KB_CHUNK); | ||
1760 | nescq->virtual_cq = 2; | ||
1761 | nesadapter->free_4kpbl--; | ||
1762 | } | ||
1763 | } else { | ||
1764 | /* use 256 byte pbl */ | ||
1765 | nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 256 byte PBL\n", pbl_entries); | ||
1766 | if (nesadapter->free_256pbl == 0) { | ||
1767 | if (cqp_request->dynamic) { | ||
1768 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1769 | kfree(cqp_request); | ||
1770 | } else { | ||
1771 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
1772 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1773 | } | ||
1774 | if (!context) | ||
1775 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, | ||
1776 | nescq->hw_cq.cq_pbase); | ||
1777 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | ||
1778 | kfree(nescq); | ||
1779 | return ERR_PTR(-ENOMEM); | ||
1780 | } else { | ||
1781 | opcode |= NES_CQP_CQ_VIRT; | ||
1782 | nescq->virtual_cq = 1; | ||
1783 | nesadapter->free_256pbl--; | ||
1784 | } | ||
1785 | } | ||
1786 | } | ||
1787 | |||
1788 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1789 | |||
1790 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
1791 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode); | ||
1792 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, | ||
1793 | (nescq->hw_cq.cq_number | ((u32)nesdev->ceq_index << 16))); | ||
1794 | |||
1795 | if (context) { | ||
1796 | if (pbl_entries != 1) | ||
1797 | u64temp = (u64)nespbl->pbl_pbase; | ||
1798 | else | ||
1799 | u64temp = le64_to_cpu(nespbl->pbl_vbase[0]); | ||
1800 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX, | ||
1801 | nes_ucontext->mmap_db_index[0]); | ||
1802 | } else { | ||
1803 | u64temp = (u64)nescq->hw_cq.cq_pbase; | ||
1804 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0; | ||
1805 | } | ||
1806 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp); | ||
1807 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0; | ||
1808 | u64temp = (u64)(unsigned long)&nescq->hw_cq; | ||
1809 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = | ||
1810 | cpu_to_le32((u32)(u64temp >> 1)); | ||
1811 | cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = | ||
1812 | cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF); | ||
1813 | |||
1814 | atomic_set(&cqp_request->refcount, 2); | ||
1815 | nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL); | ||
1816 | |||
1817 | /* Wait for CQP */ | ||
1818 | nes_debug(NES_DBG_CQ, "Waiting for create iWARP CQ%u to complete.\n", | ||
1819 | nescq->hw_cq.cq_number); | ||
1820 | ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done), | ||
1821 | NES_EVENT_TIMEOUT * 2); | ||
1822 | nes_debug(NES_DBG_CQ, "Create iWARP CQ%u completed, wait_event_timeout ret = %d.\n", | ||
1823 | nescq->hw_cq.cq_number, ret); | ||
1824 | if ((!ret) || (cqp_request->major_code)) { | ||
1825 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
1826 | if (cqp_request->dynamic) { | ||
1827 | kfree(cqp_request); | ||
1828 | } else { | ||
1829 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
1830 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
1831 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
1832 | } | ||
1833 | } | ||
1834 | nes_debug(NES_DBG_CQ, "iWARP CQ%u create timeout expired, major code = 0x%04X," | ||
1835 | " minor code = 0x%04X\n", | ||
1836 | nescq->hw_cq.cq_number, cqp_request->major_code, cqp_request->minor_code); | ||
1837 | if (!context) | ||
1838 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, | ||
1839 | nescq->hw_cq.cq_pbase); | ||
1840 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | ||
1841 | kfree(nescq); | ||
1842 | return ERR_PTR(-EIO); | ||
1843 | } else { | ||
1844 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
1845 | if (cqp_request->dynamic) { | ||
1846 | kfree(cqp_request); | ||
1847 | } else { | ||
1848 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
1849 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
1850 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
1851 | } | ||
1852 | } | ||
1853 | } | ||
1854 | |||
1855 | if (context) { | ||
1856 | /* free the nespbl */ | ||
1857 | pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase, | ||
1858 | nespbl->pbl_pbase); | ||
1859 | kfree(nespbl); | ||
1860 | resp.cq_id = nescq->hw_cq.cq_number; | ||
1861 | resp.cq_size = nescq->hw_cq.cq_size; | ||
1862 | resp.mmap_db_index = 0; | ||
1863 | if (ib_copy_to_udata(udata, &resp, sizeof resp)) { | ||
1864 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | ||
1865 | kfree(nescq); | ||
1866 | return ERR_PTR(-EFAULT); | ||
1867 | } | ||
1868 | } | ||
1869 | |||
1870 | return &nescq->ibcq; | ||
1871 | } | ||
1872 | |||
1873 | |||
1874 | /** | ||
1875 | * nes_destroy_cq - issue a DestroyCQ CQP request and free the CQ resources | ||
1876 | */ | ||
1877 | static int nes_destroy_cq(struct ib_cq *ib_cq) | ||
1878 | { | ||
1879 | struct nes_cq *nescq; | ||
1880 | struct nes_device *nesdev; | ||
1881 | struct nes_vnic *nesvnic; | ||
1882 | struct nes_adapter *nesadapter; | ||
1883 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
1884 | struct nes_cqp_request *cqp_request; | ||
1885 | unsigned long flags; | ||
1886 | u32 opcode = 0; | ||
1887 | int ret; | ||
1888 | |||
1889 | if (ib_cq == NULL) | ||
1890 | return 0; | ||
1891 | |||
1892 | nescq = to_nescq(ib_cq); | ||
1893 | nesvnic = to_nesvnic(ib_cq->device); | ||
1894 | nesdev = nesvnic->nesdev; | ||
1895 | nesadapter = nesdev->nesadapter; | ||
1896 | |||
1897 | nes_debug(NES_DBG_CQ, "Destroy CQ%u\n", nescq->hw_cq.cq_number); | ||
1898 | |||
1899 | /* Send DestroyCQ request to CQP */ | ||
1900 | cqp_request = nes_get_cqp_request(nesdev); | ||
1901 | if (cqp_request == NULL) { | ||
1902 | nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n"); | ||
1903 | return -ENOMEM; | ||
1904 | } | ||
1905 | cqp_request->waiting = 1; | ||
1906 | cqp_wqe = &cqp_request->cqp_wqe; | ||
1907 | opcode = NES_CQP_DESTROY_CQ | (nescq->hw_cq.cq_size << 16); | ||
1908 | spin_lock_irqsave(&nesadapter->pbl_lock, flags); | ||
1909 | if (nescq->virtual_cq == 1) { | ||
1910 | nesadapter->free_256pbl++; | ||
1911 | if (nesadapter->free_256pbl > nesadapter->max_256pbl) { | ||
1912 | printk(KERN_ERR PFX "%s: free 256B PBLs(%u) has exceeded the max(%u)\n", | ||
1913 | __FUNCTION__, nesadapter->free_256pbl, nesadapter->max_256pbl); | ||
1914 | } | ||
1915 | } else if (nescq->virtual_cq == 2) { | ||
1916 | nesadapter->free_4kpbl++; | ||
1917 | if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) { | ||
1918 | printk(KERN_ERR PFX "%s: free 4K PBLs(%u) has exceeded the max(%u)\n", | ||
1919 | __FUNCTION__, nesadapter->free_4kpbl, nesadapter->max_4kpbl); | ||
1920 | } | ||
1921 | opcode |= NES_CQP_CQ_4KB_CHUNK; | ||
1922 | } | ||
1923 | |||
1924 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
1925 | |||
1926 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
1927 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode); | ||
1928 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, | ||
1929 | (nescq->hw_cq.cq_number | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 16))); | ||
1930 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, nescq->hw_cq.cq_number); | ||
1931 | atomic_set(&cqp_request->refcount, 2); | ||
1932 | nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL); | ||
1933 | |||
1934 | /* Wait for CQP */ | ||
1935 | nes_debug(NES_DBG_CQ, "Waiting for destroy iWARP CQ%u to complete.\n", | ||
1936 | nescq->hw_cq.cq_number); | ||
1937 | ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done), | ||
1938 | NES_EVENT_TIMEOUT); | ||
1939 | nes_debug(NES_DBG_CQ, "Destroy iWARP CQ%u completed, wait_event_timeout ret = %u," | ||
1940 | " CQP Major:Minor codes = 0x%04X:0x%04X.\n", | ||
1941 | nescq->hw_cq.cq_number, ret, cqp_request->major_code, | ||
1942 | cqp_request->minor_code); | ||
1943 | if ((!ret) || (cqp_request->major_code)) { | ||
1944 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
1945 | if (cqp_request->dynamic) { | ||
1946 | kfree(cqp_request); | ||
1947 | } else { | ||
1948 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
1949 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
1950 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
1951 | } | ||
1952 | } | ||
1953 | if (!ret) { | ||
1954 | nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy timeout expired\n", | ||
1955 | nescq->hw_cq.cq_number); | ||
1956 | ret = -ETIME; | ||
1957 | } else { | ||
1958 | nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy failed\n", | ||
1959 | nescq->hw_cq.cq_number); | ||
1960 | ret = -EIO; | ||
1961 | } | ||
1962 | } else { | ||
1963 | ret = 0; | ||
1964 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
1965 | if (cqp_request->dynamic) { | ||
1966 | kfree(cqp_request); | ||
1967 | } else { | ||
1968 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
1969 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
1970 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
1971 | } | ||
1972 | } | ||
1973 | } | ||
1974 | |||
1975 | if (nescq->cq_mem_size) | ||
1976 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, | ||
1977 | (void *)nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase); | ||
1978 | kfree(nescq); | ||
1979 | |||
1980 | return ret; | ||
1981 | } | ||
1982 | |||
1983 | |||
1984 | /** | ||
1985 | * nes_reg_mr - issue a Register STag CQP request for a single buffer or a PBL-described region | ||
1986 | */ | ||
1987 | static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, | ||
1988 | u32 stag, u64 region_length, struct nes_root_vpbl *root_vpbl, | ||
1989 | dma_addr_t single_buffer, u16 pbl_count, u16 residual_page_count, | ||
1990 | int acc, u64 *iova_start) | ||
1991 | { | ||
1992 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
1993 | struct nes_cqp_request *cqp_request; | ||
1994 | unsigned long flags; | ||
1995 | int ret; | ||
1996 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1997 | /* int count; */ | ||
1998 | u32 opcode = 0; | ||
1999 | u16 major_code; | ||
2000 | |||
2001 | /* Register the region with the adapter */ | ||
2002 | cqp_request = nes_get_cqp_request(nesdev); | ||
2003 | if (cqp_request == NULL) { | ||
2004 | nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n"); | ||
2005 | return -ENOMEM; | ||
2006 | } | ||
2007 | cqp_request->waiting = 1; | ||
2008 | cqp_wqe = &cqp_request->cqp_wqe; | ||
2009 | |||
2010 | spin_lock_irqsave(&nesadapter->pbl_lock, flags); | ||
2011 | /* track PBL resources */ | ||
2012 | if (pbl_count != 0) { | ||
2013 | if (pbl_count > 1) { | ||
2014 | /* Two level PBL */ | ||
2015 | if ((pbl_count+1) > nesadapter->free_4kpbl) { | ||
2016 | nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n"); | ||
2017 | if (cqp_request->dynamic) { | ||
2018 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
2019 | kfree(cqp_request); | ||
2020 | } else { | ||
2021 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
2022 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
2023 | } | ||
2024 | return -ENOMEM; | ||
2025 | } else { | ||
2026 | nesadapter->free_4kpbl -= pbl_count+1; | ||
2027 | } | ||
2028 | } else if (residual_page_count > 32) { | ||
2029 | if (pbl_count > nesadapter->free_4kpbl) { | ||
2030 | nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n"); | ||
2031 | if (cqp_request->dynamic) { | ||
2032 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
2033 | kfree(cqp_request); | ||
2034 | } else { | ||
2035 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
2036 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
2037 | } | ||
2038 | return -ENOMEM; | ||
2039 | } else { | ||
2040 | nesadapter->free_4kpbl -= pbl_count; | ||
2041 | } | ||
2042 | } else { | ||
2043 | if (pbl_count > nesadapter->free_256pbl) { | ||
2044 | nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n"); | ||
2045 | if (cqp_request->dynamic) { | ||
2046 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
2047 | kfree(cqp_request); | ||
2048 | } else { | ||
2049 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
2050 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
2051 | } | ||
2052 | return -ENOMEM; | ||
2053 | } else { | ||
2054 | nesadapter->free_256pbl -= pbl_count; | ||
2055 | } | ||
2056 | } | ||
2057 | } | ||
2058 | |||
2059 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
2060 | |||
2061 | opcode = NES_CQP_REGISTER_STAG | NES_CQP_STAG_RIGHTS_LOCAL_READ | | ||
2062 | NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR; | ||
2063 | if (acc & IB_ACCESS_LOCAL_WRITE) | ||
2064 | opcode |= NES_CQP_STAG_RIGHTS_LOCAL_WRITE; | ||
2065 | if (acc & IB_ACCESS_REMOTE_WRITE) | ||
2066 | opcode |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_REM_ACC_EN; | ||
2067 | if (acc & IB_ACCESS_REMOTE_READ) | ||
2068 | opcode |= NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_REM_ACC_EN; | ||
2069 | if (acc & IB_ACCESS_MW_BIND) | ||
2070 | opcode |= NES_CQP_STAG_RIGHTS_WINDOW_BIND | NES_CQP_STAG_REM_ACC_EN; | ||
2071 | |||
2072 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
2073 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode); | ||
2074 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_VA_LOW_IDX, *iova_start); | ||
2075 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_LOW_IDX, region_length); | ||
2076 | |||
2077 | cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] = | ||
2078 | cpu_to_le32((u32)(region_length >> 8) & 0xff000000); | ||
2079 | cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] |= | ||
2080 | cpu_to_le32(nespd->pd_id & 0x00007fff); | ||
2081 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag); | ||
2082 | |||
2083 | if (pbl_count == 0) { | ||
2084 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, single_buffer); | ||
2085 | } else { | ||
2086 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, root_vpbl->pbl_pbase); | ||
2087 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, pbl_count); | ||
2088 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, | ||
2089 | (((pbl_count - 1) * 4096) + (residual_page_count*8))); | ||
2090 | |||
2091 | if ((pbl_count > 1) || (residual_page_count > 32)) | ||
2092 | cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE); | ||
2093 | } | ||
2094 | barrier(); | ||
2095 | |||
2096 | atomic_set(&cqp_request->refcount, 2); | ||
2097 | nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL); | ||
2098 | |||
2099 | /* Wait for CQP */ | ||
2100 | ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done), | ||
2101 | NES_EVENT_TIMEOUT); | ||
2102 | nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u," | ||
2103 | " CQP Major:Minor codes = 0x%04X:0x%04X.\n", | ||
2104 | stag, ret, cqp_request->major_code, cqp_request->minor_code); | ||
2105 | major_code = cqp_request->major_code; | ||
2106 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
2107 | if (cqp_request->dynamic) { | ||
2108 | kfree(cqp_request); | ||
2109 | } else { | ||
2110 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
2111 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
2112 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
2113 | } | ||
2114 | } | ||
2115 | if (!ret) | ||
2116 | return -ETIME; | ||
2117 | else if (major_code) | ||
2118 | return -EIO; | ||
2119 | else | ||
2120 | return 0; | ||
2123 | } | ||
2124 | |||
2125 | |||
2126 | /** | ||
2127 | * nes_reg_phys_mr - register a list of physical buffers, building leaf/root PBLs as needed | ||
2128 | */ | ||
2129 | static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd, | ||
2130 | struct ib_phys_buf *buffer_list, int num_phys_buf, int acc, | ||
2131 | u64 * iova_start) | ||
2132 | { | ||
2133 | u64 region_length; | ||
2134 | struct nes_pd *nespd = to_nespd(ib_pd); | ||
2135 | struct nes_vnic *nesvnic = to_nesvnic(ib_pd->device); | ||
2136 | struct nes_device *nesdev = nesvnic->nesdev; | ||
2137 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
2138 | struct nes_mr *nesmr; | ||
2139 | struct ib_mr *ibmr; | ||
2140 | struct nes_vpbl vpbl; | ||
2141 | struct nes_root_vpbl root_vpbl; | ||
2142 | u32 stag; | ||
2143 | u32 i; | ||
2144 | u32 stag_index = 0; | ||
2145 | u32 next_stag_index = 0; | ||
2146 | u32 driver_key = 0; | ||
2147 | u32 root_pbl_index = 0; | ||
2148 | u32 cur_pbl_index = 0; | ||
2149 | int err = 0, pbl_depth = 0; | ||
2150 | int ret = 0; | ||
2151 | u16 pbl_count = 0; | ||
2152 | u8 single_page = 1; | ||
2153 | u8 stag_key = 0; | ||
2154 | |||
2155 | pbl_depth = 0; | ||
2156 | region_length = 0; | ||
2157 | vpbl.pbl_vbase = NULL; | ||
2158 | root_vpbl.pbl_vbase = NULL; | ||
2159 | root_vpbl.pbl_pbase = 0; | ||
2160 | |||
2161 | get_random_bytes(&next_stag_index, sizeof(next_stag_index)); | ||
2162 | stag_key = (u8)next_stag_index; | ||
2163 | |||
2164 | driver_key = 0; | ||
2165 | |||
2166 | next_stag_index >>= 8; | ||
2167 | next_stag_index %= nesadapter->max_mr; | ||
2168 | if (num_phys_buf > (1024*512)) { | ||
2169 | return ERR_PTR(-E2BIG); | ||
2170 | } | ||
2171 | |||
2172 | err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs, nesadapter->max_mr, | ||
2173 | &stag_index, &next_stag_index); | ||
2174 | if (err) { | ||
2175 | return ERR_PTR(err); | ||
2176 | } | ||
2177 | |||
2178 | nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL); | ||
2179 | if (!nesmr) { | ||
2180 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | ||
2181 | return ERR_PTR(-ENOMEM); | ||
2182 | } | ||
2183 | |||
2184 | for (i = 0; i < num_phys_buf; i++) { | ||
2185 | |||
2186 | if ((i & 0x01FF) == 0) { | ||
2187 | if (root_pbl_index == 1) { | ||
2188 | /* Allocate the root PBL */ | ||
2189 | root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192, | ||
2190 | &root_vpbl.pbl_pbase); | ||
2191 | nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n", | ||
2192 | root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase); | ||
2193 | if (!root_vpbl.pbl_vbase) { | ||
2194 | pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, | ||
2195 | vpbl.pbl_pbase); | ||
2196 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | ||
2197 | kfree(nesmr); | ||
2198 | return ERR_PTR(-ENOMEM); | ||
2199 | } | ||
2200 | root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024, GFP_KERNEL); | ||
2201 | if (!root_vpbl.leaf_vpbl) { | ||
2202 | pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase, | ||
2203 | root_vpbl.pbl_pbase); | ||
2204 | pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, | ||
2205 | vpbl.pbl_pbase); | ||
2206 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | ||
2207 | kfree(nesmr); | ||
2208 | return ERR_PTR(-ENOMEM); | ||
2209 | } | ||
2210 | root_vpbl.pbl_vbase[0].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase); | ||
2211 | root_vpbl.pbl_vbase[0].pa_high = | ||
2212 | cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32))); | ||
2213 | root_vpbl.leaf_vpbl[0] = vpbl; | ||
2214 | } | ||
2215 | /* Allocate a 4K buffer for the PBL */ | ||
2216 | vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096, | ||
2217 | &vpbl.pbl_pbase); | ||
2218 | nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%016lX\n", | ||
2219 | vpbl.pbl_vbase, (unsigned long)vpbl.pbl_pbase); | ||
2220 | if (!vpbl.pbl_vbase) { | ||
2221 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | ||
2222 | ibmr = ERR_PTR(-ENOMEM); | ||
2223 | kfree(nesmr); | ||
2224 | goto reg_phys_err; | ||
2225 | } | ||
2226 | /* Fill in the root table */ | ||
2227 | if (1 <= root_pbl_index) { | ||
2228 | root_vpbl.pbl_vbase[root_pbl_index].pa_low = | ||
2229 | cpu_to_le32((u32)vpbl.pbl_pbase); | ||
2230 | root_vpbl.pbl_vbase[root_pbl_index].pa_high = | ||
2231 | cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32))); | ||
2232 | root_vpbl.leaf_vpbl[root_pbl_index] = vpbl; | ||
2233 | } | ||
2234 | root_pbl_index++; | ||
2235 | cur_pbl_index = 0; | ||
2236 | } | ||
2237 | if (buffer_list[i].addr & ~PAGE_MASK) { | ||
2238 | /* TODO: Unwind allocated buffers */ | ||
2239 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | ||
2240 | nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n", | ||
2241 | (unsigned int) buffer_list[i].addr); | ||
2242 | ibmr = ERR_PTR(-EINVAL); | ||
2243 | kfree(nesmr); | ||
2244 | goto reg_phys_err; | ||
2245 | } | ||
2246 | |||
2247 | if (!buffer_list[i].size) { | ||
2248 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | ||
2249 | nes_debug(NES_DBG_MR, "Invalid Buffer Size\n"); | ||
2250 | ibmr = ERR_PTR(-EINVAL); | ||
2251 | kfree(nesmr); | ||
2252 | goto reg_phys_err; | ||
2253 | } | ||
2254 | |||
2255 | region_length += buffer_list[i].size; | ||
2256 | if ((i != 0) && (single_page)) { | ||
2257 | if ((buffer_list[i-1].addr+PAGE_SIZE) != buffer_list[i].addr) | ||
2258 | single_page = 0; | ||
2259 | } | ||
2260 | vpbl.pbl_vbase[cur_pbl_index].pa_low = cpu_to_le32((u32)buffer_list[i].addr); | ||
2261 | vpbl.pbl_vbase[cur_pbl_index++].pa_high = | ||
2262 | cpu_to_le32((u32)((((u64)buffer_list[i].addr) >> 32))); | ||
2263 | } | ||
2264 | |||
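| /* Compose the STag: resource index in bits 31:8, the driver key ORed in, and the 8-bit consumer key in the low byte */ | ||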
2265 | stag = stag_index << 8; | ||
2266 | stag |= driver_key; | ||
2267 | stag += (u32)stag_key; | ||
2268 | |||
2269 | nes_debug(NES_DBG_MR, "Registering STag 0x%08X, VA = 0x%016lX," | ||
2270 | " length = 0x%016lX, index = 0x%08X\n", | ||
2271 | stag, (unsigned long)*iova_start, (unsigned long)region_length, stag_index); | ||
2272 | |||
2273 | region_length -= (*iova_start)&PAGE_MASK; | ||
2274 | |||
2275 | /* Make the leaf PBL the root if only one PBL */ | ||
2276 | if (root_pbl_index == 1) { | ||
2277 | root_vpbl.pbl_pbase = vpbl.pbl_pbase; | ||
2278 | } | ||
2279 | |||
2280 | if (single_page) { | ||
2281 | pbl_count = 0; | ||
2282 | } else { | ||
2283 | pbl_count = root_pbl_index; | ||
2284 | } | ||
2285 | ret = nes_reg_mr(nesdev, nespd, stag, region_length, &root_vpbl, | ||
2286 | buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start); | ||
2287 | |||
2288 | if (ret == 0) { | ||
2289 | nesmr->ibmr.rkey = stag; | ||
2290 | nesmr->ibmr.lkey = stag; | ||
2291 | nesmr->mode = IWNES_MEMREG_TYPE_MEM; | ||
2292 | ibmr = &nesmr->ibmr; | ||
2293 | nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0; | ||
2294 | nesmr->pbls_used = pbl_count; | ||
2295 | if (pbl_count > 1) { | ||
2296 | nesmr->pbls_used++; | ||
2297 | } | ||
2298 | } else { | ||
2299 | kfree(nesmr); | ||
2300 | ibmr = ERR_PTR(-ENOMEM); | ||
2301 | } | ||
2302 | |||
2303 | reg_phys_err: | ||
2304 | /* free the resources */ | ||
2305 | if (root_pbl_index == 1) { | ||
2306 | /* single PBL case */ | ||
2307 | pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, vpbl.pbl_pbase); | ||
2308 | } else { | ||
2309 | for (i=0; i<root_pbl_index; i++) { | ||
2310 | pci_free_consistent(nesdev->pcidev, 4096, root_vpbl.leaf_vpbl[i].pbl_vbase, | ||
2311 | root_vpbl.leaf_vpbl[i].pbl_pbase); | ||
2312 | } | ||
2313 | kfree(root_vpbl.leaf_vpbl); | ||
2314 | pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase, | ||
2315 | root_vpbl.pbl_pbase); | ||
2316 | } | ||
2317 | |||
2318 | return ibmr; | ||
2319 | } | ||
2320 | |||
2321 | |||
2322 | /** | ||
2323 | * nes_get_dma_mr - register a DMA memory region covering all of memory via nes_reg_phys_mr | ||
2324 | */ | ||
2325 | static struct ib_mr *nes_get_dma_mr(struct ib_pd *pd, int acc) | ||
2326 | { | ||
2327 | struct ib_phys_buf bl; | ||
2328 | u64 kva = 0; | ||
2329 | |||
2330 | nes_debug(NES_DBG_MR, "\n"); | ||
2331 | |||
2332 | bl.size = (u64)0xffffffffffULL; | ||
2333 | bl.addr = 0; | ||
2334 | return nes_reg_phys_mr(pd, &bl, 1, acc, &kva); | ||
2335 | } | ||
2336 | |||
2337 | |||
2338 | /** | ||
2339 | * nes_reg_user_mr - register a userspace region; QP/CQ reg types only stash a PBL for later use | ||
2340 | */ | ||
2341 | static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | ||
2342 | u64 virt, int acc, struct ib_udata *udata) | ||
2343 | { | ||
2344 | u64 iova_start; | ||
2345 | __le64 *pbl; | ||
2346 | u64 region_length; | ||
2347 | dma_addr_t last_dma_addr = 0; | ||
2348 | dma_addr_t first_dma_addr = 0; | ||
2349 | struct nes_pd *nespd = to_nespd(pd); | ||
2350 | struct nes_vnic *nesvnic = to_nesvnic(pd->device); | ||
2351 | struct nes_device *nesdev = nesvnic->nesdev; | ||
2352 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
2353 | struct ib_mr *ibmr = ERR_PTR(-EINVAL); | ||
2354 | struct ib_umem_chunk *chunk; | ||
2355 | struct nes_ucontext *nes_ucontext; | ||
2356 | struct nes_pbl *nespbl; | ||
2357 | struct nes_mr *nesmr; | ||
2358 | struct ib_umem *region; | ||
2359 | struct nes_mem_reg_req req; | ||
2360 | struct nes_vpbl vpbl; | ||
2361 | struct nes_root_vpbl root_vpbl; | ||
2362 | int nmap_index, page_index; | ||
2363 | int page_count = 0; | ||
2364 | int err, pbl_depth = 0; | ||
2365 | int chunk_pages; | ||
2366 | int ret; | ||
2367 | u32 stag; | ||
2368 | u32 stag_index = 0; | ||
2369 | u32 next_stag_index; | ||
2370 | u32 driver_key; | ||
2371 | u32 root_pbl_index = 0; | ||
2372 | u32 cur_pbl_index = 0; | ||
2373 | u32 skip_pages; | ||
2374 | u16 pbl_count; | ||
2375 | u8 single_page = 1; | ||
2376 | u8 stag_key; | ||
2377 | |||
2378 | region = ib_umem_get(pd->uobject->context, start, length, acc); | ||
2379 | if (IS_ERR(region)) { | ||
2380 | return (struct ib_mr *)region; | ||
2381 | } | ||
2382 | |||
2383 | nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u," | ||
2384 | " offset = %u, page size = %u.\n", | ||
2385 | (unsigned long int)start, (unsigned long int)virt, (u32)length, | ||
2386 | region->offset, region->page_size); | ||
2387 | |||
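| /* Convert the byte offset into the umem into a count of whole 4KB pages to skip */ | ||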
2388 | skip_pages = ((u32)region->offset) >> 12; | ||
2389 | |||
2390 | if (ib_copy_from_udata(&req, udata, sizeof(req))) | ||
2391 | return ERR_PTR(-EFAULT); | ||
2392 | nes_debug(NES_DBG_MR, "Memory Registration type = %08X.\n", req.reg_type); | ||
2393 | |||
2394 | switch (req.reg_type) { | ||
2395 | case IWNES_MEMREG_TYPE_MEM: | ||
2396 | pbl_depth = 0; | ||
2397 | region_length = 0; | ||
2398 | vpbl.pbl_vbase = NULL; | ||
2399 | root_vpbl.pbl_vbase = NULL; | ||
2400 | root_vpbl.pbl_pbase = 0; | ||
2401 | |||
2402 | get_random_bytes(&next_stag_index, sizeof(next_stag_index)); | ||
2403 | stag_key = (u8)next_stag_index; | ||
2404 | |||
2405 | driver_key = next_stag_index & 0x70000000; | ||
2406 | |||
2407 | next_stag_index >>= 8; | ||
2408 | next_stag_index %= nesadapter->max_mr; | ||
2409 | |||
2410 | err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs, | ||
2411 | nesadapter->max_mr, &stag_index, &next_stag_index); | ||
2412 | if (err) { | ||
2413 | ib_umem_release(region); | ||
2414 | return ERR_PTR(err); | ||
2415 | } | ||
2416 | |||
2417 | nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL); | ||
2418 | if (!nesmr) { | ||
2419 | ib_umem_release(region); | ||
2420 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | ||
2421 | return ERR_PTR(-ENOMEM); | ||
2422 | } | ||
2423 | nesmr->region = region; | ||
2424 | |||
2425 | list_for_each_entry(chunk, ®ion->chunk_list, list) { | ||
2426 | nes_debug(NES_DBG_MR, "Chunk: nents = %u, nmap = %u .\n", | ||
2427 | chunk->nents, chunk->nmap); | ||
2428 | for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) { | ||
2429 | if (sg_dma_address(&chunk->page_list[nmap_index]) & ~PAGE_MASK) { | ||
2430 | ib_umem_release(region); | ||
2431 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | ||
2432 | nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n", | ||
2433 | (unsigned int) sg_dma_address(&chunk->page_list[nmap_index])); | ||
2434 | ibmr = ERR_PTR(-EINVAL); | ||
2435 | kfree(nesmr); | ||
2436 | goto reg_user_mr_err; | ||
2437 | } | ||
2438 | |||
2439 | if (!sg_dma_len(&chunk->page_list[nmap_index])) { | ||
2440 | ib_umem_release(region); | ||
2441 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, | ||
2442 | stag_index); | ||
2443 | nes_debug(NES_DBG_MR, "Invalid Buffer Size\n"); | ||
2444 | ibmr = ERR_PTR(-EINVAL); | ||
2445 | kfree(nesmr); | ||
2446 | goto reg_user_mr_err; | ||
2447 | } | ||
2448 | |||
2449 | region_length += sg_dma_len(&chunk->page_list[nmap_index]); | ||
2450 | chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12; | ||
2451 | region_length -= skip_pages << 12; | ||
2452 | for (page_index=skip_pages; page_index < chunk_pages; page_index++) { | ||
2453 | skip_pages = 0; | ||
2454 | if ((page_count != 0) && (page_count << 12) - (region->offset & (4096 - 1)) >= region->length) | ||
2455 | goto enough_pages; | ||
2456 | if ((page_count & 0x01FF) == 0) { | ||
2457 | if (page_count > (1024 * 512)) { | ||
2458 | ib_umem_release(region); | ||
2459 | pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, | ||
2460 | vpbl.pbl_pbase); | ||
2461 | nes_free_resource(nesadapter, | ||
2462 | nesadapter->allocated_mrs, stag_index); | ||
2463 | kfree(nesmr); | ||
2464 | ibmr = ERR_PTR(-E2BIG); | ||
2465 | goto reg_user_mr_err; | ||
2466 | } | ||
2467 | if (root_pbl_index == 1) { | ||
2468 | root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, | ||
2469 | 8192, &root_vpbl.pbl_pbase); | ||
2470 | nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n", | ||
2471 | root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase); | ||
2472 | if (!root_vpbl.pbl_vbase) { | ||
2473 | ib_umem_release(region); | ||
2474 | pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, | ||
2475 | vpbl.pbl_pbase); | ||
2476 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, | ||
2477 | stag_index); | ||
2478 | kfree(nesmr); | ||
2479 | ibmr = ERR_PTR(-ENOMEM); | ||
2480 | goto reg_user_mr_err; | ||
2481 | } | ||
2482 | root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024, | ||
2483 | GFP_KERNEL); | ||
2484 | if (!root_vpbl.leaf_vpbl) { | ||
2485 | ib_umem_release(region); | ||
2486 | pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase, | ||
2487 | root_vpbl.pbl_pbase); | ||
2488 | pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, | ||
2489 | vpbl.pbl_pbase); | ||
2490 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, | ||
2491 | stag_index); | ||
2492 | kfree(nesmr); | ||
2493 | ibmr = ERR_PTR(-ENOMEM); | ||
2494 | goto reg_user_mr_err; | ||
2495 | } | ||
2496 | root_vpbl.pbl_vbase[0].pa_low = | ||
2497 | cpu_to_le32((u32)vpbl.pbl_pbase); | ||
2498 | root_vpbl.pbl_vbase[0].pa_high = | ||
2499 | cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32))); | ||
2500 | root_vpbl.leaf_vpbl[0] = vpbl; | ||
2501 | } | ||
2502 | vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096, | ||
2503 | &vpbl.pbl_pbase); | ||
2504 | nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n", | ||
2505 | vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase); | ||
2506 | if (!vpbl.pbl_vbase) { | ||
2507 | ib_umem_release(region); | ||
2508 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); | ||
2509 | ibmr = ERR_PTR(-ENOMEM); | ||
2510 | kfree(nesmr); | ||
2511 | goto reg_user_mr_err; | ||
2512 | } | ||
2513 | if (1 <= root_pbl_index) { | ||
2514 | root_vpbl.pbl_vbase[root_pbl_index].pa_low = | ||
2515 | cpu_to_le32((u32)vpbl.pbl_pbase); | ||
2516 | root_vpbl.pbl_vbase[root_pbl_index].pa_high = | ||
2517 | cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32))); | ||
2518 | root_vpbl.leaf_vpbl[root_pbl_index] = vpbl; | ||
2519 | } | ||
2520 | root_pbl_index++; | ||
2521 | cur_pbl_index = 0; | ||
2522 | } | ||
2523 | if (single_page) { | ||
2524 | if (page_count != 0) { | ||
2525 | if ((last_dma_addr+4096) != | ||
2526 | (sg_dma_address(&chunk->page_list[nmap_index])+ | ||
2527 | (page_index*4096))) | ||
2528 | single_page = 0; | ||
2529 | last_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+ | ||
2530 | (page_index*4096); | ||
2531 | } else { | ||
2532 | first_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+ | ||
2533 | (page_index*4096); | ||
2534 | last_dma_addr = first_dma_addr; | ||
2535 | } | ||
2536 | } | ||
2537 | |||
2538 | vpbl.pbl_vbase[cur_pbl_index].pa_low = | ||
2539 | cpu_to_le32((u32)(sg_dma_address(&chunk->page_list[nmap_index])+ | ||
2540 | (page_index*4096))); | ||
2541 | vpbl.pbl_vbase[cur_pbl_index].pa_high = | ||
2542 | cpu_to_le32((u32)((((u64)(sg_dma_address(&chunk->page_list[nmap_index])+ | ||
2543 | (page_index*4096))) >> 32))); | ||
2544 | cur_pbl_index++; | ||
2545 | page_count++; | ||
2546 | } | ||
2547 | } | ||
2548 | } | ||
2549 | enough_pages: | ||
2550 | nes_debug(NES_DBG_MR, "calculating stag, stag_index=0x%08x, driver_key=0x%08x," | ||
2551 | " stag_key=0x%08x\n", | ||
2552 | stag_index, driver_key, stag_key); | ||
2553 | stag = stag_index << 8; | ||
2554 | stag |= driver_key; | ||
2555 | stag += (u32)stag_key; | ||
2556 | if (stag == 0) { | ||
2557 | stag = 1; | ||
2558 | } | ||
2559 | |||
2560 | iova_start = virt; | ||
2561 | /* Make the leaf PBL the root if only one PBL */ | ||
2562 | if (root_pbl_index == 1) { | ||
2563 | root_vpbl.pbl_pbase = vpbl.pbl_pbase; | ||
2564 | } | ||
2565 | |||
2566 | if (single_page) { | ||
2567 | pbl_count = 0; | ||
2568 | } else { | ||
2569 | pbl_count = root_pbl_index; | ||
2570 | first_dma_addr = 0; | ||
2571 | } | ||
2572 | nes_debug(NES_DBG_MR, "Registering STag 0x%08X, VA = 0x%08X, length = 0x%08X," | ||
2573 | " index = 0x%08X, region->length=0x%08llx, pbl_count = %u\n", | ||
2574 | stag, (unsigned int)iova_start, | ||
2575 | (unsigned int)region_length, stag_index, | ||
2576 | (unsigned long long)region->length, pbl_count); | ||
2577 | ret = nes_reg_mr( nesdev, nespd, stag, region->length, &root_vpbl, | ||
2578 | first_dma_addr, pbl_count, (u16)cur_pbl_index, acc, &iova_start); | ||
2579 | |||
2580 | nes_debug(NES_DBG_MR, "ret=%d\n", ret); | ||
2581 | |||
2582 | if (ret == 0) { | ||
2583 | nesmr->ibmr.rkey = stag; | ||
2584 | nesmr->ibmr.lkey = stag; | ||
2585 | nesmr->mode = IWNES_MEMREG_TYPE_MEM; | ||
2586 | ibmr = &nesmr->ibmr; | ||
2587 | nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0; | ||
2588 | nesmr->pbls_used = pbl_count; | ||
2589 | if (pbl_count > 1) { | ||
2590 | nesmr->pbls_used++; | ||
2591 | } | ||
2592 | } else { | ||
2593 | ib_umem_release(region); | ||
2594 | kfree(nesmr); | ||
2595 | ibmr = ERR_PTR(-ENOMEM); | ||
2596 | } | ||
2597 | |||
2598 | reg_user_mr_err: | ||
2599 | /* free the resources */ | ||
2600 | if (root_pbl_index == 1) { | ||
2601 | pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, | ||
2602 | vpbl.pbl_pbase); | ||
2603 | } else { | ||
2604 | for (page_index=0; page_index<root_pbl_index; page_index++) { | ||
2605 | pci_free_consistent(nesdev->pcidev, 4096, | ||
2606 | root_vpbl.leaf_vpbl[page_index].pbl_vbase, | ||
2607 | root_vpbl.leaf_vpbl[page_index].pbl_pbase); | ||
2608 | } | ||
2609 | kfree(root_vpbl.leaf_vpbl); | ||
2610 | pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase, | ||
2611 | root_vpbl.pbl_pbase); | ||
2612 | } | ||
2613 | |||
2614 | nes_debug(NES_DBG_MR, "Leaving, ibmr=%p\n", ibmr); | ||
2615 | |||
2616 | return ibmr; | ||
2617 | break; | ||
2618 | case IWNES_MEMREG_TYPE_QP: | ||
2619 | case IWNES_MEMREG_TYPE_CQ: | ||
2620 | nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL); | ||
2621 | if (!nespbl) { | ||
2622 | nes_debug(NES_DBG_MR, "Unable to allocate PBL\n"); | ||
2623 | ib_umem_release(region); | ||
2624 | return ERR_PTR(-ENOMEM); | ||
2625 | } | ||
2626 | nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL); | ||
2627 | if (!nesmr) { | ||
2628 | ib_umem_release(region); | ||
2629 | kfree(nespbl); | ||
2630 | nes_debug(NES_DBG_MR, "Unable to allocate nesmr\n"); | ||
2631 | return ERR_PTR(-ENOMEM); | ||
2632 | } | ||
2633 | nesmr->region = region; | ||
2634 | nes_ucontext = to_nesucontext(pd->uobject->context); | ||
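| /* One 64-bit PBL entry per 4KB page of the region, rounding up for a partial final page */ | ||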
2635 | pbl_depth = region->length >> 12; | ||
2636 | pbl_depth += (region->length & (4096-1)) ? 1 : 0; | ||
2637 | nespbl->pbl_size = pbl_depth*sizeof(u64); | ||
2638 | if (req.reg_type == IWNES_MEMREG_TYPE_QP) { | ||
2639 | nes_debug(NES_DBG_MR, "Attempting to allocate QP PBL memory"); | ||
2640 | } else { | ||
2641 | nes_debug(NES_DBG_MR, "Attempting to allocate CQ PBL memory"); | ||
2642 | } | ||
2643 | |||
2644 | nes_debug(NES_DBG_MR, " %u bytes, %u entries.\n", | ||
2645 | nespbl->pbl_size, pbl_depth); | ||
2646 | pbl = pci_alloc_consistent(nesdev->pcidev, nespbl->pbl_size, | ||
2647 | &nespbl->pbl_pbase); | ||
2648 | if (!pbl) { | ||
2649 | ib_umem_release(region); | ||
2650 | kfree(nesmr); | ||
2651 | kfree(nespbl); | ||
2652 | nes_debug(NES_DBG_MR, "Unable to allocate PBL memory\n"); | ||
2653 | return ERR_PTR(-ENOMEM); | ||
2654 | } | ||
2655 | |||
2656 | nespbl->pbl_vbase = (u64 *)pbl; | ||
2657 | nespbl->user_base = start; | ||
2658 | nes_debug(NES_DBG_MR, "Allocated PBL memory, %u bytes, pbl_pbase=%p," | ||
2659 | " pbl_vbase=%p user_base=0x%lx\n", | ||
2660 | nespbl->pbl_size, (void *)nespbl->pbl_pbase, | ||
2661 | (void*)nespbl->pbl_vbase, nespbl->user_base); | ||
2662 | |||
2663 | list_for_each_entry(chunk, ®ion->chunk_list, list) { | ||
2664 | for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) { | ||
2665 | chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12; | ||
2666 | chunk_pages += (sg_dma_len(&chunk->page_list[nmap_index]) & (4096-1)) ? 1 : 0; | ||
2667 | nespbl->page = sg_page(&chunk->page_list[0]); | ||
2668 | for (page_index=0; page_index<chunk_pages; page_index++) { | ||
2669 | ((__le32 *)pbl)[0] = cpu_to_le32((u32) | ||
2670 | (sg_dma_address(&chunk->page_list[nmap_index])+ | ||
2671 | (page_index*4096))); | ||
2672 | ((__le32 *)pbl)[1] = cpu_to_le32(((u64) | ||
2673 | (sg_dma_address(&chunk->page_list[nmap_index])+ | ||
2674 | (page_index*4096)))>>32); | ||
2675 | nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl, | ||
2676 | (unsigned long long)*pbl, | ||
2677 | le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0])); | ||
2678 | pbl++; | ||
2679 | } | ||
2680 | } | ||
2681 | } | ||
2682 | if (req.reg_type == IWNES_MEMREG_TYPE_QP) { | ||
2683 | list_add_tail(&nespbl->list, &nes_ucontext->qp_reg_mem_list); | ||
2684 | } else { | ||
2685 | list_add_tail(&nespbl->list, &nes_ucontext->cq_reg_mem_list); | ||
2686 | } | ||
2687 | nesmr->ibmr.rkey = -1; | ||
2688 | nesmr->ibmr.lkey = -1; | ||
2689 | nesmr->mode = req.reg_type; | ||
2690 | return &nesmr->ibmr; | ||
2691 | break; | ||
2692 | } | ||
2693 | |||
2694 | return ERR_PTR(-ENOSYS); | ||
2695 | } | ||
2696 | |||
2697 | |||
2698 | /** | ||
2699 | * nes_dereg_mr - release the umem, return PBLs to the free pools, and deallocate the STag | ||
2700 | */ | ||
2701 | static int nes_dereg_mr(struct ib_mr *ib_mr) | ||
2702 | { | ||
2703 | struct nes_mr *nesmr = to_nesmr(ib_mr); | ||
2704 | struct nes_vnic *nesvnic = to_nesvnic(ib_mr->device); | ||
2705 | struct nes_device *nesdev = nesvnic->nesdev; | ||
2706 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
2707 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
2708 | struct nes_cqp_request *cqp_request; | ||
2709 | unsigned long flags; | ||
2710 | int ret; | ||
2711 | u16 major_code; | ||
2712 | u16 minor_code; | ||
2713 | |||
2714 | if (nesmr->region) { | ||
2715 | ib_umem_release(nesmr->region); | ||
2716 | } | ||
2717 | if (nesmr->mode != IWNES_MEMREG_TYPE_MEM) { | ||
2718 | kfree(nesmr); | ||
2719 | return 0; | ||
2720 | } | ||
2721 | |||
2722 | /* Deallocate the region with the adapter */ | ||
2723 | |||
2724 | cqp_request = nes_get_cqp_request(nesdev); | ||
2725 | if (cqp_request == NULL) { | ||
2726 | nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n"); | ||
2727 | return -ENOMEM; | ||
2728 | } | ||
2729 | cqp_request->waiting = 1; | ||
2730 | cqp_wqe = &cqp_request->cqp_wqe; | ||
2731 | |||
2732 | spin_lock_irqsave(&nesadapter->pbl_lock, flags); | ||
2733 | if (nesmr->pbls_used != 0) { | ||
2734 | if (nesmr->pbl_4k) { | ||
2735 | nesadapter->free_4kpbl += nesmr->pbls_used; | ||
2736 | if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) { | ||
2737 | printk(KERN_ERR PFX "free 4KB PBLs(%u) has exceeded the max(%u)\n", | ||
2738 | nesadapter->free_4kpbl, nesadapter->max_4kpbl); | ||
2739 | } | ||
2740 | } else { | ||
2741 | nesadapter->free_256pbl += nesmr->pbls_used; | ||
2742 | if (nesadapter->free_256pbl > nesadapter->max_256pbl) { | ||
2743 | printk(KERN_ERR PFX "free 256B PBLs(%u) has exceeded the max(%u)\n", | ||
2744 | nesadapter->free_256pbl, nesadapter->max_256pbl); | ||
2745 | } | ||
2746 | } | ||
2747 | } | ||
2748 | |||
2749 | spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); | ||
2750 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
2751 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, | ||
2752 | NES_CQP_DEALLOCATE_STAG | NES_CQP_STAG_VA_TO | | ||
2753 | NES_CQP_STAG_DEALLOC_PBLS | NES_CQP_STAG_MR); | ||
2754 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, ib_mr->rkey); | ||
2755 | |||
2756 | atomic_set(&cqp_request->refcount, 2); | ||
2757 | nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL); | ||
2758 | |||
2759 | /* Wait for CQP */ | ||
2760 | nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X completed\n", ib_mr->rkey); | ||
2761 | ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0), | ||
2762 | NES_EVENT_TIMEOUT); | ||
2763 | nes_debug(NES_DBG_MR, "Deallocate STag 0x%08X completed, wait_event_timeout ret = %u," | ||
2764 | " CQP Major:Minor codes = 0x%04X:0x%04X\n", | ||
2765 | ib_mr->rkey, ret, cqp_request->major_code, cqp_request->minor_code); | ||
2766 | |||
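| /* Recover the STag index from the rkey (inverting the index << 8 used at registration) */ | ||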
2767 | nes_free_resource(nesadapter, nesadapter->allocated_mrs, | ||
2768 | (ib_mr->rkey & 0x0fffff00) >> 8); | ||
2769 | |||
2770 | kfree(nesmr); | ||
2771 | |||
2772 | major_code = cqp_request->major_code; | ||
2773 | minor_code = cqp_request->minor_code; | ||
2774 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
2775 | if (cqp_request->dynamic) { | ||
2776 | kfree(cqp_request); | ||
2777 | } else { | ||
2778 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
2779 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
2780 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
2781 | } | ||
2782 | } | ||
2783 | if (!ret) { | ||
2784 | nes_debug(NES_DBG_MR, "Timeout waiting to destroy STag," | ||
2785 | " ib_mr=%p, rkey = 0x%08X\n", | ||
2786 | ib_mr, ib_mr->rkey); | ||
2787 | return -ETIME; | ||
2788 | } else if (major_code) { | ||
2789 | nes_debug(NES_DBG_MR, "Error (0x%04X:0x%04X) while attempting" | ||
2790 | " to destroy STag, ib_mr=%p, rkey = 0x%08X\n", | ||
2791 | major_code, minor_code, ib_mr, ib_mr->rkey); | ||
2792 | return -EIO; | ||
2793 | } else | ||
2794 | return 0; | ||
2795 | } | ||
2796 | |||
2797 | |||
2798 | /** | ||
2799 | * show_rev | ||
2800 | */ | ||
2801 | static ssize_t show_rev(struct class_device *cdev, char *buf) | ||
2802 | { | ||
2803 | struct nes_ib_device *nesibdev = | ||
2804 | container_of(cdev, struct nes_ib_device, ibdev.class_dev); | ||
2805 | struct nes_vnic *nesvnic = nesibdev->nesvnic; | ||
2806 | |||
2807 | nes_debug(NES_DBG_INIT, "\n"); | ||
2808 | return sprintf(buf, "%x\n", nesvnic->nesdev->nesadapter->hw_rev); | ||
2809 | } | ||
2810 | |||
2811 | |||
2812 | /** | ||
2813 | * show_fw_ver | ||
2814 | */ | ||
2815 | static ssize_t show_fw_ver(struct class_device *cdev, char *buf) | ||
2816 | { | ||
2817 | struct nes_ib_device *nesibdev = | ||
2818 | container_of(cdev, struct nes_ib_device, ibdev.class_dev); | ||
2819 | struct nes_vnic *nesvnic = nesibdev->nesvnic; | ||
2820 | |||
2821 | nes_debug(NES_DBG_INIT, "\n"); | ||
2822 | return sprintf(buf, "%x.%x.%x\n", | ||
2823 | (int)(nesvnic->nesdev->nesadapter->fw_ver >> 32), | ||
2824 | (int)(nesvnic->nesdev->nesadapter->fw_ver >> 16) & 0xffff, | ||
2825 | (int)(nesvnic->nesdev->nesadapter->fw_ver & 0xffff)); | ||
2826 | } | ||
2827 | |||
2828 | |||
2829 | /** | ||
2830 | * show_hca | ||
2831 | */ | ||
2832 | static ssize_t show_hca(struct class_device *cdev, char *buf) | ||
2833 | { | ||
2834 | nes_debug(NES_DBG_INIT, "\n"); | ||
2835 | return sprintf(buf, "NES020\n"); | ||
2836 | } | ||
2837 | |||
2838 | |||
2839 | /** | ||
2840 | * show_board | ||
2841 | */ | ||
2842 | static ssize_t show_board(struct class_device *cdev, char *buf) | ||
2843 | { | ||
2844 | nes_debug(NES_DBG_INIT, "\n"); | ||
2845 | return sprintf(buf, "%.*s\n", 32, "NES020 Board ID"); | ||
2846 | } | ||
2847 | |||
2848 | |||
2849 | static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); | ||
2850 | static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); | ||
2851 | static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); | ||
2852 | static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); | ||
2853 | |||
2854 | static struct class_device_attribute *nes_class_attributes[] = { | ||
2855 | &class_device_attr_hw_rev, | ||
2856 | &class_device_attr_fw_ver, | ||
2857 | &class_device_attr_hca_type, | ||
2858 | &class_device_attr_board_id | ||
2859 | }; | ||
2860 | |||
2861 | |||
2862 | /** | ||
2863 | * nes_query_qp - report the QP capabilities and initial attributes | ||
2864 | */ | ||
2865 | static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||
2866 | int attr_mask, struct ib_qp_init_attr *init_attr) | ||
2867 | { | ||
2868 | struct nes_qp *nesqp = to_nesqp(ibqp); | ||
2869 | |||
2870 | nes_debug(NES_DBG_QP, "\n"); | ||
2871 | |||
2872 | attr->qp_access_flags = 0; | ||
2873 | attr->cap.max_send_wr = nesqp->hwqp.sq_size; | ||
2874 | attr->cap.max_recv_wr = nesqp->hwqp.rq_size; | ||
2875 | attr->cap.max_recv_sge = 1; | ||
2876 | if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) { | ||
2877 | init_attr->cap.max_inline_data = 0; | ||
2878 | } else { | ||
2879 | init_attr->cap.max_inline_data = 64; | ||
2880 | } | ||
2881 | |||
2882 | init_attr->event_handler = nesqp->ibqp.event_handler; | ||
2883 | init_attr->qp_context = nesqp->ibqp.qp_context; | ||
2884 | init_attr->send_cq = nesqp->ibqp.send_cq; | ||
2885 | init_attr->recv_cq = nesqp->ibqp.recv_cq; | ||
2886 | init_attr->srq = nesqp->ibqp.srq; | ||
2887 | init_attr->cap = attr->cap; | ||
2888 | |||
2889 | return 0; | ||
2890 | } | ||
2891 | |||
2892 | |||
2893 | /** | ||
2894 | * nes_hw_modify_qp - post a ModifyQP CQP request, optionally waiting for its completion | ||
2895 | */ | ||
2896 | int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, | ||
2897 | u32 next_iwarp_state, u32 wait_completion) | ||
2898 | { | ||
2899 | struct nes_hw_cqp_wqe *cqp_wqe; | ||
2900 | /* struct iw_cm_id *cm_id = nesqp->cm_id; */ | ||
2901 | /* struct iw_cm_event cm_event; */ | ||
2902 | struct nes_cqp_request *cqp_request; | ||
2903 | unsigned long flags; | ||
2904 | int ret; | ||
2905 | u16 major_code; | ||
2906 | |||
2907 | nes_debug(NES_DBG_MOD_QP, "QP%u, refcount=%d\n", | ||
2908 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount)); | ||
2909 | |||
2910 | cqp_request = nes_get_cqp_request(nesdev); | ||
2911 | if (cqp_request == NULL) { | ||
2912 | nes_debug(NES_DBG_MOD_QP, "Failed to get a cqp_request.\n"); | ||
2913 | return -ENOMEM; | ||
2914 | } | ||
2915 | if (wait_completion) { | ||
2916 | cqp_request->waiting = 1; | ||
2917 | } else { | ||
2918 | cqp_request->waiting = 0; | ||
2919 | } | ||
2920 | cqp_wqe = &cqp_request->cqp_wqe; | ||
2921 | |||
2922 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, | ||
2923 | NES_CQP_MODIFY_QP | NES_CQP_QP_TYPE_IWARP | next_iwarp_state); | ||
2924 | nes_debug(NES_DBG_MOD_QP, "using next_iwarp_state=%08x, wqe_words=%08x\n", | ||
2925 | next_iwarp_state, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])); | ||
2926 | nes_fill_init_cqp_wqe(cqp_wqe, nesdev); | ||
2927 | set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); | ||
2928 | set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase); | ||
2929 | |||
2930 | atomic_set(&cqp_request->refcount, 2); | ||
2931 | nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL); | ||
2932 | |||
2933 | /* Wait for CQP */ | ||
2934 | if (wait_completion) { | ||
2935 | /* nes_debug(NES_DBG_MOD_QP, "Waiting for modify iWARP QP%u to complete.\n", | ||
2936 | nesqp->hwqp.qp_id); */ | ||
2937 | ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0), | ||
2938 | NES_EVENT_TIMEOUT); | ||
2939 | nes_debug(NES_DBG_MOD_QP, "Modify iwarp QP%u completed, wait_event_timeout ret=%u, " | ||
2940 | "CQP Major:Minor codes = 0x%04X:0x%04X.\n", | ||
2941 | nesqp->hwqp.qp_id, ret, cqp_request->major_code, cqp_request->minor_code); | ||
2942 | major_code = cqp_request->major_code; | ||
2943 | if (major_code) { | ||
2944 | nes_debug(NES_DBG_MOD_QP, "Modify iwarp QP%u failed, " | ||
2945 | "CQP Major:Minor codes = 0x%04X:0x%04X, intended next state = 0x%08X.\n", | ||
2946 | nesqp->hwqp.qp_id, cqp_request->major_code, | ||
2947 | cqp_request->minor_code, next_iwarp_state); | ||
2948 | } | ||
2949 | if (atomic_dec_and_test(&cqp_request->refcount)) { | ||
2950 | if (cqp_request->dynamic) { | ||
2951 | kfree(cqp_request); | ||
2952 | } else { | ||
2953 | spin_lock_irqsave(&nesdev->cqp.lock, flags); | ||
2954 | list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs); | ||
2955 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | ||
2956 | } | ||
2957 | } | ||
2958 | if (!ret) | ||
2959 | return -ETIME; | ||
2960 | else if (major_code) | ||
2961 | return -EIO; | ||
2962 | else | ||
2963 | return 0; | ||
2964 | } else { | ||
2965 | return 0; | ||
2966 | } | ||
2967 | } | ||
2968 | |||
2969 | |||
2970 | /** | ||
2971 | * nes_modify_qp - translate ib_modify_qp() state changes into iWARP QP state transitions | ||
2972 | */ | ||
2973 | int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||
2974 | int attr_mask, struct ib_udata *udata) | ||
2975 | { | ||
2976 | struct nes_qp *nesqp = to_nesqp(ibqp); | ||
2977 | struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); | ||
2978 | struct nes_device *nesdev = nesvnic->nesdev; | ||
2979 | /* u32 cqp_head; */ | ||
2980 | /* u32 counter; */ | ||
2981 | u32 next_iwarp_state = 0; | ||
2982 | int err; | ||
2983 | unsigned long qplockflags; | ||
2984 | int ret; | ||
2985 | u16 original_last_aeq; | ||
2986 | u8 issue_modify_qp = 0; | ||
2987 | u8 issue_disconnect = 0; | ||
2988 | u8 dont_wait = 0; | ||
2989 | |||
2990 | nes_debug(NES_DBG_MOD_QP, "QP%u: QP State=%u, cur QP State=%u," | ||
2991 | " iwarp_state=0x%X, refcount=%d\n", | ||
2992 | nesqp->hwqp.qp_id, attr->qp_state, nesqp->ibqp_state, | ||
2993 | nesqp->iwarp_state, atomic_read(&nesqp->refcount)); | ||
2994 | |||
2995 | nes_add_ref(&nesqp->ibqp); | ||
2996 | spin_lock_irqsave(&nesqp->lock, qplockflags); | ||
2997 | |||
2998 | nes_debug(NES_DBG_MOD_QP, "QP%u: hw_iwarp_state=0x%X, hw_tcp_state=0x%X," | ||
2999 | " QP Access Flags=0x%X, attr_mask = 0x%0x\n", | ||
3000 | nesqp->hwqp.qp_id, nesqp->hw_iwarp_state, | ||
3001 | nesqp->hw_tcp_state, attr->qp_access_flags, attr_mask); | ||
3002 | |||
3003 | if (attr_mask & IB_QP_STATE) { | ||
3004 | switch (attr->qp_state) { | ||
3005 | case IB_QPS_INIT: | ||
3006 | nes_debug(NES_DBG_MOD_QP, "QP%u: new state = init\n", | ||
3007 | nesqp->hwqp.qp_id); | ||
3008 | if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_IDLE) { | ||
3009 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
3010 | nes_rem_ref(&nesqp->ibqp); | ||
3011 | return -EINVAL; | ||
3012 | } | ||
3013 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE; | ||
3014 | issue_modify_qp = 1; | ||
3015 | break; | ||
3016 | case IB_QPS_RTR: | ||
3017 | nes_debug(NES_DBG_MOD_QP, "QP%u: new state = rtr\n", | ||
3018 | nesqp->hwqp.qp_id); | ||
3019 | if (nesqp->iwarp_state>(u32)NES_CQP_QP_IWARP_STATE_IDLE) { | ||
3020 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
3021 | nes_rem_ref(&nesqp->ibqp); | ||
3022 | return -EINVAL; | ||
3023 | } | ||
3024 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE; | ||
3025 | issue_modify_qp = 1; | ||
3026 | break; | ||
3027 | case IB_QPS_RTS: | ||
3028 | nes_debug(NES_DBG_MOD_QP, "QP%u: new state = rts\n", | ||
3029 | nesqp->hwqp.qp_id); | ||
3030 | if (nesqp->iwarp_state>(u32)NES_CQP_QP_IWARP_STATE_RTS) { | ||
3031 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
3032 | nes_rem_ref(&nesqp->ibqp); | ||
3033 | return -EINVAL; | ||
3034 | } | ||
3035 | if (nesqp->cm_id == NULL) { | ||
3036 | nes_debug(NES_DBG_MOD_QP, "QP%u: Failing attempt to move QP to RTS without a CM_ID. \n", | ||
3037 | nesqp->hwqp.qp_id ); | ||
3038 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
3039 | nes_rem_ref(&nesqp->ibqp); | ||
3040 | return -EINVAL; | ||
3041 | } | ||
3042 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_RTS; | ||
3043 | if (nesqp->iwarp_state != NES_CQP_QP_IWARP_STATE_RTS) | ||
3044 | next_iwarp_state |= NES_CQP_QP_CONTEXT_VALID | | ||
3045 | NES_CQP_QP_ARP_VALID | NES_CQP_QP_ORD_VALID; | ||
3046 | issue_modify_qp = 1; | ||
3047 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_ESTABLISHED; | ||
3048 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_RTS; | ||
3049 | nesqp->hte_added = 1; | ||
3050 | break; | ||
3051 | case IB_QPS_SQD: | ||
3052 | issue_modify_qp = 1; | ||
3053 | nes_debug(NES_DBG_MOD_QP, "QP%u: new state=closing. SQ head=%u, SQ tail=%u\n", | ||
3054 | nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail); | ||
3055 | if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_CLOSING) { | ||
3056 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
3057 | nes_rem_ref(&nesqp->ibqp); | ||
3058 | return 0; | ||
3059 | } else { | ||
3060 | if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_CLOSING) { | ||
3061 | nes_debug(NES_DBG_MOD_QP, "QP%u: State change to closing" | ||
3062 | " ignored due to current iWARP state\n", | ||
3063 | nesqp->hwqp.qp_id); | ||
3064 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
3065 | nes_rem_ref(&nesqp->ibqp); | ||
3066 | return -EINVAL; | ||
3067 | } | ||
3068 | if (nesqp->hw_iwarp_state != NES_AEQE_IWARP_STATE_RTS) { | ||
3069 | nes_debug(NES_DBG_MOD_QP, "QP%u: State change to closing" | ||
3070 | " already done based on hw state.\n", | ||
3071 | nesqp->hwqp.qp_id); | ||
3072 | issue_modify_qp = 0; | ||
3073 | nesqp->in_disconnect = 0; | ||
3074 | } | ||
3075 | switch (nesqp->hw_iwarp_state) { | ||
3076 | case NES_AEQE_IWARP_STATE_CLOSING: | ||
3077 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING; | ||
| break; | ||
3078 | case NES_AEQE_IWARP_STATE_TERMINATE: | ||
3079 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE; | ||
3080 | break; | ||
3081 | case NES_AEQE_IWARP_STATE_ERROR: | ||
3082 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR; | ||
3083 | break; | ||
3084 | default: | ||
3085 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING; | ||
3086 | nesqp->in_disconnect = 1; | ||
3087 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; | ||
3088 | break; | ||
3089 | } | ||
3090 | } | ||
3091 | break; | ||
3092 | case IB_QPS_SQE: | ||
3093 | nes_debug(NES_DBG_MOD_QP, "QP%u: new state = terminate\n", | ||
3094 | nesqp->hwqp.qp_id); | ||
3095 | if (nesqp->iwarp_state>=(u32)NES_CQP_QP_IWARP_STATE_TERMINATE) { | ||
3096 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
3097 | nes_rem_ref(&nesqp->ibqp); | ||
3098 | return -EINVAL; | ||
3099 | } | ||
3100 | /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */ | ||
3101 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE; | ||
3102 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE; | ||
3103 | issue_modify_qp = 1; | ||
3104 | nesqp->in_disconnect = 1; | ||
3105 | break; | ||
3106 | case IB_QPS_ERR: | ||
3107 | case IB_QPS_RESET: | ||
3108 | if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_ERROR) { | ||
3109 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
3110 | nes_rem_ref(&nesqp->ibqp); | ||
3111 | return -EINVAL; | ||
3112 | } | ||
3113 | nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n", | ||
3114 | nesqp->hwqp.qp_id); | ||
3115 | next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR; | ||
3116 | /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */ | ||
3117 | if (nesqp->hte_added) { | ||
3118 | nes_debug(NES_DBG_MOD_QP, "set CQP_QP_DEL_HTE\n"); | ||
3119 | next_iwarp_state |= NES_CQP_QP_DEL_HTE; | ||
3120 | nesqp->hte_added = 0; | ||
3121 | } | ||
3122 | if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) && | ||
3123 | (nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) { | ||
3124 | next_iwarp_state |= NES_CQP_QP_RESET; | ||
3125 | nesqp->in_disconnect = 1; | ||
3126 | } else { | ||
3127 | nes_debug(NES_DBG_MOD_QP, "QP%u NOT setting NES_CQP_QP_RESET since TCP state = %u\n", | ||
3128 | nesqp->hwqp.qp_id, nesqp->hw_tcp_state); | ||
3129 | dont_wait = 1; | ||
3130 | } | ||
3131 | issue_modify_qp = 1; | ||
3132 | nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR; | ||
3133 | break; | ||
3134 | default: | ||
3135 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
3136 | nes_rem_ref(&nesqp->ibqp); | ||
3137 | return -EINVAL; | ||
3138 | break; | ||
3139 | } | ||
3140 | |||
3141 | nesqp->ibqp_state = attr->qp_state; | ||
3142 | if (((nesqp->iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) == | ||
3143 | (u32)NES_CQP_QP_IWARP_STATE_RTS) && | ||
3144 | ((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) > | ||
3145 | (u32)NES_CQP_QP_IWARP_STATE_RTS)) { | ||
3146 | nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK; | ||
3147 | nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n", | ||
3148 | nesqp->iwarp_state); | ||
3149 | issue_disconnect = 1; | ||
3150 | } else { | ||
3151 | nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK; | ||
3152 | nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n", | ||
3153 | nesqp->iwarp_state); | ||
3154 | } | ||
3155 | } | ||
3156 | |||
3157 | if (attr_mask & IB_QP_ACCESS_FLAGS) { | ||
3158 | if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) { | ||
3159 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN | | ||
3160 | NES_QPCONTEXT_MISC_RDMA_READ_EN); | ||
3161 | issue_modify_qp = 1; | ||
3162 | } | ||
3163 | if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) { | ||
3164 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN); | ||
3165 | issue_modify_qp = 1; | ||
3166 | } | ||
3167 | if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) { | ||
3168 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_READ_EN); | ||
3169 | issue_modify_qp = 1; | ||
3170 | } | ||
3171 | if (attr->qp_access_flags & IB_ACCESS_MW_BIND) { | ||
3172 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WBIND_EN); | ||
3173 | issue_modify_qp = 1; | ||
3174 | } | ||
3175 | |||
3176 | if (nesqp->user_mode) { | ||
3177 | nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN | | ||
3178 | NES_QPCONTEXT_MISC_RDMA_READ_EN); | ||
3179 | issue_modify_qp = 1; | ||
3180 | } | ||
3181 | } | ||
3182 | |||
3183 | original_last_aeq = nesqp->last_aeq; | ||
3184 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
3185 | |||
3186 | nes_debug(NES_DBG_MOD_QP, "issue_modify_qp=%u\n", issue_modify_qp); | ||
3187 | |||
3188 | ret = 0; | ||
3189 | |||
3190 | |||
3191 | if (issue_modify_qp) { | ||
3192 | nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n"); | ||
3193 | ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1); | ||
3194 | if (ret) | ||
3195 | nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)" | ||
3196 | " failed for QP%u.\n", | ||
3197 | next_iwarp_state, nesqp->hwqp.qp_id); | ||
3198 | |||
3199 | } | ||
3200 | |||
3201 | if ((issue_modify_qp) && (nesqp->ibqp_state > IB_QPS_RTS)) { | ||
3202 | nes_debug(NES_DBG_MOD_QP, "QP%u Issued ModifyQP refcount (%d)," | ||
3203 | " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n", | ||
3204 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), | ||
3205 | original_last_aeq, nesqp->last_aeq); | ||
3206 | if ((!ret) || | ||
3207 | ((original_last_aeq != NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) && | ||
3208 | (ret))) { | ||
3209 | if (dont_wait) { | ||
3210 | if (nesqp->cm_id && nesqp->hw_tcp_state != 0) { | ||
3211 | nes_debug(NES_DBG_MOD_QP, "QP%u Queuing fake disconnect for QP refcount (%d)," | ||
3212 | " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n", | ||
3213 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), | ||
3214 | original_last_aeq, nesqp->last_aeq); | ||
3215 | /* this one is for the cm_disconnect thread */ | ||
3216 | nes_add_ref(&nesqp->ibqp); | ||
3217 | spin_lock_irqsave(&nesqp->lock, qplockflags); | ||
3218 | nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; | ||
3219 | nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT; | ||
3220 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
3221 | nes_cm_disconn(nesqp); | ||
3222 | } else { | ||
3223 | nes_debug(NES_DBG_MOD_QP, "QP%u No fake disconnect, QP refcount=%d\n", | ||
3224 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount)); | ||
3225 | nes_rem_ref(&nesqp->ibqp); | ||
3226 | } | ||
3227 | } else { | ||
3228 | spin_lock_irqsave(&nesqp->lock, qplockflags); | ||
3229 | if (nesqp->cm_id) { | ||
3230 | /* These two are for the timer thread */ | ||
3231 | if (atomic_inc_return(&nesqp->close_timer_started) == 1) { | ||
3232 | nes_add_ref(&nesqp->ibqp); | ||
3233 | nesqp->cm_id->add_ref(nesqp->cm_id); | ||
3234 | nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d)," | ||
3235 | " need ae to finish up, original_last_aeq = 0x%04X." | ||
3236 | " last_aeq = 0x%04X, scheduling timer.\n", | ||
3237 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), | ||
3238 | original_last_aeq, nesqp->last_aeq); | ||
3239 | schedule_nes_timer(nesqp->cm_node, (struct sk_buff *) nesqp, NES_TIMER_TYPE_CLOSE, 1, 0); | ||
3240 | } | ||
3241 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
3242 | } else { | ||
3243 | spin_unlock_irqrestore(&nesqp->lock, qplockflags); | ||
3244 | nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d)," | ||
3245 | " need ae to finish up, original_last_aeq = 0x%04X." | ||
3246 | " last_aeq = 0x%04X.\n", | ||
3247 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), | ||
3248 | original_last_aeq, nesqp->last_aeq); | ||
3249 | } | ||
3250 | } | ||
3251 | } else { | ||
3252 | nes_debug(NES_DBG_MOD_QP, "QP%u Decrementing QP refcount (%d), No ae to finish up," | ||
3253 | " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n", | ||
3254 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), | ||
3255 | original_last_aeq, nesqp->last_aeq); | ||
3256 | nes_rem_ref(&nesqp->ibqp); | ||
3257 | } | ||
3258 | } else { | ||
3259 | nes_debug(NES_DBG_MOD_QP, "QP%u Decrementing QP refcount (%d), No ae to finish up," | ||
3260 | " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n", | ||
3261 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), | ||
3262 | original_last_aeq, nesqp->last_aeq); | ||
3263 | nes_rem_ref(&nesqp->ibqp); | ||
3264 | } | ||
3265 | |||
3266 | err = 0; | ||
3267 | |||
3268 | nes_debug(NES_DBG_MOD_QP, "QP%u Leaving, refcount=%d\n", | ||
3269 | nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount)); | ||
3270 | |||
3271 | return err; | ||
3272 | } | ||
3273 | |||
3274 | |||
3275 | /** | ||
3276 | * nes_multicast_attach | ||
3277 | */ | ||
3278 | static int nes_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | ||
3279 | { | ||
3280 | nes_debug(NES_DBG_INIT, "\n"); | ||
3281 | return -ENOSYS; | ||
3282 | } | ||
3283 | |||
3284 | |||
3285 | /** | ||
3286 | * nes_multicast_detach | ||
3287 | */ | ||
3288 | static int nes_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | ||
3289 | { | ||
3290 | nes_debug(NES_DBG_INIT, "\n"); | ||
3291 | return -ENOSYS; | ||
3292 | } | ||
3293 | |||
3294 | |||
3295 | /** | ||
3296 | * nes_process_mad | ||
3297 | */ | ||
3298 | static int nes_process_mad(struct ib_device *ibdev, int mad_flags, | ||
3299 | u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, | ||
3300 | struct ib_mad *in_mad, struct ib_mad *out_mad) | ||
3301 | { | ||
3302 | nes_debug(NES_DBG_INIT, "\n"); | ||
3303 | return -ENOSYS; | ||
3304 | } | ||
3305 | |||
3306 | static inline void | ||
3307 | fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, struct ib_send_wr *ib_wr, u32 uselkey) | ||
3308 | { | ||
3309 | int sge_index; | ||
3310 | int total_payload_length = 0; | ||
3311 | for (sge_index = 0; sge_index < ib_wr->num_sge; sge_index++) { | ||
3312 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX+(sge_index*4), | ||
3313 | ib_wr->sg_list[sge_index].addr); | ||
3314 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_LENGTH0_IDX + (sge_index*4), | ||
3315 | ib_wr->sg_list[sge_index].length); | ||
3316 | if (uselkey) | ||
3317 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX + (sge_index*4), | ||
3318 | (ib_wr->sg_list[sge_index].lkey)); | ||
3319 | else | ||
3320 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX + (sge_index*4), 0); | ||
3321 | |||
3322 | total_payload_length += ib_wr->sg_list[sge_index].length; | ||
3323 | } | ||
3324 | nes_debug(NES_DBG_IW_TX, "UC UC UC, sending total_payload_length=%u \n", | ||
3325 | total_payload_length); | ||
3326 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX, | ||
3327 | total_payload_length); | ||
3328 | } | ||
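fill_wqe_sg_send lays each scatter/gather element out on a fixed four-word stride inside the send WQE: a 64-bit fragment address, a 32-bit length and a 32-bit STag, so element N lands at the FRAG0/LENGTH0/STAG0 indexes plus N*4, and the lengths are summed into the total-payload word. A small stand-alone sketch of that indexing over a plain word array; the index values below are made-up placeholders rather than the driver's NES_IWARP_SQ_WQE_* constants, and the little-endian byte swapping done by the real set_wqe_*_value helpers is left out:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Placeholder word indexes; the real NES_IWARP_SQ_WQE_* values differ. */
#define FRAG0_LOW_IDX     4
#define LENGTH0_IDX       6
#define STAG0_IDX         7
#define TOTAL_PAYLOAD_IDX 3

struct sge { uint64_t addr; uint32_t length; uint32_t lkey; };

static void set32(uint32_t *w, int idx, uint32_t v) { w[idx] = v; }
static void set64(uint32_t *w, int idx, uint64_t v)
{
        w[idx] = (uint32_t)v;             /* low word */
        w[idx + 1] = (uint32_t)(v >> 32); /* high word */
}

int main(void)
{
        uint32_t wqe[32];
        struct sge sgl[2] = { { 0x1000, 256, 0x11 }, { 0x2000, 512, 0x22 } };
        uint32_t total = 0;

        memset(wqe, 0, sizeof(wqe));
        for (int i = 0; i < 2; i++) {
                set64(wqe, FRAG0_LOW_IDX + i * 4, sgl[i].addr);
                set32(wqe, LENGTH0_IDX + i * 4, sgl[i].length);
                set32(wqe, STAG0_IDX + i * 4, sgl[i].lkey);
                total += sgl[i].length;
        }
        set32(wqe, TOTAL_PAYLOAD_IDX, total);
        printf("total payload = %u bytes\n", total);
        return 0;
}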
3329 | |||
3330 | /** | ||
3331 | * nes_post_send | ||
3332 | */ | ||
3333 | static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | ||
3334 | struct ib_send_wr **bad_wr) | ||
3335 | { | ||
3336 | u64 u64temp; | ||
3337 | unsigned long flags = 0; | ||
3338 | struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); | ||
3339 | struct nes_device *nesdev = nesvnic->nesdev; | ||
3340 | struct nes_qp *nesqp = to_nesqp(ibqp); | ||
3341 | struct nes_hw_qp_wqe *wqe; | ||
3342 | int err; | ||
3343 | u32 qsize = nesqp->hwqp.sq_size; | ||
3344 | u32 head; | ||
3345 | u32 wqe_misc; | ||
3346 | u32 wqe_count; | ||
3347 | u32 counter; | ||
3348 | u32 total_payload_length; | ||
3349 | |||
3350 | err = 0; | ||
3351 | wqe_misc = 0; | ||
3352 | wqe_count = 0; | ||
3353 | total_payload_length = 0; | ||
3354 | |||
3355 | if (nesqp->ibqp_state > IB_QPS_RTS) | ||
3356 | return -EINVAL; | ||
3357 | |||
3358 | spin_lock_irqsave(&nesqp->lock, flags); | ||
3359 | |||
3360 | head = nesqp->hwqp.sq_head; | ||
3361 | |||
3362 | while (ib_wr) { | ||
3363 | /* Check for SQ overflow */ | ||
3364 | if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { | ||
3365 | err = -EINVAL; | ||
3366 | break; | ||
3367 | } | ||
3368 | |||
3369 | wqe = &nesqp->hwqp.sq_vbase[head]; | ||
3370 | /* nes_debug(NES_DBG_IW_TX, "processing sq wqe for QP%u at %p, head = %u.\n", | ||
3371 | nesqp->hwqp.qp_id, wqe, head); */ | ||
3372 | nes_fill_init_qp_wqe(wqe, nesqp, head); | ||
3373 | u64temp = (u64)(ib_wr->wr_id); | ||
3374 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, | ||
3375 | u64temp); | ||
3376 | switch (ib_wr->opcode) { | ||
3377 | case IB_WR_SEND: | ||
3378 | if (ib_wr->send_flags & IB_SEND_SOLICITED) { | ||
3379 | wqe_misc = NES_IWARP_SQ_OP_SENDSE; | ||
3380 | } else { | ||
3381 | wqe_misc = NES_IWARP_SQ_OP_SEND; | ||
3382 | } | ||
3383 | if (ib_wr->num_sge > nesdev->nesadapter->max_sge) { | ||
3384 | err = -EINVAL; | ||
3385 | break; | ||
3386 | } | ||
3387 | if (ib_wr->send_flags & IB_SEND_FENCE) { | ||
3388 | wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE; | ||
3389 | } | ||
3390 | if ((ib_wr->send_flags & IB_SEND_INLINE) && | ||
3391 | ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) && | ||
3392 | (ib_wr->sg_list[0].length <= 64)) { | ||
3393 | memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX], | ||
3394 | (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length); | ||
3395 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX, | ||
3396 | ib_wr->sg_list[0].length); | ||
3397 | wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA; | ||
3398 | } else { | ||
3399 | fill_wqe_sg_send(wqe, ib_wr, 1); | ||
3400 | } | ||
3401 | |||
3402 | break; | ||
3403 | case IB_WR_RDMA_WRITE: | ||
3404 | wqe_misc = NES_IWARP_SQ_OP_RDMAW; | ||
3405 | if (ib_wr->num_sge > nesdev->nesadapter->max_sge) { | ||
3406 | nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=%u\n", | ||
3407 | ib_wr->num_sge, | ||
3408 | nesdev->nesadapter->max_sge); | ||
3409 | err = -EINVAL; | ||
3410 | break; | ||
3411 | } | ||
3412 | if (ib_wr->send_flags & IB_SEND_FENCE) { | ||
3413 | wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE; | ||
3414 | } | ||
3415 | |||
3416 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX, | ||
3417 | ib_wr->wr.rdma.rkey); | ||
3418 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX, | ||
3419 | ib_wr->wr.rdma.remote_addr); | ||
3420 | |||
3421 | if ((ib_wr->send_flags & IB_SEND_INLINE) && | ||
3422 | ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) && | ||
3423 | (ib_wr->sg_list[0].length <= 64)) { | ||
3424 | memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX], | ||
3425 | (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length); | ||
3426 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX, | ||
3427 | ib_wr->sg_list[0].length); | ||
3428 | wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA; | ||
3429 | } else { | ||
3430 | fill_wqe_sg_send(wqe, ib_wr, 1); | ||
3431 | } | ||
3432 | wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] = | ||
3433 | wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]; | ||
3434 | break; | ||
3435 | case IB_WR_RDMA_READ: | ||
3436 | /* iWARP only supports 1 sge for RDMA reads */ | ||
3437 | if (ib_wr->num_sge > 1) { | ||
3438 | nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=1\n", | ||
3439 | ib_wr->num_sge); | ||
3440 | err = -EINVAL; | ||
3441 | break; | ||
3442 | } | ||
3443 | wqe_misc = NES_IWARP_SQ_OP_RDMAR; | ||
3444 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX, | ||
3445 | ib_wr->wr.rdma.remote_addr); | ||
3446 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX, | ||
3447 | ib_wr->wr.rdma.rkey); | ||
3448 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX, | ||
3449 | ib_wr->sg_list->length); | ||
3450 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, | ||
3451 | ib_wr->sg_list->addr); | ||
3452 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX, | ||
3453 | ib_wr->sg_list->lkey); | ||
3454 | break; | ||
3455 | default: | ||
3456 | /* error */ | ||
3457 | err = -EINVAL; | ||
3458 | break; | ||
3459 | } | ||
3460 | |||
3461 | if (ib_wr->send_flags & IB_SEND_SIGNALED) { | ||
3462 | wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL; | ||
3463 | } | ||
3464 | wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(wqe_misc); | ||
3465 | |||
3466 | ib_wr = ib_wr->next; | ||
3467 | head++; | ||
3468 | wqe_count++; | ||
3469 | if (head >= qsize) | ||
3470 | head = 0; | ||
3471 | |||
3472 | } | ||
3473 | |||
3474 | nesqp->hwqp.sq_head = head; | ||
3475 | barrier(); | ||
3476 | while (wqe_count) { | ||
3477 | counter = min(wqe_count, ((u32)255)); | ||
3478 | wqe_count -= counter; | ||
3479 | nes_write32(nesdev->regs + NES_WQE_ALLOC, | ||
3480 | (counter << 24) | 0x00800000 | nesqp->hwqp.qp_id); | ||
3481 | } | ||
3482 | |||
3483 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3484 | |||
3485 | if (err) | ||
3486 | *bad_wr = ib_wr; | ||
3487 | return err; | ||
3488 | } | ||
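The overflow test at the top of the post loop treats the send queue as a ring where head and tail both live in [0, qsize): (head + 2*qsize - tail) % qsize is the number of slots currently in use, and the post is refused once that reaches qsize - 1, keeping one slot empty so a full ring can be told apart from an empty one. A quick user-space check of the same predicate with arbitrary example values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same predicate as the driver's SQ/RQ overflow test. */
static bool ring_full(uint32_t head, uint32_t tail, uint32_t qsize)
{
        return ((head + 2 * qsize - tail) % qsize) == (qsize - 1);
}

int main(void)
{
        uint32_t qsize = 8, tail = 5;

        for (uint32_t head = 0; head < qsize; head++)
                printf("head=%u tail=%u used=%u full=%d\n",
                       head, tail, (head + 2 * qsize - tail) % qsize,
                       ring_full(head, tail, qsize));
        return 0;
}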
3489 | |||
3490 | |||
3491 | /** | ||
3492 | * nes_post_recv | ||
3493 | */ | ||
3494 | static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr, | ||
3495 | struct ib_recv_wr **bad_wr) | ||
3496 | { | ||
3497 | u64 u64temp; | ||
3498 | unsigned long flags = 0; | ||
3499 | struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); | ||
3500 | struct nes_device *nesdev = nesvnic->nesdev; | ||
3501 | struct nes_qp *nesqp = to_nesqp(ibqp); | ||
3502 | struct nes_hw_qp_wqe *wqe; | ||
3503 | int err = 0; | ||
3504 | int sge_index; | ||
3505 | u32 qsize = nesqp->hwqp.rq_size; | ||
3506 | u32 head; | ||
3507 | u32 wqe_count = 0; | ||
3508 | u32 counter; | ||
3509 | u32 total_payload_length; | ||
3510 | |||
3511 | if (nesqp->ibqp_state > IB_QPS_RTS) | ||
3512 | return -EINVAL; | ||
3513 | |||
3514 | spin_lock_irqsave(&nesqp->lock, flags); | ||
3515 | |||
3516 | head = nesqp->hwqp.rq_head; | ||
3517 | |||
3518 | while (ib_wr) { | ||
3519 | if (ib_wr->num_sge > nesdev->nesadapter->max_sge) { | ||
3520 | err = -EINVAL; | ||
3521 | break; | ||
3522 | } | ||
3523 | /* Check for RQ overflow */ | ||
3524 | if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) { | ||
3525 | err = -EINVAL; | ||
3526 | break; | ||
3527 | } | ||
3528 | |||
3529 | nes_debug(NES_DBG_IW_RX, "ibwr sge count = %u.\n", ib_wr->num_sge); | ||
3530 | wqe = &nesqp->hwqp.rq_vbase[head]; | ||
3531 | |||
3532 | /* nes_debug(NES_DBG_IW_RX, "QP%u:processing rq wqe at %p, head = %u.\n", | ||
3533 | nesqp->hwqp.qp_id, wqe, head); */ | ||
3534 | nes_fill_init_qp_wqe(wqe, nesqp, head); | ||
3535 | u64temp = (u64)(ib_wr->wr_id); | ||
3536 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, | ||
3537 | u64temp); | ||
3538 | total_payload_length = 0; | ||
3539 | for (sge_index=0; sge_index < ib_wr->num_sge; sge_index++) { | ||
3540 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_FRAG0_LOW_IDX+(sge_index*4), | ||
3541 | ib_wr->sg_list[sge_index].addr); | ||
3542 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_LENGTH0_IDX+(sge_index*4), | ||
3543 | ib_wr->sg_list[sge_index].length); | ||
3544 | set_wqe_32bit_value(wqe->wqe_words,NES_IWARP_RQ_WQE_STAG0_IDX+(sge_index*4), | ||
3545 | ib_wr->sg_list[sge_index].lkey); | ||
3546 | |||
3547 | total_payload_length += ib_wr->sg_list[sge_index].length; | ||
3548 | } | ||
3549 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX, | ||
3550 | total_payload_length); | ||
3551 | |||
3552 | ib_wr = ib_wr->next; | ||
3553 | head++; | ||
3554 | wqe_count++; | ||
3555 | if (head >= qsize) | ||
3556 | head = 0; | ||
3557 | } | ||
3558 | |||
3559 | nesqp->hwqp.rq_head = head; | ||
3560 | barrier(); | ||
3561 | while (wqe_count) { | ||
3562 | counter = min(wqe_count, ((u32)255)); | ||
3563 | wqe_count -= counter; | ||
3564 | nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter<<24) | nesqp->hwqp.qp_id); | ||
3565 | } | ||
3566 | |||
3567 | spin_unlock_irqrestore(&nesqp->lock, flags); | ||
3568 | |||
3569 | if (err) | ||
3570 | *bad_wr = ib_wr; | ||
3571 | return err; | ||
3572 | } | ||
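Both post paths finish by writing the WQE_ALLOC doorbell with the number of new WQEs in the top byte and the QP id in the low bits, so at most 255 WQEs can be announced per write and larger batches are split across several writes; the send side additionally sets the 0x00800000 bit seen earlier. A sketch of that batching with the register write replaced by a printf:

#include <stdint.h>
#include <stdio.h>

/* Minimal model of the WQE_ALLOC doorbell batching: the WQE count goes
 * in bits 31..24, so counts above 255 need more than one write; qp_id
 * sits in the low bits and 0x00800000 marks the send queue. */
static void ring_doorbell(uint32_t wqe_count, uint32_t qp_id, int is_sq)
{
        while (wqe_count) {
                uint32_t counter = wqe_count < 255 ? wqe_count : 255;

                wqe_count -= counter;
                printf("write 0x%08x to WQE_ALLOC\n",
                       (counter << 24) | (is_sq ? 0x00800000 : 0) | qp_id);
        }
}

int main(void)
{
        ring_doorbell(3, 17, 1);   /* one SQ write                 */
        ring_doorbell(300, 17, 0); /* two RQ writes: 255 + 45 WQEs */
        return 0;
}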
3573 | |||
3574 | |||
3575 | /** | ||
3576 | * nes_poll_cq | ||
3577 | */ | ||
3578 | static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | ||
3579 | { | ||
3580 | u64 u64temp; | ||
3581 | u64 wrid; | ||
3582 | /* u64 u64temp; */ | ||
3583 | unsigned long flags = 0; | ||
3584 | struct nes_vnic *nesvnic = to_nesvnic(ibcq->device); | ||
3585 | struct nes_device *nesdev = nesvnic->nesdev; | ||
3586 | struct nes_cq *nescq = to_nescq(ibcq); | ||
3587 | struct nes_qp *nesqp; | ||
3588 | struct nes_hw_cqe cqe; | ||
3589 | u32 head; | ||
3590 | u32 wq_tail; | ||
3591 | u32 cq_size; | ||
3592 | u32 cqe_count = 0; | ||
3593 | u32 wqe_index; | ||
3594 | u32 u32temp; | ||
3595 | /* u32 counter; */ | ||
3596 | |||
3597 | nes_debug(NES_DBG_CQ, "\n"); | ||
3598 | |||
3599 | spin_lock_irqsave(&nescq->lock, flags); | ||
3600 | |||
3601 | head = nescq->hw_cq.cq_head; | ||
3602 | cq_size = nescq->hw_cq.cq_size; | ||
3603 | |||
3604 | while (cqe_count < num_entries) { | ||
3605 | if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & | ||
3606 | NES_CQE_VALID) { | ||
3607 | cqe = nescq->hw_cq.cq_vbase[head]; | ||
3608 | nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; | ||
3609 | u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); | ||
3610 | wqe_index = u32temp & | ||
3611 | (nesdev->nesadapter->max_qp_wr - 1); | ||
3612 | u32temp &= ~(NES_SW_CONTEXT_ALIGN-1); | ||
3613 | /* parse CQE, get completion context from WQE (either rq or sq) */ | ||
3614 | u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) | | ||
3615 | ((u64)u32temp); | ||
3616 | nesqp = *((struct nes_qp **)&u64temp); | ||
3617 | memset(entry, 0, sizeof *entry); | ||
3618 | if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) { | ||
3619 | entry->status = IB_WC_SUCCESS; | ||
3620 | } else { | ||
3621 | entry->status = IB_WC_WR_FLUSH_ERR; | ||
3622 | } | ||
3623 | |||
3624 | entry->qp = &nesqp->ibqp; | ||
3625 | entry->src_qp = nesqp->hwqp.qp_id; | ||
3626 | |||
3627 | if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) { | ||
3628 | if (nesqp->skip_lsmm) { | ||
3629 | nesqp->skip_lsmm = 0; | ||
3630 | wq_tail = nesqp->hwqp.sq_tail++; | ||
3631 | } | ||
3632 | |||
3633 | /* Working on an SQ Completion */ | ||
3634 | wq_tail = wqe_index; | ||
3635 | nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1); | ||
3636 | wrid = (((u64)(le32_to_cpu((u32)nesqp->hwqp.sq_vbase[wq_tail]. | ||
3637 | wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) | | ||
3638 | ((u64)(le32_to_cpu((u32)nesqp->hwqp.sq_vbase[wq_tail]. | ||
3639 | wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX]))); | ||
3640 | entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. | ||
3641 | wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]); | ||
3642 | |||
3643 | switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. | ||
3644 | wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) { | ||
3645 | case NES_IWARP_SQ_OP_RDMAW: | ||
3646 | nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n"); | ||
3647 | entry->opcode = IB_WC_RDMA_WRITE; | ||
3648 | break; | ||
3649 | case NES_IWARP_SQ_OP_RDMAR: | ||
3650 | nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n"); | ||
3651 | entry->opcode = IB_WC_RDMA_READ; | ||
3652 | entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. | ||
3653 | wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]); | ||
3654 | break; | ||
3655 | case NES_IWARP_SQ_OP_SENDINV: | ||
3656 | case NES_IWARP_SQ_OP_SENDSEINV: | ||
3657 | case NES_IWARP_SQ_OP_SEND: | ||
3658 | case NES_IWARP_SQ_OP_SENDSE: | ||
3659 | nes_debug(NES_DBG_CQ, "Operation = Send.\n"); | ||
3660 | entry->opcode = IB_WC_SEND; | ||
3661 | break; | ||
3662 | } | ||
3663 | } else { | ||
3664 | /* Working on an RQ Completion */ | ||
3665 | wq_tail = wqe_index; | ||
3666 | nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1); | ||
3667 | entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]); | ||
3668 | wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) | | ||
3669 | ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32); | ||
3670 | entry->opcode = IB_WC_RECV; | ||
3671 | } | ||
3672 | entry->wr_id = wrid; | ||
3673 | |||
3674 | if (++head >= cq_size) | ||
3675 | head = 0; | ||
3676 | cqe_count++; | ||
3677 | nescq->polled_completions++; | ||
3678 | if ((nescq->polled_completions > (cq_size / 2)) || | ||
3679 | (nescq->polled_completions == 255)) { | ||
3680 | nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes" | ||
3681 | " are pending %u of %u.\n", | ||
3682 | nescq->hw_cq.cq_number, nescq->polled_completions, cq_size); | ||
3683 | nes_write32(nesdev->regs+NES_CQE_ALLOC, | ||
3684 | nescq->hw_cq.cq_number | (nescq->polled_completions << 16)); | ||
3685 | nescq->polled_completions = 0; | ||
3686 | } | ||
3687 | entry++; | ||
3688 | } else | ||
3689 | break; | ||
3690 | } | ||
3691 | |||
3692 | if (nescq->polled_completions) { | ||
3693 | nes_write32(nesdev->regs+NES_CQE_ALLOC, | ||
3694 | nescq->hw_cq.cq_number | (nescq->polled_completions << 16)); | ||
3695 | nescq->polled_completions = 0; | ||
3696 | } | ||
3697 | |||
3698 | nescq->hw_cq.cq_head = head; | ||
3699 | nes_debug(NES_DBG_CQ, "Reporting %u completions for CQ%u.\n", | ||
3700 | cqe_count, nescq->hw_cq.cq_number); | ||
3701 | |||
3702 | spin_unlock_irqrestore(&nescq->lock, flags); | ||
3703 | |||
3704 | return cqe_count; | ||
3705 | } | ||
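nes_poll_cq rebuilds two 64-bit values from pairs of 32-bit words: the completion context, whose low bits double as the WQE index (masked with max_qp_wr - 1) and whose remaining bits, once aligned down to NES_SW_CONTEXT_ALIGN, give back the nes_qp pointer, and the caller's wr_id from the two scratch words. A user-space sketch of that pack/unpack round trip; the alignment and queue-depth values here are invented stand-ins for NES_SW_CONTEXT_ALIGN and nesadapter->max_qp_wr:

#include <stdint.h>
#include <stdio.h>

#define FAKE_CONTEXT_ALIGN 256 /* stand-in for NES_SW_CONTEXT_ALIGN   */
#define FAKE_MAX_QP_WR     128 /* stand-in for nesadapter->max_qp_wr  */

int main(void)
{
        /* Pretend the QP context lives at this (aligned) address and the
         * completed WQE was slot 37: both are packed into one 64-bit
         * value and then split back out of the CQE's low/high words. */
        uint64_t qp_context = 0x00007f3200041200ULL;
        uint32_t wqe_index  = 37;

        uint32_t low  = (uint32_t)(qp_context | wqe_index);
        uint32_t high = (uint32_t)(qp_context >> 32);

        uint32_t idx = low & (FAKE_MAX_QP_WR - 1);
        uint64_t ctx = ((uint64_t)high << 32) |
                       (low & ~(uint32_t)(FAKE_CONTEXT_ALIGN - 1));

        printf("wqe_index=%u, context=0x%016llx\n",
               idx, (unsigned long long)ctx);
        return 0;
}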
3706 | |||
3707 | |||
3708 | /** | ||
3709 | * nes_req_notify_cq | ||
3710 | */ | ||
3711 | static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) | ||
3712 | { | ||
3713 | struct nes_vnic *nesvnic = to_nesvnic(ibcq->device); | ||
3714 | struct nes_device *nesdev = nesvnic->nesdev; | ||
3715 | struct nes_cq *nescq = to_nescq(ibcq); | ||
3716 | u32 cq_arm; | ||
3717 | |||
3718 | nes_debug(NES_DBG_CQ, "Requesting notification for CQ%u.\n", | ||
3719 | nescq->hw_cq.cq_number); | ||
3720 | |||
3721 | cq_arm = nescq->hw_cq.cq_number; | ||
3722 | if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP) | ||
3723 | cq_arm |= NES_CQE_ALLOC_NOTIFY_NEXT; | ||
3724 | else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) | ||
3725 | cq_arm |= NES_CQE_ALLOC_NOTIFY_SE; | ||
3726 | else | ||
3727 | return -EINVAL; | ||
3728 | |||
3729 | nes_write32(nesdev->regs+NES_CQE_ALLOC, cq_arm); | ||
3730 | nes_read32(nesdev->regs+NES_CQE_ALLOC); | ||
3731 | |||
3732 | return 0; | ||
3733 | } | ||
3734 | |||
3735 | |||
3736 | /** | ||
3737 | * nes_init_ofa_device | ||
3738 | */ | ||
3739 | struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) | ||
3740 | { | ||
3741 | struct nes_ib_device *nesibdev; | ||
3742 | struct nes_vnic *nesvnic = netdev_priv(netdev); | ||
3743 | struct nes_device *nesdev = nesvnic->nesdev; | ||
3744 | |||
3745 | nesibdev = (struct nes_ib_device *)ib_alloc_device(sizeof(struct nes_ib_device)); | ||
3746 | if (nesibdev == NULL) { | ||
3747 | return NULL; | ||
3748 | } | ||
3749 | strlcpy(nesibdev->ibdev.name, "nes%d", IB_DEVICE_NAME_MAX); | ||
3750 | nesibdev->ibdev.owner = THIS_MODULE; | ||
3751 | |||
3752 | nesibdev->ibdev.node_type = RDMA_NODE_RNIC; | ||
3753 | memset(&nesibdev->ibdev.node_guid, 0, sizeof(nesibdev->ibdev.node_guid)); | ||
3754 | memcpy(&nesibdev->ibdev.node_guid, netdev->dev_addr, 6); | ||
3755 | |||
3756 | nesibdev->ibdev.uverbs_cmd_mask = | ||
3757 | (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | | ||
3758 | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | | ||
3759 | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | | ||
3760 | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | | ||
3761 | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | | ||
3762 | (1ull << IB_USER_VERBS_CMD_REG_MR) | | ||
3763 | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | | ||
3764 | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | | ||
3765 | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | | ||
3766 | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | | ||
3767 | (1ull << IB_USER_VERBS_CMD_CREATE_AH) | | ||
3768 | (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | | ||
3769 | (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | | ||
3770 | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | | ||
3771 | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | | ||
3772 | (1ull << IB_USER_VERBS_CMD_POLL_CQ) | | ||
3773 | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | | ||
3774 | (1ull << IB_USER_VERBS_CMD_ALLOC_MW) | | ||
3775 | (1ull << IB_USER_VERBS_CMD_BIND_MW) | | ||
3776 | (1ull << IB_USER_VERBS_CMD_DEALLOC_MW) | | ||
3777 | (1ull << IB_USER_VERBS_CMD_POST_RECV) | | ||
3778 | (1ull << IB_USER_VERBS_CMD_POST_SEND); | ||
3779 | |||
3780 | nesibdev->ibdev.phys_port_cnt = 1; | ||
3781 | nesibdev->ibdev.num_comp_vectors = 1; | ||
3782 | nesibdev->ibdev.dma_device = &nesdev->pcidev->dev; | ||
3783 | nesibdev->ibdev.class_dev.dev = &nesdev->pcidev->dev; | ||
3784 | nesibdev->ibdev.query_device = nes_query_device; | ||
3785 | nesibdev->ibdev.query_port = nes_query_port; | ||
3786 | nesibdev->ibdev.modify_port = nes_modify_port; | ||
3787 | nesibdev->ibdev.query_pkey = nes_query_pkey; | ||
3788 | nesibdev->ibdev.query_gid = nes_query_gid; | ||
3789 | nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext; | ||
3790 | nesibdev->ibdev.dealloc_ucontext = nes_dealloc_ucontext; | ||
3791 | nesibdev->ibdev.mmap = nes_mmap; | ||
3792 | nesibdev->ibdev.alloc_pd = nes_alloc_pd; | ||
3793 | nesibdev->ibdev.dealloc_pd = nes_dealloc_pd; | ||
3794 | nesibdev->ibdev.create_ah = nes_create_ah; | ||
3795 | nesibdev->ibdev.destroy_ah = nes_destroy_ah; | ||
3796 | nesibdev->ibdev.create_qp = nes_create_qp; | ||
3797 | nesibdev->ibdev.modify_qp = nes_modify_qp; | ||
3798 | nesibdev->ibdev.query_qp = nes_query_qp; | ||
3799 | nesibdev->ibdev.destroy_qp = nes_destroy_qp; | ||
3800 | nesibdev->ibdev.create_cq = nes_create_cq; | ||
3801 | nesibdev->ibdev.destroy_cq = nes_destroy_cq; | ||
3802 | nesibdev->ibdev.poll_cq = nes_poll_cq; | ||
3803 | nesibdev->ibdev.get_dma_mr = nes_get_dma_mr; | ||
3804 | nesibdev->ibdev.reg_phys_mr = nes_reg_phys_mr; | ||
3805 | nesibdev->ibdev.reg_user_mr = nes_reg_user_mr; | ||
3806 | nesibdev->ibdev.dereg_mr = nes_dereg_mr; | ||
3807 | nesibdev->ibdev.alloc_mw = nes_alloc_mw; | ||
3808 | nesibdev->ibdev.dealloc_mw = nes_dealloc_mw; | ||
3809 | nesibdev->ibdev.bind_mw = nes_bind_mw; | ||
3810 | |||
3811 | nesibdev->ibdev.alloc_fmr = nes_alloc_fmr; | ||
3812 | nesibdev->ibdev.unmap_fmr = nes_unmap_fmr; | ||
3813 | nesibdev->ibdev.dealloc_fmr = nes_dealloc_fmr; | ||
3814 | nesibdev->ibdev.map_phys_fmr = nes_map_phys_fmr; | ||
3815 | |||
3816 | nesibdev->ibdev.attach_mcast = nes_multicast_attach; | ||
3817 | nesibdev->ibdev.detach_mcast = nes_multicast_detach; | ||
3818 | nesibdev->ibdev.process_mad = nes_process_mad; | ||
3819 | |||
3820 | nesibdev->ibdev.req_notify_cq = nes_req_notify_cq; | ||
3821 | nesibdev->ibdev.post_send = nes_post_send; | ||
3822 | nesibdev->ibdev.post_recv = nes_post_recv; | ||
3823 | |||
3824 | nesibdev->ibdev.iwcm = kzalloc(sizeof(*nesibdev->ibdev.iwcm), GFP_KERNEL); | ||
3825 | if (nesibdev->ibdev.iwcm == NULL) { | ||
3826 | ib_dealloc_device(&nesibdev->ibdev); | ||
3827 | return NULL; | ||
3828 | } | ||
3829 | nesibdev->ibdev.iwcm->add_ref = nes_add_ref; | ||
3830 | nesibdev->ibdev.iwcm->rem_ref = nes_rem_ref; | ||
3831 | nesibdev->ibdev.iwcm->get_qp = nes_get_qp; | ||
3832 | nesibdev->ibdev.iwcm->connect = nes_connect; | ||
3833 | nesibdev->ibdev.iwcm->accept = nes_accept; | ||
3834 | nesibdev->ibdev.iwcm->reject = nes_reject; | ||
3835 | nesibdev->ibdev.iwcm->create_listen = nes_create_listen; | ||
3836 | nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen; | ||
3837 | |||
3838 | return nesibdev; | ||
3839 | } | ||
3840 | |||
3841 | |||
3842 | /** | ||
3843 | * nes_destroy_ofa_device | ||
3844 | */ | ||
3845 | void nes_destroy_ofa_device(struct nes_ib_device *nesibdev) | ||
3846 | { | ||
3847 | if (nesibdev == NULL) | ||
3848 | return; | ||
3849 | |||
3850 | nes_unregister_ofa_device(nesibdev); | ||
3851 | |||
3852 | kfree(nesibdev->ibdev.iwcm); | ||
3853 | ib_dealloc_device(&nesibdev->ibdev); | ||
3854 | } | ||
3855 | |||
3856 | |||
3857 | /** | ||
3858 | * nes_register_ofa_device | ||
3859 | */ | ||
3860 | int nes_register_ofa_device(struct nes_ib_device *nesibdev) | ||
3861 | { | ||
3862 | struct nes_vnic *nesvnic = nesibdev->nesvnic; | ||
3863 | struct nes_device *nesdev = nesvnic->nesdev; | ||
3864 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
3865 | int i, ret; | ||
3866 | |||
3867 | ret = ib_register_device(&nesvnic->nesibdev->ibdev); | ||
3868 | if (ret) { | ||
3869 | return ret; | ||
3870 | } | ||
3871 | |||
3872 | /* Get the resources allocated to this device */ | ||
3873 | nesibdev->max_cq = (nesadapter->max_cq-NES_FIRST_QPN) / nesadapter->port_count; | ||
3874 | nesibdev->max_mr = nesadapter->max_mr / nesadapter->port_count; | ||
3875 | nesibdev->max_qp = (nesadapter->max_qp-NES_FIRST_QPN) / nesadapter->port_count; | ||
3876 | nesibdev->max_pd = nesadapter->max_pd / nesadapter->port_count; | ||
3877 | |||
3878 | for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) { | ||
3879 | ret = class_device_create_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]); | ||
3880 | if (ret) { | ||
3881 | while (i > 0) { | ||
3882 | i--; | ||
3883 | class_device_remove_file(&nesibdev->ibdev.class_dev, | ||
3884 | nes_class_attributes[i]); | ||
3885 | } | ||
3886 | ib_unregister_device(&nesibdev->ibdev); | ||
3887 | return ret; | ||
3888 | } | ||
3889 | } | ||
3890 | |||
3891 | nesvnic->of_device_registered = 1; | ||
3892 | |||
3893 | return 0; | ||
3894 | } | ||
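The attribute loop in nes_register_ofa_device follows the usual unwind-on-failure shape: if creating sysfs file i fails, files i-1 down to 0 are removed and the IB device is unregistered before the error is returned. The same shape in a self-contained sketch, with the class_device_create_file/class_device_remove_file calls replaced by fallible stubs:

#include <stdio.h>

#define N_ATTRS 4

/* Stubs standing in for the class-device calls; the third attribute is
 * made to fail so the rollback path actually runs. */
static int create_attr(int i)  { return i == 2 ? -1 : 0; }
static void remove_attr(int i) { printf("removed attribute %d\n", i); }

static int register_with_rollback(void)
{
        int i, ret;

        for (i = 0; i < N_ATTRS; i++) {
                ret = create_attr(i);
                if (ret) {
                        while (i > 0) {
                                i--;
                                remove_attr(i);
                        }
                        return ret; /* caller also unregisters the device */
                }
        }
        return 0;
}

int main(void)
{
        printf("register_with_rollback() = %d\n", register_with_rollback());
        return 0;
}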
3895 | |||
3896 | |||
3897 | /** | ||
3898 | * nes_unregister_ofa_device | ||
3899 | */ | ||
3900 | void nes_unregister_ofa_device(struct nes_ib_device *nesibdev) | ||
3901 | { | ||
3902 | struct nes_vnic *nesvnic; | ||
3903 | int i; | ||
3904 | |||
3905 | if (nesibdev == NULL) | ||
3906 | return; | ||
3907 | nesvnic = nesibdev->nesvnic; | ||
3908 | for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) { | ||
3909 | class_device_remove_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]); | ||
3910 | } | ||
3911 | |||
3912 | if (nesvnic->of_device_registered) { | ||
3913 | ib_unregister_device(&nesibdev->ibdev); | ||
3914 | } | ||
3915 | |||
3916 | nesvnic->of_device_registered = 0; | ||
3917 | } | ||
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h new file mode 100644 index 000000000000..6c6b4da5184f --- /dev/null +++ b/drivers/infiniband/hw/nes/nes_verbs.h | |||
@@ -0,0 +1,169 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #ifndef NES_VERBS_H | ||
36 | #define NES_VERBS_H | ||
37 | |||
38 | struct nes_device; | ||
39 | |||
40 | #define NES_MAX_USER_DB_REGIONS 4096 | ||
41 | #define NES_MAX_USER_WQ_REGIONS 4096 | ||
42 | |||
43 | struct nes_ucontext { | ||
44 | struct ib_ucontext ibucontext; | ||
45 | struct nes_device *nesdev; | ||
46 | unsigned long mmap_wq_offset; | ||
47 | unsigned long mmap_cq_offset; /* to be removed */ | ||
48 | int index; /* rnic index (minor) */ | ||
49 | unsigned long allocated_doorbells[BITS_TO_LONGS(NES_MAX_USER_DB_REGIONS)]; | ||
50 | u16 mmap_db_index[NES_MAX_USER_DB_REGIONS]; | ||
51 | u16 first_free_db; | ||
52 | unsigned long allocated_wqs[BITS_TO_LONGS(NES_MAX_USER_WQ_REGIONS)]; | ||
53 | struct nes_qp *mmap_nesqp[NES_MAX_USER_WQ_REGIONS]; | ||
54 | u16 first_free_wq; | ||
55 | struct list_head cq_reg_mem_list; | ||
56 | struct list_head qp_reg_mem_list; | ||
57 | u32 mcrqf; | ||
58 | atomic_t usecnt; | ||
59 | }; | ||
60 | |||
61 | struct nes_pd { | ||
62 | struct ib_pd ibpd; | ||
63 | u16 pd_id; | ||
64 | atomic_t sqp_count; | ||
65 | u16 mmap_db_index; | ||
66 | }; | ||
67 | |||
68 | struct nes_mr { | ||
69 | union { | ||
70 | struct ib_mr ibmr; | ||
71 | struct ib_mw ibmw; | ||
72 | struct ib_fmr ibfmr; | ||
73 | }; | ||
74 | struct ib_umem *region; | ||
75 | u16 pbls_used; | ||
76 | u8 mode; | ||
77 | u8 pbl_4k; | ||
78 | }; | ||
79 | |||
80 | struct nes_hw_pb { | ||
81 | __le32 pa_low; | ||
82 | __le32 pa_high; | ||
83 | }; | ||
84 | |||
85 | struct nes_vpbl { | ||
86 | dma_addr_t pbl_pbase; | ||
87 | struct nes_hw_pb *pbl_vbase; | ||
88 | }; | ||
89 | |||
90 | struct nes_root_vpbl { | ||
91 | dma_addr_t pbl_pbase; | ||
92 | struct nes_hw_pb *pbl_vbase; | ||
93 | struct nes_vpbl *leaf_vpbl; | ||
94 | }; | ||
95 | |||
96 | struct nes_fmr { | ||
97 | struct nes_mr nesmr; | ||
98 | u32 leaf_pbl_cnt; | ||
99 | struct nes_root_vpbl root_vpbl; | ||
100 | struct ib_qp *ib_qp; | ||
101 | int access_rights; | ||
102 | struct ib_fmr_attr attr; | ||
103 | }; | ||
104 | |||
105 | struct nes_av; | ||
106 | |||
107 | struct nes_cq { | ||
108 | struct ib_cq ibcq; | ||
109 | struct nes_hw_cq hw_cq; | ||
110 | u32 polled_completions; | ||
111 | u32 cq_mem_size; | ||
112 | spinlock_t lock; | ||
113 | u8 virtual_cq; | ||
114 | u8 pad[3]; | ||
115 | }; | ||
116 | |||
117 | struct nes_wq { | ||
118 | spinlock_t lock; | ||
119 | }; | ||
120 | |||
121 | struct iw_cm_id; | ||
122 | struct ietf_mpa_frame; | ||
123 | |||
124 | struct nes_qp { | ||
125 | struct ib_qp ibqp; | ||
126 | void *allocated_buffer; | ||
127 | struct iw_cm_id *cm_id; | ||
128 | struct workqueue_struct *wq; | ||
129 | struct work_struct disconn_work; | ||
130 | struct nes_cq *nesscq; | ||
131 | struct nes_cq *nesrcq; | ||
132 | struct nes_pd *nespd; | ||
133 | void *cm_node; /* handle of the node this QP is associated with */ | ||
134 | struct ietf_mpa_frame *ietf_frame; | ||
135 | dma_addr_t ietf_frame_pbase; | ||
136 | wait_queue_head_t state_waitq; | ||
137 | unsigned long socket; | ||
138 | struct nes_hw_qp hwqp; | ||
139 | struct work_struct work; | ||
140 | struct work_struct ae_work; | ||
141 | enum ib_qp_state ibqp_state; | ||
142 | u32 iwarp_state; | ||
143 | u32 hte_index; | ||
144 | u32 last_aeq; | ||
145 | u32 qp_mem_size; | ||
146 | atomic_t refcount; | ||
147 | atomic_t close_timer_started; | ||
148 | u32 mmap_sq_db_index; | ||
149 | u32 mmap_rq_db_index; | ||
150 | spinlock_t lock; | ||
151 | struct nes_qp_context *nesqp_context; | ||
152 | dma_addr_t nesqp_context_pbase; | ||
153 | void *pbl_vbase; | ||
154 | dma_addr_t pbl_pbase; | ||
155 | struct page *page; | ||
156 | wait_queue_head_t kick_waitq; | ||
157 | u16 in_disconnect; | ||
158 | u16 private_data_len; | ||
159 | u8 active_conn; | ||
160 | u8 skip_lsmm; | ||
161 | u8 user_mode; | ||
162 | u8 hte_added; | ||
163 | u8 hw_iwarp_state; | ||
164 | u8 flush_issued; | ||
165 | u8 hw_tcp_state; | ||
166 | u8 disconn_pending; | ||
167 | u8 destroyed; | ||
168 | }; | ||
169 | #endif /* NES_VERBS_H */ | ||
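The to_nesqp(), to_nescq() and to_nesvnic() conversions used throughout nes_verbs.c rely on the ib_* structure being embedded by value in the nes_* structure, as struct nes_qp does with ibqp above: given a pointer to the embedded member, subtracting its offset recovers the containing object. A stand-alone illustration of that container_of idiom with simplified structures (not the driver's definitions, whose helpers live in its headers):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_qp_like { int qp_num; };

struct nes_qp_like {
        struct ib_qp_like ibqp; /* embedded, as in struct nes_qp */
        int hw_qp_id;
};

static struct nes_qp_like *to_nesqp_like(struct ib_qp_like *ibqp)
{
        return container_of(ibqp, struct nes_qp_like, ibqp);
}

int main(void)
{
        struct nes_qp_like qp = { .ibqp = { .qp_num = 7 }, .hw_qp_id = 42 };
        struct ib_qp_like *ib = &qp.ibqp; /* what the verbs core hands back */

        printf("hw_qp_id = %d\n", to_nesqp_like(ib)->hw_qp_id);
        return 0;
}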
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index a082466f4a83..09f5371137a1 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -680,12 +680,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
680 | 680 | ||
681 | neigh = *to_ipoib_neigh(skb->dst->neighbour); | 681 | neigh = *to_ipoib_neigh(skb->dst->neighbour); |
682 | 682 | ||
683 | if (ipoib_cm_get(neigh)) { | 683 | if (neigh->ah) |
684 | if (ipoib_cm_up(neigh)) { | ||
685 | ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); | ||
686 | goto out; | ||
687 | } | ||
688 | } else if (neigh->ah) { | ||
689 | if (unlikely((memcmp(&neigh->dgid.raw, | 684 | if (unlikely((memcmp(&neigh->dgid.raw, |
690 | skb->dst->neighbour->ha + 4, | 685 | skb->dst->neighbour->ha + 4, |
691 | sizeof(union ib_gid))) || | 686 | sizeof(union ib_gid))) || |
@@ -706,6 +701,12 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
706 | goto out; | 701 | goto out; |
707 | } | 702 | } |
708 | 703 | ||
704 | if (ipoib_cm_get(neigh)) { | ||
705 | if (ipoib_cm_up(neigh)) { | ||
706 | ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); | ||
707 | goto out; | ||
708 | } | ||
709 | } else if (neigh->ah) { | ||
709 | ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha)); | 710 | ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha)); |
710 | goto out; | 711 | goto out; |
711 | } | 712 | } |
@@ -813,11 +814,9 @@ static void ipoib_neigh_cleanup(struct neighbour *n) | |||
813 | struct ipoib_ah *ah = NULL; | 814 | struct ipoib_ah *ah = NULL; |
814 | 815 | ||
815 | neigh = *to_ipoib_neigh(n); | 816 | neigh = *to_ipoib_neigh(n); |
816 | if (neigh) { | 817 | if (neigh) |
817 | priv = netdev_priv(neigh->dev); | 818 | priv = netdev_priv(neigh->dev); |
818 | ipoib_dbg(priv, "neigh_destructor for bonding device: %s\n", | 819 | else |
819 | n->dev->name); | ||
820 | } else | ||
821 | return; | 820 | return; |
822 | ipoib_dbg(priv, | 821 | ipoib_dbg(priv, |
823 | "neigh_cleanup for %06x " IPOIB_GID_FMT "\n", | 822 | "neigh_cleanup for %06x " IPOIB_GID_FMT "\n", |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 195ce7c12319..fd4a49fc4773 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -204,6 +204,22 @@ out: | |||
204 | return ret; | 204 | return ret; |
205 | } | 205 | } |
206 | 206 | ||
207 | static int srp_new_cm_id(struct srp_target_port *target) | ||
208 | { | ||
209 | struct ib_cm_id *new_cm_id; | ||
210 | |||
211 | new_cm_id = ib_create_cm_id(target->srp_host->dev->dev, | ||
212 | srp_cm_handler, target); | ||
213 | if (IS_ERR(new_cm_id)) | ||
214 | return PTR_ERR(new_cm_id); | ||
215 | |||
216 | if (target->cm_id) | ||
217 | ib_destroy_cm_id(target->cm_id); | ||
218 | target->cm_id = new_cm_id; | ||
219 | |||
220 | return 0; | ||
221 | } | ||
222 | |||
207 | static int srp_create_target_ib(struct srp_target_port *target) | 223 | static int srp_create_target_ib(struct srp_target_port *target) |
208 | { | 224 | { |
209 | struct ib_qp_init_attr *init_attr; | 225 | struct ib_qp_init_attr *init_attr; |
@@ -436,6 +452,7 @@ static void srp_remove_work(struct work_struct *work) | |||
436 | 452 | ||
437 | static int srp_connect_target(struct srp_target_port *target) | 453 | static int srp_connect_target(struct srp_target_port *target) |
438 | { | 454 | { |
455 | int retries = 3; | ||
439 | int ret; | 456 | int ret; |
440 | 457 | ||
441 | ret = srp_lookup_path(target); | 458 | ret = srp_lookup_path(target); |
@@ -468,6 +485,21 @@ static int srp_connect_target(struct srp_target_port *target) | |||
468 | case SRP_DLID_REDIRECT: | 485 | case SRP_DLID_REDIRECT: |
469 | break; | 486 | break; |
470 | 487 | ||
488 | case SRP_STALE_CONN: | ||
489 | /* Our current CM id was stale, and is now in timewait. | ||
490 | * Try to reconnect with a new one. | ||
491 | */ | ||
492 | if (!retries-- || srp_new_cm_id(target)) { | ||
493 | shost_printk(KERN_ERR, target->scsi_host, PFX | ||
494 | "giving up on stale connection\n"); | ||
495 | target->status = -ECONNRESET; | ||
496 | return target->status; | ||
497 | } | ||
498 | |||
499 | shost_printk(KERN_ERR, target->scsi_host, PFX | ||
500 | "retrying stale connection\n"); | ||
501 | break; | ||
502 | |||
471 | default: | 503 | default: |
472 | return target->status; | 504 | return target->status; |
473 | } | 505 | } |
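The SRP_STALE_CONN handling above retries the connect a bounded number of times, requesting a fresh CM id before each attempt; once retries is exhausted or srp_new_cm_id() fails, it gives up with -ECONNRESET. A compact user-space sketch of that bounded-retry shape with stubbed-out connect and new-id calls:

#include <errno.h>
#include <stdio.h>

#define STALE_CONN 1
#define CONNECTED  0

/* Stubs: the first two attempts come back stale, the third succeeds. */
static int attempt_connect(int n) { return n < 2 ? STALE_CONN : CONNECTED; }
static int new_cm_id(void)        { return 0; } /* 0 = fresh id obtained */

static int connect_target(void)
{
        int retries = 3;
        int attempt = 0;

        for (;;) {
                int status = attempt_connect(attempt++);

                if (status == CONNECTED)
                        return 0;

                /* Stale connection: retry with a fresh CM id a bounded
                 * number of times, mirroring srp_connect_target(). */
                if (!retries-- || new_cm_id()) {
                        fprintf(stderr, "giving up on stale connection\n");
                        return -ECONNRESET;
                }
                fprintf(stderr, "retrying stale connection\n");
        }
}

int main(void)
{
        printf("connect_target() = %d\n", connect_target());
        return 0;
}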
@@ -507,7 +539,6 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re | |||
507 | 539 | ||
508 | static int srp_reconnect_target(struct srp_target_port *target) | 540 | static int srp_reconnect_target(struct srp_target_port *target) |
509 | { | 541 | { |
510 | struct ib_cm_id *new_cm_id; | ||
511 | struct ib_qp_attr qp_attr; | 542 | struct ib_qp_attr qp_attr; |
512 | struct srp_request *req, *tmp; | 543 | struct srp_request *req, *tmp; |
513 | struct ib_wc wc; | 544 | struct ib_wc wc; |
@@ -526,14 +557,9 @@ static int srp_reconnect_target(struct srp_target_port *target) | |||
526 | * Now get a new local CM ID so that we avoid confusing the | 557 | * Now get a new local CM ID so that we avoid confusing the |
527 | * target in case things are really fouled up. | 558 | * target in case things are really fouled up. |
528 | */ | 559 | */ |
529 | new_cm_id = ib_create_cm_id(target->srp_host->dev->dev, | 560 | ret = srp_new_cm_id(target); |
530 | srp_cm_handler, target); | 561 | if (ret) |
531 | if (IS_ERR(new_cm_id)) { | ||
532 | ret = PTR_ERR(new_cm_id); | ||
533 | goto err; | 562 | goto err; |
534 | } | ||
535 | ib_destroy_cm_id(target->cm_id); | ||
536 | target->cm_id = new_cm_id; | ||
537 | 563 | ||
538 | qp_attr.qp_state = IB_QPS_RESET; | 564 | qp_attr.qp_state = IB_QPS_RESET; |
539 | ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE); | 565 | ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE); |
@@ -1171,6 +1197,11 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id, | |||
1171 | target->status = -ECONNRESET; | 1197 | target->status = -ECONNRESET; |
1172 | break; | 1198 | break; |
1173 | 1199 | ||
1200 | case IB_CM_REJ_STALE_CONN: | ||
1201 | shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); | ||
1202 | target->status = SRP_STALE_CONN; | ||
1203 | break; | ||
1204 | |||
1174 | default: | 1205 | default: |
1175 | shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", | 1206 | shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", |
1176 | event->param.rej_rcvd.reason); | 1207 | event->param.rej_rcvd.reason); |
@@ -1862,11 +1893,9 @@ static ssize_t srp_create_target(struct class_device *class_dev, | |||
1862 | if (ret) | 1893 | if (ret) |
1863 | goto err; | 1894 | goto err; |
1864 | 1895 | ||
1865 | target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target); | 1896 | ret = srp_new_cm_id(target); |
1866 | if (IS_ERR(target->cm_id)) { | 1897 | if (ret) |
1867 | ret = PTR_ERR(target->cm_id); | ||
1868 | goto err_free; | 1898 | goto err_free; |
1869 | } | ||
1870 | 1899 | ||
1871 | target->qp_in_error = 0; | 1900 | target->qp_in_error = 0; |
1872 | ret = srp_connect_target(target); | 1901 | ret = srp_connect_target(target); |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index 4a3c1f37e4c2..cb6eb816024a 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h | |||
@@ -54,6 +54,7 @@ enum { | |||
54 | 54 | ||
55 | SRP_PORT_REDIRECT = 1, | 55 | SRP_PORT_REDIRECT = 1, |
56 | SRP_DLID_REDIRECT = 2, | 56 | SRP_DLID_REDIRECT = 2, |
57 | SRP_STALE_CONN = 3, | ||
57 | 58 | ||
58 | SRP_MAX_LUN = 512, | 59 | SRP_MAX_LUN = 512, |
59 | SRP_DEF_SG_TABLESIZE = 12, | 60 | SRP_DEF_SG_TABLESIZE = 12, |
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 64c66b3769c9..4a938780dfc3 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c | |||
@@ -137,12 +137,14 @@ err_out: | |||
137 | EXPORT_SYMBOL_GPL(led_classdev_register); | 137 | EXPORT_SYMBOL_GPL(led_classdev_register); |
138 | 138 | ||
139 | /** | 139 | /** |
140 | * led_classdev_unregister - unregisters a object of led_properties class. | 140 | * __led_classdev_unregister - unregisters a object of led_properties class. |
141 | * @led_cdev: the led device to unregister | 141 | * @led_cdev: the led device to unregister |
142 | * @suspended: indicates whether system-wide suspend or resume is in progress | ||
142 | * | 143 | * |
143 | * Unregisters a previously registered via led_classdev_register object. | 144 | * Unregisters a previously registered via led_classdev_register object. |
144 | */ | 145 | */ |
145 | void led_classdev_unregister(struct led_classdev *led_cdev) | 146 | void __led_classdev_unregister(struct led_classdev *led_cdev, |
147 | bool suspended) | ||
146 | { | 148 | { |
147 | device_remove_file(led_cdev->dev, &dev_attr_brightness); | 149 | device_remove_file(led_cdev->dev, &dev_attr_brightness); |
148 | #ifdef CONFIG_LEDS_TRIGGERS | 150 | #ifdef CONFIG_LEDS_TRIGGERS |
@@ -153,13 +155,16 @@ void led_classdev_unregister(struct led_classdev *led_cdev) | |||
153 | up_write(&led_cdev->trigger_lock); | 155 | up_write(&led_cdev->trigger_lock); |
154 | #endif | 156 | #endif |
155 | 157 | ||
156 | device_unregister(led_cdev->dev); | 158 | if (suspended) |
159 | device_pm_schedule_removal(led_cdev->dev); | ||
160 | else | ||
161 | device_unregister(led_cdev->dev); | ||
157 | 162 | ||
158 | down_write(&leds_list_lock); | 163 | down_write(&leds_list_lock); |
159 | list_del(&led_cdev->node); | 164 | list_del(&led_cdev->node); |
160 | up_write(&leds_list_lock); | 165 | up_write(&leds_list_lock); |
161 | } | 166 | } |
162 | EXPORT_SYMBOL_GPL(led_classdev_unregister); | 167 | EXPORT_SYMBOL_GPL(__led_classdev_unregister); |
163 | 168 | ||
164 | static int __init leds_init(void) | 169 | static int __init leds_init(void) |
165 | { | 170 | { |
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index 01b8eca7ccd5..6e6dd17ab572 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c | |||
@@ -111,7 +111,7 @@ static enum macii_state { | |||
111 | static struct adb_request *current_req; /* first request struct in the queue */ | 111 | static struct adb_request *current_req; /* first request struct in the queue */ |
112 | static struct adb_request *last_req; /* last request struct in the queue */ | 112 | static struct adb_request *last_req; /* last request struct in the queue */ |
113 | static unsigned char reply_buf[16]; /* storage for autopolled replies */ | 113 | static unsigned char reply_buf[16]; /* storage for autopolled replies */ |
114 | static unsigned char *reply_ptr; /* next byte in req->data or reply_buf */ | 114 | static unsigned char *reply_ptr; /* next byte in reply_buf or req->reply */ |
115 | static int reading_reply; /* store reply in reply_buf else req->reply */ | 115 | static int reading_reply; /* store reply in reply_buf else req->reply */ |
116 | static int data_index; /* index of the next byte to send from req->data */ | 116 | static int data_index; /* index of the next byte to send from req->data */ |
117 | static int reply_len; /* number of bytes received in reply_buf or req->reply */ | 117 | static int reply_len; /* number of bytes received in reply_buf or req->reply */ |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f234ba3f0404..7d170cd381c3 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -920,8 +920,7 @@ config ENC28J60 | |||
920 | ---help--- | 920 | ---help--- |
921 | Support for the Microchip EN28J60 ethernet chip. | 921 | Support for the Microchip EN28J60 ethernet chip. |
922 | 922 | ||
923 | To compile this driver as a module, choose M here and read | 923 | To compile this driver as a module, choose M here. The module will be |
924 | <file:Documentation/networking/net-modules.txt>. The module will be | ||
925 | called enc28j60. | 924 | called enc28j60. |
926 | 925 | ||
927 | config ENC28J60_WRITEVERIFY | 926 | config ENC28J60_WRITEVERIFY |
@@ -2041,8 +2040,7 @@ config IGB | |||
2041 | More specific information on configuring the driver is in | 2040 | More specific information on configuring the driver is in |
2042 | <file:Documentation/networking/e1000.txt>. | 2041 | <file:Documentation/networking/e1000.txt>. |
2043 | 2042 | ||
2044 | To compile this driver as a module, choose M here and read | 2043 | To compile this driver as a module, choose M here. The module |
2045 | <file:Documentation/networking/net-modules.txt>. The module | ||
2046 | will be called igb. | 2044 | will be called igb. |
2047 | 2045 | ||
2048 | source "drivers/net/ixp2000/Kconfig" | 2046 | source "drivers/net/ixp2000/Kconfig" |
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index d48c396bdabb..901c824bfe6d 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c | |||
@@ -1070,9 +1070,7 @@ void *cxgb_alloc_mem(unsigned long size) | |||
1070 | */ | 1070 | */ |
1071 | void cxgb_free_mem(void *addr) | 1071 | void cxgb_free_mem(void *addr) |
1072 | { | 1072 | { |
1073 | unsigned long p = (unsigned long)addr; | 1073 | if (is_vmalloc_addr(addr)) |
1074 | |||
1075 | if (p >= VMALLOC_START && p < VMALLOC_END) | ||
1076 | vfree(addr); | 1074 | vfree(addr); |
1077 | else | 1075 | else |
1078 | kfree(addr); | 1076 | kfree(addr); |
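The cxgb3 hunk above replaces an open-coded VMALLOC_START/VMALLOC_END range test with is_vmalloc_addr(), so cxgb_free_mem() no longer needs the address-space bounds at all. A minimal sketch of the kmalloc-with-vmalloc-fallback pattern this free path serves (names other than the allocator calls are illustrative):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *alloc_big_table(unsigned long size)
{
    void *p = kmalloc(size, GFP_KERNEL);    /* prefer physically contiguous memory */

    if (!p)
        p = vmalloc(size);                  /* fall back for large allocations */
    return p;
}

static void free_big_table(void *addr)
{
    if (is_vmalloc_addr(addr))              /* tells the two allocators apart */
        vfree(addr);
    else
        kfree(addr);
}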
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 11b83dae00ac..e04bf9926441 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c | |||
@@ -262,8 +262,8 @@ static void tm_isr(struct scc_priv *priv); | |||
262 | 262 | ||
263 | static int io[MAX_NUM_DEVS] __initdata = { 0, }; | 263 | static int io[MAX_NUM_DEVS] __initdata = { 0, }; |
264 | 264 | ||
265 | /* Beware! hw[] is also used in cleanup_module(). */ | 265 | /* Beware! hw[] is also used in dmascc_exit(). */ |
266 | static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE; | 266 | static struct scc_hardware hw[NUM_TYPES] = HARDWARE; |
267 | 267 | ||
268 | 268 | ||
269 | /* Global variables */ | 269 | /* Global variables */ |
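The dmascc hunk above fixes both the stale comment and the section annotation: hw[] is read again from the exit path, so it cannot sit in init memory, which the kernel discards after boot when the driver is built in. A generic illustration of the rule (the structure and values are illustrative, not the dmascc ones):

#include <linux/init.h>
#include <linux/kernel.h>

struct example_hw { unsigned int io; };

static struct example_hw probe_table[2] __initdata = { { 0x300 }, { 0x320 } }; /* only read from __init code; may be freed */
static struct example_hw runtime_table[2] = { { 0x300 }, { 0x320 } };          /* also read at exit time; must stay resident */

static void __exit example_exit(void)
{
    /* safe only because runtime_table is ordinary static data, not __initdata */
    pr_info("releasing io 0x%x\n", runtime_table[0].io);
}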
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index 535a4461d88c..61dc4951d6b0 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c | |||
@@ -617,9 +617,6 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) | |||
617 | int err; | 617 | int err; |
618 | 618 | ||
619 | #define QUERY_ADAPTER_OUT_SIZE 0x100 | 619 | #define QUERY_ADAPTER_OUT_SIZE 0x100 |
620 | #define QUERY_ADAPTER_VENDOR_ID_OFFSET 0x00 | ||
621 | #define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04 | ||
622 | #define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 | ||
623 | #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 | 620 | #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 |
624 | #define QUERY_ADAPTER_VSD_OFFSET 0x20 | 621 | #define QUERY_ADAPTER_VSD_OFFSET 0x20 |
625 | 622 | ||
@@ -633,9 +630,6 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) | |||
633 | if (err) | 630 | if (err) |
634 | goto out; | 631 | goto out; |
635 | 632 | ||
636 | MLX4_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET); | ||
637 | MLX4_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET); | ||
638 | MLX4_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); | ||
639 | MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); | 633 | MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); |
640 | 634 | ||
641 | get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, | 635 | get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, |
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h index 7e1dd9e25cfb..e16dec890413 100644 --- a/drivers/net/mlx4/fw.h +++ b/drivers/net/mlx4/fw.h | |||
@@ -99,9 +99,6 @@ struct mlx4_dev_cap { | |||
99 | }; | 99 | }; |
100 | 100 | ||
101 | struct mlx4_adapter { | 101 | struct mlx4_adapter { |
102 | u32 vendor_id; | ||
103 | u32 device_id; | ||
104 | u32 revision_id; | ||
105 | char board_id[MLX4_BOARD_ID_LEN]; | 102 | char board_id[MLX4_BOARD_ID_LEN]; |
106 | u8 inta_pin; | 103 | u8 inta_pin; |
107 | }; | 104 | }; |
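The fields dropped from struct mlx4_adapter above (and the matching MLX4_GET reads removed from mlx4_QUERY_ADAPTER) duplicate information the PCI core already holds, so querying them from firmware is redundant. A sketch of picking up the same identifiers from the struct pci_dev the driver probes with (pdev->vendor, pdev->device and pdev->revision are standard PCI core fields; the helper name is illustrative):

#include <linux/pci.h>

static void report_pci_ids(struct pci_dev *pdev)
{
    /* filled in by the PCI core during enumeration */
    dev_info(&pdev->dev, "vendor %04x device %04x revision %02x\n",
             pdev->vendor, pdev->device, pdev->revision);
}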
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 89b3f0b7cdc0..08bfc130a33e 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); | |||
71 | 71 | ||
72 | #endif /* CONFIG_PCI_MSI */ | 72 | #endif /* CONFIG_PCI_MSI */ |
73 | 73 | ||
74 | static const char mlx4_version[] __devinitdata = | 74 | static char mlx4_version[] __devinitdata = |
75 | DRV_NAME ": Mellanox ConnectX core driver v" | 75 | DRV_NAME ": Mellanox ConnectX core driver v" |
76 | DRV_VERSION " (" DRV_RELDATE ")\n"; | 76 | DRV_VERSION " (" DRV_RELDATE ")\n"; |
77 | 77 | ||
@@ -163,7 +163,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
163 | return 0; | 163 | return 0; |
164 | } | 164 | } |
165 | 165 | ||
166 | static int __devinit mlx4_load_fw(struct mlx4_dev *dev) | 166 | static int mlx4_load_fw(struct mlx4_dev *dev) |
167 | { | 167 | { |
168 | struct mlx4_priv *priv = mlx4_priv(dev); | 168 | struct mlx4_priv *priv = mlx4_priv(dev); |
169 | int err; | 169 | int err; |
@@ -197,8 +197,8 @@ err_free: | |||
197 | return err; | 197 | return err; |
198 | } | 198 | } |
199 | 199 | ||
200 | static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, | 200 | static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, |
201 | int cmpt_entry_sz) | 201 | int cmpt_entry_sz) |
202 | { | 202 | { |
203 | struct mlx4_priv *priv = mlx4_priv(dev); | 203 | struct mlx4_priv *priv = mlx4_priv(dev); |
204 | int err; | 204 | int err; |
@@ -534,7 +534,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
534 | } | 534 | } |
535 | 535 | ||
536 | priv->eq_table.inta_pin = adapter.inta_pin; | 536 | priv->eq_table.inta_pin = adapter.inta_pin; |
537 | dev->rev_id = adapter.revision_id; | ||
538 | memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); | 537 | memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); |
539 | 538 | ||
540 | return 0; | 539 | return 0; |
@@ -688,7 +687,7 @@ err_uar_table_free: | |||
688 | return err; | 687 | return err; |
689 | } | 688 | } |
690 | 689 | ||
691 | static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev) | 690 | static void mlx4_enable_msi_x(struct mlx4_dev *dev) |
692 | { | 691 | { |
693 | struct mlx4_priv *priv = mlx4_priv(dev); | 692 | struct mlx4_priv *priv = mlx4_priv(dev); |
694 | struct msix_entry entries[MLX4_NUM_EQ]; | 693 | struct msix_entry entries[MLX4_NUM_EQ]; |
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index 0c05a10bae3b..9c9e308d0917 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c | |||
@@ -122,7 +122,7 @@ static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order) | |||
122 | spin_unlock(&buddy->lock); | 122 | spin_unlock(&buddy->lock); |
123 | } | 123 | } |
124 | 124 | ||
125 | static int __devinit mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) | 125 | static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) |
126 | { | 126 | { |
127 | int i, s; | 127 | int i, s; |
128 | 128 | ||
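The __devinit annotations removed across the mlx4 hunks above (mlx4_load_fw, mlx4_init_cmpt_table, mlx4_enable_msi_x, mlx4_buddy_init) keep those functions in regular text rather than letting them be dropped with the init sections; that matters if they can run again after probe, for example from a device-restart path, which is an inference here rather than something visible in these hunks. A minimal illustration of the distinction (both helpers are made up):

#include <linux/init.h>
#include <linux/pci.h>

static int __devinit probe_time_only(struct pci_dev *pdev)
{
    /* with CONFIG_HOTPLUG=n this lands in init memory and is freed after boot */
    return 0;
}

static int reachable_after_probe(struct pci_dev *pdev)
{
    /* no __devinit: may also run from reset/recovery, so it must stay resident */
    return 0;
}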
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index 36a7ba3134ce..3b78a3819bb3 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c | |||
@@ -230,10 +230,11 @@ static char mii_preamble_required = 0; | |||
230 | static int tc574_config(struct pcmcia_device *link); | 230 | static int tc574_config(struct pcmcia_device *link); |
231 | static void tc574_release(struct pcmcia_device *link); | 231 | static void tc574_release(struct pcmcia_device *link); |
232 | 232 | ||
233 | static void mdio_sync(kio_addr_t ioaddr, int bits); | 233 | static void mdio_sync(unsigned int ioaddr, int bits); |
234 | static int mdio_read(kio_addr_t ioaddr, int phy_id, int location); | 234 | static int mdio_read(unsigned int ioaddr, int phy_id, int location); |
235 | static void mdio_write(kio_addr_t ioaddr, int phy_id, int location, int value); | 235 | static void mdio_write(unsigned int ioaddr, int phy_id, int location, |
236 | static unsigned short read_eeprom(kio_addr_t ioaddr, int index); | 236 | int value); |
237 | static unsigned short read_eeprom(unsigned int ioaddr, int index); | ||
237 | static void tc574_wait_for_completion(struct net_device *dev, int cmd); | 238 | static void tc574_wait_for_completion(struct net_device *dev, int cmd); |
238 | 239 | ||
239 | static void tc574_reset(struct net_device *dev); | 240 | static void tc574_reset(struct net_device *dev); |
@@ -341,7 +342,7 @@ static int tc574_config(struct pcmcia_device *link) | |||
341 | tuple_t tuple; | 342 | tuple_t tuple; |
342 | __le16 buf[32]; | 343 | __le16 buf[32]; |
343 | int last_fn, last_ret, i, j; | 344 | int last_fn, last_ret, i, j; |
344 | kio_addr_t ioaddr; | 345 | unsigned int ioaddr; |
345 | __be16 *phys_addr; | 346 | __be16 *phys_addr; |
346 | char *cardname; | 347 | char *cardname; |
347 | __u32 config; | 348 | __u32 config; |
@@ -515,7 +516,7 @@ static int tc574_resume(struct pcmcia_device *link) | |||
515 | 516 | ||
516 | static void dump_status(struct net_device *dev) | 517 | static void dump_status(struct net_device *dev) |
517 | { | 518 | { |
518 | kio_addr_t ioaddr = dev->base_addr; | 519 | unsigned int ioaddr = dev->base_addr; |
519 | EL3WINDOW(1); | 520 | EL3WINDOW(1); |
520 | printk(KERN_INFO " irq status %04x, rx status %04x, tx status " | 521 | printk(KERN_INFO " irq status %04x, rx status %04x, tx status " |
521 | "%02x, tx free %04x\n", inw(ioaddr+EL3_STATUS), | 522 | "%02x, tx free %04x\n", inw(ioaddr+EL3_STATUS), |
@@ -544,7 +545,7 @@ static void tc574_wait_for_completion(struct net_device *dev, int cmd) | |||
544 | /* Read a word from the EEPROM using the regular EEPROM access register. | 545 | /* Read a word from the EEPROM using the regular EEPROM access register. |
545 | Assume that we are in register window zero. | 546 | Assume that we are in register window zero. |
546 | */ | 547 | */ |
547 | static unsigned short read_eeprom(kio_addr_t ioaddr, int index) | 548 | static unsigned short read_eeprom(unsigned int ioaddr, int index) |
548 | { | 549 | { |
549 | int timer; | 550 | int timer; |
550 | outw(EEPROM_Read + index, ioaddr + Wn0EepromCmd); | 551 | outw(EEPROM_Read + index, ioaddr + Wn0EepromCmd); |
@@ -572,9 +573,9 @@ static unsigned short read_eeprom(kio_addr_t ioaddr, int index) | |||
572 | 573 | ||
573 | /* Generate the preamble required for initial synchronization and | 574 | /* Generate the preamble required for initial synchronization and |
574 | a few older transceivers. */ | 575 | a few older transceivers. */ |
575 | static void mdio_sync(kio_addr_t ioaddr, int bits) | 576 | static void mdio_sync(unsigned int ioaddr, int bits) |
576 | { | 577 | { |
577 | kio_addr_t mdio_addr = ioaddr + Wn4_PhysicalMgmt; | 578 | unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt; |
578 | 579 | ||
579 | /* Establish sync by sending at least 32 logic ones. */ | 580 | /* Establish sync by sending at least 32 logic ones. */ |
580 | while (-- bits >= 0) { | 581 | while (-- bits >= 0) { |
@@ -583,12 +584,12 @@ static void mdio_sync(kio_addr_t ioaddr, int bits) | |||
583 | } | 584 | } |
584 | } | 585 | } |
585 | 586 | ||
586 | static int mdio_read(kio_addr_t ioaddr, int phy_id, int location) | 587 | static int mdio_read(unsigned int ioaddr, int phy_id, int location) |
587 | { | 588 | { |
588 | int i; | 589 | int i; |
589 | int read_cmd = (0xf6 << 10) | (phy_id << 5) | location; | 590 | int read_cmd = (0xf6 << 10) | (phy_id << 5) | location; |
590 | unsigned int retval = 0; | 591 | unsigned int retval = 0; |
591 | kio_addr_t mdio_addr = ioaddr + Wn4_PhysicalMgmt; | 592 | unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt; |
592 | 593 | ||
593 | if (mii_preamble_required) | 594 | if (mii_preamble_required) |
594 | mdio_sync(ioaddr, 32); | 595 | mdio_sync(ioaddr, 32); |
@@ -608,10 +609,10 @@ static int mdio_read(kio_addr_t ioaddr, int phy_id, int location) | |||
608 | return (retval>>1) & 0xffff; | 609 | return (retval>>1) & 0xffff; |
609 | } | 610 | } |
610 | 611 | ||
611 | static void mdio_write(kio_addr_t ioaddr, int phy_id, int location, int value) | 612 | static void mdio_write(unsigned int ioaddr, int phy_id, int location, int value) |
612 | { | 613 | { |
613 | int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value; | 614 | int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value; |
614 | kio_addr_t mdio_addr = ioaddr + Wn4_PhysicalMgmt; | 615 | unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt; |
615 | int i; | 616 | int i; |
616 | 617 | ||
617 | if (mii_preamble_required) | 618 | if (mii_preamble_required) |
@@ -637,7 +638,7 @@ static void tc574_reset(struct net_device *dev) | |||
637 | { | 638 | { |
638 | struct el3_private *lp = netdev_priv(dev); | 639 | struct el3_private *lp = netdev_priv(dev); |
639 | int i; | 640 | int i; |
640 | kio_addr_t ioaddr = dev->base_addr; | 641 | unsigned int ioaddr = dev->base_addr; |
641 | unsigned long flags; | 642 | unsigned long flags; |
642 | 643 | ||
643 | tc574_wait_for_completion(dev, TotalReset|0x10); | 644 | tc574_wait_for_completion(dev, TotalReset|0x10); |
@@ -695,7 +696,7 @@ static void tc574_reset(struct net_device *dev) | |||
695 | mdio_write(ioaddr, lp->phys, 4, lp->advertising); | 696 | mdio_write(ioaddr, lp->phys, 4, lp->advertising); |
696 | if (!auto_polarity) { | 697 | if (!auto_polarity) { |
697 | /* works for TDK 78Q2120 series MII's */ | 698 | /* works for TDK 78Q2120 series MII's */ |
698 | int i = mdio_read(ioaddr, lp->phys, 16) | 0x20; | 699 | i = mdio_read(ioaddr, lp->phys, 16) | 0x20; |
699 | mdio_write(ioaddr, lp->phys, 16, i); | 700 | mdio_write(ioaddr, lp->phys, 16, i); |
700 | } | 701 | } |
701 | 702 | ||
@@ -741,7 +742,7 @@ static int el3_open(struct net_device *dev) | |||
741 | static void el3_tx_timeout(struct net_device *dev) | 742 | static void el3_tx_timeout(struct net_device *dev) |
742 | { | 743 | { |
743 | struct el3_private *lp = netdev_priv(dev); | 744 | struct el3_private *lp = netdev_priv(dev); |
744 | kio_addr_t ioaddr = dev->base_addr; | 745 | unsigned int ioaddr = dev->base_addr; |
745 | 746 | ||
746 | printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name); | 747 | printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name); |
747 | dump_status(dev); | 748 | dump_status(dev); |
@@ -756,7 +757,7 @@ static void el3_tx_timeout(struct net_device *dev) | |||
756 | static void pop_tx_status(struct net_device *dev) | 757 | static void pop_tx_status(struct net_device *dev) |
757 | { | 758 | { |
758 | struct el3_private *lp = netdev_priv(dev); | 759 | struct el3_private *lp = netdev_priv(dev); |
759 | kio_addr_t ioaddr = dev->base_addr; | 760 | unsigned int ioaddr = dev->base_addr; |
760 | int i; | 761 | int i; |
761 | 762 | ||
762 | /* Clear the Tx status stack. */ | 763 | /* Clear the Tx status stack. */ |
@@ -779,7 +780,7 @@ static void pop_tx_status(struct net_device *dev) | |||
779 | 780 | ||
780 | static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev) | 781 | static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
781 | { | 782 | { |
782 | kio_addr_t ioaddr = dev->base_addr; | 783 | unsigned int ioaddr = dev->base_addr; |
783 | struct el3_private *lp = netdev_priv(dev); | 784 | struct el3_private *lp = netdev_priv(dev); |
784 | unsigned long flags; | 785 | unsigned long flags; |
785 | 786 | ||
@@ -813,7 +814,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) | |||
813 | { | 814 | { |
814 | struct net_device *dev = (struct net_device *) dev_id; | 815 | struct net_device *dev = (struct net_device *) dev_id; |
815 | struct el3_private *lp = netdev_priv(dev); | 816 | struct el3_private *lp = netdev_priv(dev); |
816 | kio_addr_t ioaddr; | 817 | unsigned int ioaddr; |
817 | unsigned status; | 818 | unsigned status; |
818 | int work_budget = max_interrupt_work; | 819 | int work_budget = max_interrupt_work; |
819 | int handled = 0; | 820 | int handled = 0; |
@@ -907,7 +908,7 @@ static void media_check(unsigned long arg) | |||
907 | { | 908 | { |
908 | struct net_device *dev = (struct net_device *) arg; | 909 | struct net_device *dev = (struct net_device *) arg; |
909 | struct el3_private *lp = netdev_priv(dev); | 910 | struct el3_private *lp = netdev_priv(dev); |
910 | kio_addr_t ioaddr = dev->base_addr; | 911 | unsigned int ioaddr = dev->base_addr; |
911 | unsigned long flags; | 912 | unsigned long flags; |
912 | unsigned short /* cable, */ media, partner; | 913 | unsigned short /* cable, */ media, partner; |
913 | 914 | ||
@@ -996,7 +997,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev) | |||
996 | static void update_stats(struct net_device *dev) | 997 | static void update_stats(struct net_device *dev) |
997 | { | 998 | { |
998 | struct el3_private *lp = netdev_priv(dev); | 999 | struct el3_private *lp = netdev_priv(dev); |
999 | kio_addr_t ioaddr = dev->base_addr; | 1000 | unsigned int ioaddr = dev->base_addr; |
1000 | u8 rx, tx, up; | 1001 | u8 rx, tx, up; |
1001 | 1002 | ||
1002 | DEBUG(2, "%s: updating the statistics.\n", dev->name); | 1003 | DEBUG(2, "%s: updating the statistics.\n", dev->name); |
@@ -1033,7 +1034,7 @@ static void update_stats(struct net_device *dev) | |||
1033 | static int el3_rx(struct net_device *dev, int worklimit) | 1034 | static int el3_rx(struct net_device *dev, int worklimit) |
1034 | { | 1035 | { |
1035 | struct el3_private *lp = netdev_priv(dev); | 1036 | struct el3_private *lp = netdev_priv(dev); |
1036 | kio_addr_t ioaddr = dev->base_addr; | 1037 | unsigned int ioaddr = dev->base_addr; |
1037 | short rx_status; | 1038 | short rx_status; |
1038 | 1039 | ||
1039 | DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", | 1040 | DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", |
@@ -1094,7 +1095,7 @@ static const struct ethtool_ops netdev_ethtool_ops = { | |||
1094 | static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 1095 | static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
1095 | { | 1096 | { |
1096 | struct el3_private *lp = netdev_priv(dev); | 1097 | struct el3_private *lp = netdev_priv(dev); |
1097 | kio_addr_t ioaddr = dev->base_addr; | 1098 | unsigned int ioaddr = dev->base_addr; |
1098 | u16 *data = (u16 *)&rq->ifr_ifru; | 1099 | u16 *data = (u16 *)&rq->ifr_ifru; |
1099 | int phy = lp->phys & 0x1f; | 1100 | int phy = lp->phys & 0x1f; |
1100 | 1101 | ||
@@ -1148,7 +1149,7 @@ static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
1148 | 1149 | ||
1149 | static void set_rx_mode(struct net_device *dev) | 1150 | static void set_rx_mode(struct net_device *dev) |
1150 | { | 1151 | { |
1151 | kio_addr_t ioaddr = dev->base_addr; | 1152 | unsigned int ioaddr = dev->base_addr; |
1152 | 1153 | ||
1153 | if (dev->flags & IFF_PROMISC) | 1154 | if (dev->flags & IFF_PROMISC) |
1154 | outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm, | 1155 | outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm, |
@@ -1161,7 +1162,7 @@ static void set_rx_mode(struct net_device *dev) | |||
1161 | 1162 | ||
1162 | static int el3_close(struct net_device *dev) | 1163 | static int el3_close(struct net_device *dev) |
1163 | { | 1164 | { |
1164 | kio_addr_t ioaddr = dev->base_addr; | 1165 | unsigned int ioaddr = dev->base_addr; |
1165 | struct el3_private *lp = netdev_priv(dev); | 1166 | struct el3_private *lp = netdev_priv(dev); |
1166 | struct pcmcia_device *link = lp->p_dev; | 1167 | struct pcmcia_device *link = lp->p_dev; |
1167 | 1168 | ||
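The 3c574 hunks above are part of a tree-wide removal of the PCMCIA kio_addr_t typedef: I/O port addresses are now carried as plain unsigned int, and the inb()/outw() accessors take them unchanged. A minimal sketch of the converted pattern (the helper name is illustrative; EL3_STATUS mirrors the driver's status register offset):

#include <linux/netdevice.h>
#include <asm/io.h>

#define EL3_STATUS 0x0e                     /* status register offset, as in the 3Com drivers */

static u16 example_read_status(struct net_device *dev)
{
    unsigned int ioaddr = dev->base_addr;   /* was: kio_addr_t ioaddr */

    return inw(ioaddr + EL3_STATUS);        /* the port accessors are unchanged */
}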
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index e862d14ece79..1b1abb19c911 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c | |||
@@ -145,7 +145,7 @@ DRV_NAME ".c " DRV_VERSION " 2001/10/13 00:08:50 (David Hinds)"; | |||
145 | static int tc589_config(struct pcmcia_device *link); | 145 | static int tc589_config(struct pcmcia_device *link); |
146 | static void tc589_release(struct pcmcia_device *link); | 146 | static void tc589_release(struct pcmcia_device *link); |
147 | 147 | ||
148 | static u16 read_eeprom(kio_addr_t ioaddr, int index); | 148 | static u16 read_eeprom(unsigned int ioaddr, int index); |
149 | static void tc589_reset(struct net_device *dev); | 149 | static void tc589_reset(struct net_device *dev); |
150 | static void media_check(unsigned long arg); | 150 | static void media_check(unsigned long arg); |
151 | static int el3_config(struct net_device *dev, struct ifmap *map); | 151 | static int el3_config(struct net_device *dev, struct ifmap *map); |
@@ -254,7 +254,7 @@ static int tc589_config(struct pcmcia_device *link) | |||
254 | __le16 buf[32]; | 254 | __le16 buf[32]; |
255 | __be16 *phys_addr; | 255 | __be16 *phys_addr; |
256 | int last_fn, last_ret, i, j, multi = 0, fifo; | 256 | int last_fn, last_ret, i, j, multi = 0, fifo; |
257 | kio_addr_t ioaddr; | 257 | unsigned int ioaddr; |
258 | char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; | 258 | char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; |
259 | DECLARE_MAC_BUF(mac); | 259 | DECLARE_MAC_BUF(mac); |
260 | 260 | ||
@@ -403,7 +403,7 @@ static void tc589_wait_for_completion(struct net_device *dev, int cmd) | |||
403 | Read a word from the EEPROM using the regular EEPROM access register. | 403 | Read a word from the EEPROM using the regular EEPROM access register. |
404 | Assume that we are in register window zero. | 404 | Assume that we are in register window zero. |
405 | */ | 405 | */ |
406 | static u16 read_eeprom(kio_addr_t ioaddr, int index) | 406 | static u16 read_eeprom(unsigned int ioaddr, int index) |
407 | { | 407 | { |
408 | int i; | 408 | int i; |
409 | outw(EEPROM_READ + index, ioaddr + 10); | 409 | outw(EEPROM_READ + index, ioaddr + 10); |
@@ -421,7 +421,7 @@ static u16 read_eeprom(kio_addr_t ioaddr, int index) | |||
421 | static void tc589_set_xcvr(struct net_device *dev, int if_port) | 421 | static void tc589_set_xcvr(struct net_device *dev, int if_port) |
422 | { | 422 | { |
423 | struct el3_private *lp = netdev_priv(dev); | 423 | struct el3_private *lp = netdev_priv(dev); |
424 | kio_addr_t ioaddr = dev->base_addr; | 424 | unsigned int ioaddr = dev->base_addr; |
425 | 425 | ||
426 | EL3WINDOW(0); | 426 | EL3WINDOW(0); |
427 | switch (if_port) { | 427 | switch (if_port) { |
@@ -443,7 +443,7 @@ static void tc589_set_xcvr(struct net_device *dev, int if_port) | |||
443 | 443 | ||
444 | static void dump_status(struct net_device *dev) | 444 | static void dump_status(struct net_device *dev) |
445 | { | 445 | { |
446 | kio_addr_t ioaddr = dev->base_addr; | 446 | unsigned int ioaddr = dev->base_addr; |
447 | EL3WINDOW(1); | 447 | EL3WINDOW(1); |
448 | printk(KERN_INFO " irq status %04x, rx status %04x, tx status " | 448 | printk(KERN_INFO " irq status %04x, rx status %04x, tx status " |
449 | "%02x tx free %04x\n", inw(ioaddr+EL3_STATUS), | 449 | "%02x tx free %04x\n", inw(ioaddr+EL3_STATUS), |
@@ -459,7 +459,7 @@ static void dump_status(struct net_device *dev) | |||
459 | /* Reset and restore all of the 3c589 registers. */ | 459 | /* Reset and restore all of the 3c589 registers. */ |
460 | static void tc589_reset(struct net_device *dev) | 460 | static void tc589_reset(struct net_device *dev) |
461 | { | 461 | { |
462 | kio_addr_t ioaddr = dev->base_addr; | 462 | unsigned int ioaddr = dev->base_addr; |
463 | int i; | 463 | int i; |
464 | 464 | ||
465 | EL3WINDOW(0); | 465 | EL3WINDOW(0); |
@@ -567,7 +567,7 @@ static int el3_open(struct net_device *dev) | |||
567 | static void el3_tx_timeout(struct net_device *dev) | 567 | static void el3_tx_timeout(struct net_device *dev) |
568 | { | 568 | { |
569 | struct el3_private *lp = netdev_priv(dev); | 569 | struct el3_private *lp = netdev_priv(dev); |
570 | kio_addr_t ioaddr = dev->base_addr; | 570 | unsigned int ioaddr = dev->base_addr; |
571 | 571 | ||
572 | printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name); | 572 | printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name); |
573 | dump_status(dev); | 573 | dump_status(dev); |
@@ -582,7 +582,7 @@ static void el3_tx_timeout(struct net_device *dev) | |||
582 | static void pop_tx_status(struct net_device *dev) | 582 | static void pop_tx_status(struct net_device *dev) |
583 | { | 583 | { |
584 | struct el3_private *lp = netdev_priv(dev); | 584 | struct el3_private *lp = netdev_priv(dev); |
585 | kio_addr_t ioaddr = dev->base_addr; | 585 | unsigned int ioaddr = dev->base_addr; |
586 | int i; | 586 | int i; |
587 | 587 | ||
588 | /* Clear the Tx status stack. */ | 588 | /* Clear the Tx status stack. */ |
@@ -604,7 +604,7 @@ static void pop_tx_status(struct net_device *dev) | |||
604 | 604 | ||
605 | static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev) | 605 | static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
606 | { | 606 | { |
607 | kio_addr_t ioaddr = dev->base_addr; | 607 | unsigned int ioaddr = dev->base_addr; |
608 | struct el3_private *priv = netdev_priv(dev); | 608 | struct el3_private *priv = netdev_priv(dev); |
609 | unsigned long flags; | 609 | unsigned long flags; |
610 | 610 | ||
@@ -641,7 +641,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) | |||
641 | { | 641 | { |
642 | struct net_device *dev = (struct net_device *) dev_id; | 642 | struct net_device *dev = (struct net_device *) dev_id; |
643 | struct el3_private *lp = netdev_priv(dev); | 643 | struct el3_private *lp = netdev_priv(dev); |
644 | kio_addr_t ioaddr; | 644 | unsigned int ioaddr; |
645 | __u16 status; | 645 | __u16 status; |
646 | int i = 0, handled = 1; | 646 | int i = 0, handled = 1; |
647 | 647 | ||
@@ -727,7 +727,7 @@ static void media_check(unsigned long arg) | |||
727 | { | 727 | { |
728 | struct net_device *dev = (struct net_device *)(arg); | 728 | struct net_device *dev = (struct net_device *)(arg); |
729 | struct el3_private *lp = netdev_priv(dev); | 729 | struct el3_private *lp = netdev_priv(dev); |
730 | kio_addr_t ioaddr = dev->base_addr; | 730 | unsigned int ioaddr = dev->base_addr; |
731 | u16 media, errs; | 731 | u16 media, errs; |
732 | unsigned long flags; | 732 | unsigned long flags; |
733 | 733 | ||
@@ -828,7 +828,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev) | |||
828 | static void update_stats(struct net_device *dev) | 828 | static void update_stats(struct net_device *dev) |
829 | { | 829 | { |
830 | struct el3_private *lp = netdev_priv(dev); | 830 | struct el3_private *lp = netdev_priv(dev); |
831 | kio_addr_t ioaddr = dev->base_addr; | 831 | unsigned int ioaddr = dev->base_addr; |
832 | 832 | ||
833 | DEBUG(2, "%s: updating the statistics.\n", dev->name); | 833 | DEBUG(2, "%s: updating the statistics.\n", dev->name); |
834 | /* Turn off statistics updates while reading. */ | 834 | /* Turn off statistics updates while reading. */ |
@@ -855,7 +855,7 @@ static void update_stats(struct net_device *dev) | |||
855 | static int el3_rx(struct net_device *dev) | 855 | static int el3_rx(struct net_device *dev) |
856 | { | 856 | { |
857 | struct el3_private *lp = netdev_priv(dev); | 857 | struct el3_private *lp = netdev_priv(dev); |
858 | kio_addr_t ioaddr = dev->base_addr; | 858 | unsigned int ioaddr = dev->base_addr; |
859 | int worklimit = 32; | 859 | int worklimit = 32; |
860 | short rx_status; | 860 | short rx_status; |
861 | 861 | ||
@@ -909,7 +909,7 @@ static void set_multicast_list(struct net_device *dev) | |||
909 | { | 909 | { |
910 | struct el3_private *lp = netdev_priv(dev); | 910 | struct el3_private *lp = netdev_priv(dev); |
911 | struct pcmcia_device *link = lp->p_dev; | 911 | struct pcmcia_device *link = lp->p_dev; |
912 | kio_addr_t ioaddr = dev->base_addr; | 912 | unsigned int ioaddr = dev->base_addr; |
913 | u16 opts = SetRxFilter | RxStation | RxBroadcast; | 913 | u16 opts = SetRxFilter | RxStation | RxBroadcast; |
914 | 914 | ||
915 | if (!pcmcia_dev_present(link)) return; | 915 | if (!pcmcia_dev_present(link)) return; |
@@ -924,7 +924,7 @@ static int el3_close(struct net_device *dev) | |||
924 | { | 924 | { |
925 | struct el3_private *lp = netdev_priv(dev); | 925 | struct el3_private *lp = netdev_priv(dev); |
926 | struct pcmcia_device *link = lp->p_dev; | 926 | struct pcmcia_device *link = lp->p_dev; |
927 | kio_addr_t ioaddr = dev->base_addr; | 927 | unsigned int ioaddr = dev->base_addr; |
928 | 928 | ||
929 | DEBUG(1, "%s: shutting down ethercard.\n", dev->name); | 929 | DEBUG(1, "%s: shutting down ethercard.\n", dev->name); |
930 | 930 | ||
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 6d342f6c14f6..e8a63e483a2b 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c | |||
@@ -96,8 +96,8 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id); | |||
96 | static void ei_watchdog(u_long arg); | 96 | static void ei_watchdog(u_long arg); |
97 | static void axnet_reset_8390(struct net_device *dev); | 97 | static void axnet_reset_8390(struct net_device *dev); |
98 | 98 | ||
99 | static int mdio_read(kio_addr_t addr, int phy_id, int loc); | 99 | static int mdio_read(unsigned int addr, int phy_id, int loc); |
100 | static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value); | 100 | static void mdio_write(unsigned int addr, int phy_id, int loc, int value); |
101 | 101 | ||
102 | static void get_8390_hdr(struct net_device *, | 102 | static void get_8390_hdr(struct net_device *, |
103 | struct e8390_pkt_hdr *, int); | 103 | struct e8390_pkt_hdr *, int); |
@@ -203,7 +203,7 @@ static void axnet_detach(struct pcmcia_device *link) | |||
203 | static int get_prom(struct pcmcia_device *link) | 203 | static int get_prom(struct pcmcia_device *link) |
204 | { | 204 | { |
205 | struct net_device *dev = link->priv; | 205 | struct net_device *dev = link->priv; |
206 | kio_addr_t ioaddr = dev->base_addr; | 206 | unsigned int ioaddr = dev->base_addr; |
207 | int i, j; | 207 | int i, j; |
208 | 208 | ||
209 | /* This is based on drivers/net/ne.c */ | 209 | /* This is based on drivers/net/ne.c */ |
@@ -473,7 +473,7 @@ static int axnet_resume(struct pcmcia_device *link) | |||
473 | #define MDIO_MASK 0x0f | 473 | #define MDIO_MASK 0x0f |
474 | #define MDIO_ENB_IN 0x02 | 474 | #define MDIO_ENB_IN 0x02 |
475 | 475 | ||
476 | static void mdio_sync(kio_addr_t addr) | 476 | static void mdio_sync(unsigned int addr) |
477 | { | 477 | { |
478 | int bits; | 478 | int bits; |
479 | for (bits = 0; bits < 32; bits++) { | 479 | for (bits = 0; bits < 32; bits++) { |
@@ -482,7 +482,7 @@ static void mdio_sync(kio_addr_t addr) | |||
482 | } | 482 | } |
483 | } | 483 | } |
484 | 484 | ||
485 | static int mdio_read(kio_addr_t addr, int phy_id, int loc) | 485 | static int mdio_read(unsigned int addr, int phy_id, int loc) |
486 | { | 486 | { |
487 | u_int cmd = (0xf6<<10)|(phy_id<<5)|loc; | 487 | u_int cmd = (0xf6<<10)|(phy_id<<5)|loc; |
488 | int i, retval = 0; | 488 | int i, retval = 0; |
@@ -501,7 +501,7 @@ static int mdio_read(kio_addr_t addr, int phy_id, int loc) | |||
501 | return (retval>>1) & 0xffff; | 501 | return (retval>>1) & 0xffff; |
502 | } | 502 | } |
503 | 503 | ||
504 | static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value) | 504 | static void mdio_write(unsigned int addr, int phy_id, int loc, int value) |
505 | { | 505 | { |
506 | u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value; | 506 | u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value; |
507 | int i; | 507 | int i; |
@@ -575,7 +575,7 @@ static int axnet_close(struct net_device *dev) | |||
575 | 575 | ||
576 | static void axnet_reset_8390(struct net_device *dev) | 576 | static void axnet_reset_8390(struct net_device *dev) |
577 | { | 577 | { |
578 | kio_addr_t nic_base = dev->base_addr; | 578 | unsigned int nic_base = dev->base_addr; |
579 | int i; | 579 | int i; |
580 | 580 | ||
581 | ei_status.txing = ei_status.dmaing = 0; | 581 | ei_status.txing = ei_status.dmaing = 0; |
@@ -610,8 +610,8 @@ static void ei_watchdog(u_long arg) | |||
610 | { | 610 | { |
611 | struct net_device *dev = (struct net_device *)(arg); | 611 | struct net_device *dev = (struct net_device *)(arg); |
612 | axnet_dev_t *info = PRIV(dev); | 612 | axnet_dev_t *info = PRIV(dev); |
613 | kio_addr_t nic_base = dev->base_addr; | 613 | unsigned int nic_base = dev->base_addr; |
614 | kio_addr_t mii_addr = nic_base + AXNET_MII_EEP; | 614 | unsigned int mii_addr = nic_base + AXNET_MII_EEP; |
615 | u_short link; | 615 | u_short link; |
616 | 616 | ||
617 | if (!netif_device_present(dev)) goto reschedule; | 617 | if (!netif_device_present(dev)) goto reschedule; |
@@ -681,7 +681,7 @@ static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
681 | { | 681 | { |
682 | axnet_dev_t *info = PRIV(dev); | 682 | axnet_dev_t *info = PRIV(dev); |
683 | u16 *data = (u16 *)&rq->ifr_ifru; | 683 | u16 *data = (u16 *)&rq->ifr_ifru; |
684 | kio_addr_t mii_addr = dev->base_addr + AXNET_MII_EEP; | 684 | unsigned int mii_addr = dev->base_addr + AXNET_MII_EEP; |
685 | switch (cmd) { | 685 | switch (cmd) { |
686 | case SIOCGMIIPHY: | 686 | case SIOCGMIIPHY: |
687 | data[0] = info->phy_id; | 687 | data[0] = info->phy_id; |
@@ -703,7 +703,7 @@ static void get_8390_hdr(struct net_device *dev, | |||
703 | struct e8390_pkt_hdr *hdr, | 703 | struct e8390_pkt_hdr *hdr, |
704 | int ring_page) | 704 | int ring_page) |
705 | { | 705 | { |
706 | kio_addr_t nic_base = dev->base_addr; | 706 | unsigned int nic_base = dev->base_addr; |
707 | 707 | ||
708 | outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ | 708 | outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ |
709 | outb_p(ring_page, nic_base + EN0_RSARHI); | 709 | outb_p(ring_page, nic_base + EN0_RSARHI); |
@@ -721,7 +721,7 @@ static void get_8390_hdr(struct net_device *dev, | |||
721 | static void block_input(struct net_device *dev, int count, | 721 | static void block_input(struct net_device *dev, int count, |
722 | struct sk_buff *skb, int ring_offset) | 722 | struct sk_buff *skb, int ring_offset) |
723 | { | 723 | { |
724 | kio_addr_t nic_base = dev->base_addr; | 724 | unsigned int nic_base = dev->base_addr; |
725 | int xfer_count = count; | 725 | int xfer_count = count; |
726 | char *buf = skb->data; | 726 | char *buf = skb->data; |
727 | 727 | ||
@@ -744,7 +744,7 @@ static void block_input(struct net_device *dev, int count, | |||
744 | static void block_output(struct net_device *dev, int count, | 744 | static void block_output(struct net_device *dev, int count, |
745 | const u_char *buf, const int start_page) | 745 | const u_char *buf, const int start_page) |
746 | { | 746 | { |
747 | kio_addr_t nic_base = dev->base_addr; | 747 | unsigned int nic_base = dev->base_addr; |
748 | 748 | ||
749 | #ifdef PCMCIA_DEBUG | 749 | #ifdef PCMCIA_DEBUG |
750 | if (ei_debug > 4) | 750 | if (ei_debug > 4) |
@@ -991,7 +991,7 @@ static int ax_open(struct net_device *dev) | |||
991 | * | 991 | * |
992 | * Opposite of ax_open(). Only used when "ifconfig <devname> down" is done. | 992 | * Opposite of ax_open(). Only used when "ifconfig <devname> down" is done. |
993 | */ | 993 | */ |
994 | int ax_close(struct net_device *dev) | 994 | static int ax_close(struct net_device *dev) |
995 | { | 995 | { |
996 | unsigned long flags; | 996 | unsigned long flags; |
997 | 997 | ||
@@ -1014,7 +1014,7 @@ int ax_close(struct net_device *dev) | |||
1014 | * completed (or failed) - i.e. never posted a Tx related interrupt. | 1014 | * completed (or failed) - i.e. never posted a Tx related interrupt. |
1015 | */ | 1015 | */ |
1016 | 1016 | ||
1017 | void ei_tx_timeout(struct net_device *dev) | 1017 | static void ei_tx_timeout(struct net_device *dev) |
1018 | { | 1018 | { |
1019 | long e8390_base = dev->base_addr; | 1019 | long e8390_base = dev->base_addr; |
1020 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | 1020 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); |
@@ -1087,8 +1087,8 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1087 | 1087 | ||
1088 | ei_local->irqlock = 1; | 1088 | ei_local->irqlock = 1; |
1089 | 1089 | ||
1090 | send_length = ETH_ZLEN < length ? length : ETH_ZLEN; | 1090 | send_length = max(length, ETH_ZLEN); |
1091 | 1091 | ||
1092 | /* | 1092 | /* |
1093 | * We have two Tx slots available for use. Find the first free | 1093 | * We have two Tx slots available for use. Find the first free |
1094 | * slot, and then perform some sanity checks. With two Tx bufs, | 1094 | * slot, and then perform some sanity checks. With two Tx bufs, |
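Besides making ax_close() and ei_tx_timeout() static, the axnet hunks above replace an open-coded ternary with the kernel's max() macro when padding short frames up to the Ethernet minimum. A minimal sketch of that padding decision (ETH_ZLEN is the 60-byte minimum frame length from <linux/if_ether.h>; the helper name is illustrative):

#include <linux/kernel.h>
#include <linux/if_ether.h>

static int padded_tx_len(int length)
{
    /* frames shorter than ETH_ZLEN must be padded before hitting the wire */
    return max(length, ETH_ZLEN);
}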
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 949c6df74c97..8f328a03847b 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -298,7 +298,8 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | |||
298 | static int mfc_try_io_port(struct pcmcia_device *link) | 298 | static int mfc_try_io_port(struct pcmcia_device *link) |
299 | { | 299 | { |
300 | int i, ret; | 300 | int i, ret; |
301 | static const kio_addr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; | 301 | static const unsigned int serial_base[5] = |
302 | { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; | ||
302 | 303 | ||
303 | for (i = 0; i < 5; i++) { | 304 | for (i = 0; i < 5; i++) { |
304 | link->io.BasePort2 = serial_base[i]; | 305 | link->io.BasePort2 = serial_base[i]; |
@@ -316,7 +317,7 @@ static int mfc_try_io_port(struct pcmcia_device *link) | |||
316 | static int ungermann_try_io_port(struct pcmcia_device *link) | 317 | static int ungermann_try_io_port(struct pcmcia_device *link) |
317 | { | 318 | { |
318 | int ret; | 319 | int ret; |
319 | kio_addr_t ioaddr; | 320 | unsigned int ioaddr; |
320 | /* | 321 | /* |
321 | Ungermann-Bass Access/CARD accepts 0x300,0x320,0x340,0x360 | 322 | Ungermann-Bass Access/CARD accepts 0x300,0x320,0x340,0x360 |
322 | 0x380,0x3c0 only for ioport. | 323 | 0x380,0x3c0 only for ioport. |
@@ -342,7 +343,7 @@ static int fmvj18x_config(struct pcmcia_device *link) | |||
342 | cisparse_t parse; | 343 | cisparse_t parse; |
343 | u_short buf[32]; | 344 | u_short buf[32]; |
344 | int i, last_fn = 0, last_ret = 0, ret; | 345 | int i, last_fn = 0, last_ret = 0, ret; |
345 | kio_addr_t ioaddr; | 346 | unsigned int ioaddr; |
346 | cardtype_t cardtype; | 347 | cardtype_t cardtype; |
347 | char *card_name = "unknown"; | 348 | char *card_name = "unknown"; |
348 | u_char *node_id; | 349 | u_char *node_id; |
@@ -610,7 +611,7 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link) | |||
610 | u_char __iomem *base; | 611 | u_char __iomem *base; |
611 | int i, j; | 612 | int i, j; |
612 | struct net_device *dev = link->priv; | 613 | struct net_device *dev = link->priv; |
613 | kio_addr_t ioaddr; | 614 | unsigned int ioaddr; |
614 | 615 | ||
615 | /* Allocate a small memory window */ | 616 | /* Allocate a small memory window */ |
616 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; | 617 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; |
@@ -735,7 +736,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id) | |||
735 | { | 736 | { |
736 | struct net_device *dev = dev_id; | 737 | struct net_device *dev = dev_id; |
737 | local_info_t *lp = netdev_priv(dev); | 738 | local_info_t *lp = netdev_priv(dev); |
738 | kio_addr_t ioaddr; | 739 | unsigned int ioaddr; |
739 | unsigned short tx_stat, rx_stat; | 740 | unsigned short tx_stat, rx_stat; |
740 | 741 | ||
741 | ioaddr = dev->base_addr; | 742 | ioaddr = dev->base_addr; |
@@ -789,7 +790,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id) | |||
789 | static void fjn_tx_timeout(struct net_device *dev) | 790 | static void fjn_tx_timeout(struct net_device *dev) |
790 | { | 791 | { |
791 | struct local_info_t *lp = netdev_priv(dev); | 792 | struct local_info_t *lp = netdev_priv(dev); |
792 | kio_addr_t ioaddr = dev->base_addr; | 793 | unsigned int ioaddr = dev->base_addr; |
793 | 794 | ||
794 | printk(KERN_NOTICE "%s: transmit timed out with status %04x, %s?\n", | 795 | printk(KERN_NOTICE "%s: transmit timed out with status %04x, %s?\n", |
795 | dev->name, htons(inw(ioaddr + TX_STATUS)), | 796 | dev->name, htons(inw(ioaddr + TX_STATUS)), |
@@ -819,7 +820,7 @@ static void fjn_tx_timeout(struct net_device *dev) | |||
819 | static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev) | 820 | static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev) |
820 | { | 821 | { |
821 | struct local_info_t *lp = netdev_priv(dev); | 822 | struct local_info_t *lp = netdev_priv(dev); |
822 | kio_addr_t ioaddr = dev->base_addr; | 823 | unsigned int ioaddr = dev->base_addr; |
823 | short length = skb->len; | 824 | short length = skb->len; |
824 | 825 | ||
825 | if (length < ETH_ZLEN) | 826 | if (length < ETH_ZLEN) |
@@ -892,7 +893,7 @@ static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
892 | static void fjn_reset(struct net_device *dev) | 893 | static void fjn_reset(struct net_device *dev) |
893 | { | 894 | { |
894 | struct local_info_t *lp = netdev_priv(dev); | 895 | struct local_info_t *lp = netdev_priv(dev); |
895 | kio_addr_t ioaddr = dev->base_addr; | 896 | unsigned int ioaddr = dev->base_addr; |
896 | int i; | 897 | int i; |
897 | 898 | ||
898 | DEBUG(4, "fjn_reset(%s) called.\n",dev->name); | 899 | DEBUG(4, "fjn_reset(%s) called.\n",dev->name); |
@@ -971,7 +972,7 @@ static void fjn_reset(struct net_device *dev) | |||
971 | static void fjn_rx(struct net_device *dev) | 972 | static void fjn_rx(struct net_device *dev) |
972 | { | 973 | { |
973 | struct local_info_t *lp = netdev_priv(dev); | 974 | struct local_info_t *lp = netdev_priv(dev); |
974 | kio_addr_t ioaddr = dev->base_addr; | 975 | unsigned int ioaddr = dev->base_addr; |
975 | int boguscount = 10; /* 5 -> 10: by agy 19940922 */ | 976 | int boguscount = 10; /* 5 -> 10: by agy 19940922 */ |
976 | 977 | ||
977 | DEBUG(4, "%s: in rx_packet(), rx_status %02x.\n", | 978 | DEBUG(4, "%s: in rx_packet(), rx_status %02x.\n", |
@@ -1125,7 +1126,7 @@ static int fjn_close(struct net_device *dev) | |||
1125 | { | 1126 | { |
1126 | struct local_info_t *lp = netdev_priv(dev); | 1127 | struct local_info_t *lp = netdev_priv(dev); |
1127 | struct pcmcia_device *link = lp->p_dev; | 1128 | struct pcmcia_device *link = lp->p_dev; |
1128 | kio_addr_t ioaddr = dev->base_addr; | 1129 | unsigned int ioaddr = dev->base_addr; |
1129 | 1130 | ||
1130 | DEBUG(4, "fjn_close('%s').\n", dev->name); | 1131 | DEBUG(4, "fjn_close('%s').\n", dev->name); |
1131 | 1132 | ||
@@ -1168,7 +1169,7 @@ static struct net_device_stats *fjn_get_stats(struct net_device *dev) | |||
1168 | 1169 | ||
1169 | static void set_rx_mode(struct net_device *dev) | 1170 | static void set_rx_mode(struct net_device *dev) |
1170 | { | 1171 | { |
1171 | kio_addr_t ioaddr = dev->base_addr; | 1172 | unsigned int ioaddr = dev->base_addr; |
1172 | u_char mc_filter[8]; /* Multicast hash filter */ | 1173 | u_char mc_filter[8]; /* Multicast hash filter */ |
1173 | u_long flags; | 1174 | u_long flags; |
1174 | int i; | 1175 | int i; |
@@ -1197,8 +1198,7 @@ static void set_rx_mode(struct net_device *dev) | |||
1197 | outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ | 1198 | outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ |
1198 | } else { | 1199 | } else { |
1199 | struct dev_mc_list *mclist; | 1200 | struct dev_mc_list *mclist; |
1200 | int i; | 1201 | |
1201 | |||
1202 | memset(mc_filter, 0, sizeof(mc_filter)); | 1202 | memset(mc_filter, 0, sizeof(mc_filter)); |
1203 | for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; | 1203 | for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; |
1204 | i++, mclist = mclist->next) { | 1204 | i++, mclist = mclist->next) { |
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index a355a93b908b..cfcbea9b7e2e 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c | |||
@@ -518,7 +518,7 @@ mace_read | |||
518 | assuming that during normal operation, the MACE is always in | 518 | assuming that during normal operation, the MACE is always in |
519 | bank 0. | 519 | bank 0. |
520 | ---------------------------------------------------------------------------- */ | 520 | ---------------------------------------------------------------------------- */ |
521 | static int mace_read(mace_private *lp, kio_addr_t ioaddr, int reg) | 521 | static int mace_read(mace_private *lp, unsigned int ioaddr, int reg) |
522 | { | 522 | { |
523 | int data = 0xFF; | 523 | int data = 0xFF; |
524 | unsigned long flags; | 524 | unsigned long flags; |
@@ -545,7 +545,8 @@ mace_write | |||
545 | are assuming that during normal operation, the MACE is always in | 545 | are assuming that during normal operation, the MACE is always in |
546 | bank 0. | 546 | bank 0. |
547 | ---------------------------------------------------------------------------- */ | 547 | ---------------------------------------------------------------------------- */ |
548 | static void mace_write(mace_private *lp, kio_addr_t ioaddr, int reg, int data) | 548 | static void mace_write(mace_private *lp, unsigned int ioaddr, int reg, |
549 | int data) | ||
549 | { | 550 | { |
550 | unsigned long flags; | 551 | unsigned long flags; |
551 | 552 | ||
@@ -567,7 +568,7 @@ static void mace_write(mace_private *lp, kio_addr_t ioaddr, int reg, int data) | |||
567 | mace_init | 568 | mace_init |
568 | Resets the MACE chip. | 569 | Resets the MACE chip. |
569 | ---------------------------------------------------------------------------- */ | 570 | ---------------------------------------------------------------------------- */ |
570 | static int mace_init(mace_private *lp, kio_addr_t ioaddr, char *enet_addr) | 571 | static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr) |
571 | { | 572 | { |
572 | int i; | 573 | int i; |
573 | int ct = 0; | 574 | int ct = 0; |
@@ -657,7 +658,7 @@ static int nmclan_config(struct pcmcia_device *link) | |||
657 | tuple_t tuple; | 658 | tuple_t tuple; |
658 | u_char buf[64]; | 659 | u_char buf[64]; |
659 | int i, last_ret, last_fn; | 660 | int i, last_ret, last_fn; |
660 | kio_addr_t ioaddr; | 661 | unsigned int ioaddr; |
661 | DECLARE_MAC_BUF(mac); | 662 | DECLARE_MAC_BUF(mac); |
662 | 663 | ||
663 | DEBUG(0, "nmclan_config(0x%p)\n", link); | 664 | DEBUG(0, "nmclan_config(0x%p)\n", link); |
@@ -839,7 +840,7 @@ mace_open | |||
839 | ---------------------------------------------------------------------------- */ | 840 | ---------------------------------------------------------------------------- */ |
840 | static int mace_open(struct net_device *dev) | 841 | static int mace_open(struct net_device *dev) |
841 | { | 842 | { |
842 | kio_addr_t ioaddr = dev->base_addr; | 843 | unsigned int ioaddr = dev->base_addr; |
843 | mace_private *lp = netdev_priv(dev); | 844 | mace_private *lp = netdev_priv(dev); |
844 | struct pcmcia_device *link = lp->p_dev; | 845 | struct pcmcia_device *link = lp->p_dev; |
845 | 846 | ||
@@ -862,7 +863,7 @@ mace_close | |||
862 | ---------------------------------------------------------------------------- */ | 863 | ---------------------------------------------------------------------------- */ |
863 | static int mace_close(struct net_device *dev) | 864 | static int mace_close(struct net_device *dev) |
864 | { | 865 | { |
865 | kio_addr_t ioaddr = dev->base_addr; | 866 | unsigned int ioaddr = dev->base_addr; |
866 | mace_private *lp = netdev_priv(dev); | 867 | mace_private *lp = netdev_priv(dev); |
867 | struct pcmcia_device *link = lp->p_dev; | 868 | struct pcmcia_device *link = lp->p_dev; |
868 | 869 | ||
@@ -935,7 +936,7 @@ static void mace_tx_timeout(struct net_device *dev) | |||
935 | static int mace_start_xmit(struct sk_buff *skb, struct net_device *dev) | 936 | static int mace_start_xmit(struct sk_buff *skb, struct net_device *dev) |
936 | { | 937 | { |
937 | mace_private *lp = netdev_priv(dev); | 938 | mace_private *lp = netdev_priv(dev); |
938 | kio_addr_t ioaddr = dev->base_addr; | 939 | unsigned int ioaddr = dev->base_addr; |
939 | 940 | ||
940 | netif_stop_queue(dev); | 941 | netif_stop_queue(dev); |
941 | 942 | ||
@@ -996,7 +997,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) | |||
996 | { | 997 | { |
997 | struct net_device *dev = (struct net_device *) dev_id; | 998 | struct net_device *dev = (struct net_device *) dev_id; |
998 | mace_private *lp = netdev_priv(dev); | 999 | mace_private *lp = netdev_priv(dev); |
999 | kio_addr_t ioaddr; | 1000 | unsigned int ioaddr; |
1000 | int status; | 1001 | int status; |
1001 | int IntrCnt = MACE_MAX_IR_ITERATIONS; | 1002 | int IntrCnt = MACE_MAX_IR_ITERATIONS; |
1002 | 1003 | ||
@@ -1140,7 +1141,7 @@ mace_rx | |||
1140 | static int mace_rx(struct net_device *dev, unsigned char RxCnt) | 1141 | static int mace_rx(struct net_device *dev, unsigned char RxCnt) |
1141 | { | 1142 | { |
1142 | mace_private *lp = netdev_priv(dev); | 1143 | mace_private *lp = netdev_priv(dev); |
1143 | kio_addr_t ioaddr = dev->base_addr; | 1144 | unsigned int ioaddr = dev->base_addr; |
1144 | unsigned char rx_framecnt; | 1145 | unsigned char rx_framecnt; |
1145 | unsigned short rx_status; | 1146 | unsigned short rx_status; |
1146 | 1147 | ||
@@ -1302,7 +1303,7 @@ update_stats | |||
1302 | card's SRAM fast enough. If this happens, something is | 1303 | card's SRAM fast enough. If this happens, something is |
1303 | seriously wrong with the hardware. | 1304 | seriously wrong with the hardware. |
1304 | ---------------------------------------------------------------------------- */ | 1305 | ---------------------------------------------------------------------------- */ |
1305 | static void update_stats(kio_addr_t ioaddr, struct net_device *dev) | 1306 | static void update_stats(unsigned int ioaddr, struct net_device *dev) |
1306 | { | 1307 | { |
1307 | mace_private *lp = netdev_priv(dev); | 1308 | mace_private *lp = netdev_priv(dev); |
1308 | 1309 | ||
@@ -1448,7 +1449,7 @@ static void restore_multicast_list(struct net_device *dev) | |||
1448 | mace_private *lp = netdev_priv(dev); | 1449 | mace_private *lp = netdev_priv(dev); |
1449 | int num_addrs = lp->multicast_num_addrs; | 1450 | int num_addrs = lp->multicast_num_addrs; |
1450 | int *ladrf = lp->multicast_ladrf; | 1451 | int *ladrf = lp->multicast_ladrf; |
1451 | kio_addr_t ioaddr = dev->base_addr; | 1452 | unsigned int ioaddr = dev->base_addr; |
1452 | int i; | 1453 | int i; |
1453 | 1454 | ||
1454 | DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", | 1455 | DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", |
@@ -1540,7 +1541,7 @@ static void set_multicast_list(struct net_device *dev) | |||
1540 | 1541 | ||
1541 | static void restore_multicast_list(struct net_device *dev) | 1542 | static void restore_multicast_list(struct net_device *dev) |
1542 | { | 1543 | { |
1543 | kio_addr_t ioaddr = dev->base_addr; | 1544 | unsigned int ioaddr = dev->base_addr; |
1544 | mace_private *lp = netdev_priv(dev); | 1545 | mace_private *lp = netdev_priv(dev); |
1545 | 1546 | ||
1546 | DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", dev->name, | 1547 | DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", dev->name, |
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 9ba56aa26a1b..6323988dfa1d 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -349,7 +349,7 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link) | |||
349 | static hw_info_t *get_prom(struct pcmcia_device *link) | 349 | static hw_info_t *get_prom(struct pcmcia_device *link) |
350 | { | 350 | { |
351 | struct net_device *dev = link->priv; | 351 | struct net_device *dev = link->priv; |
352 | kio_addr_t ioaddr = dev->base_addr; | 352 | unsigned int ioaddr = dev->base_addr; |
353 | u_char prom[32]; | 353 | u_char prom[32]; |
354 | int i, j; | 354 | int i, j; |
355 | 355 | ||
@@ -425,7 +425,7 @@ static hw_info_t *get_dl10019(struct pcmcia_device *link) | |||
425 | static hw_info_t *get_ax88190(struct pcmcia_device *link) | 425 | static hw_info_t *get_ax88190(struct pcmcia_device *link) |
426 | { | 426 | { |
427 | struct net_device *dev = link->priv; | 427 | struct net_device *dev = link->priv; |
428 | kio_addr_t ioaddr = dev->base_addr; | 428 | unsigned int ioaddr = dev->base_addr; |
429 | int i, j; | 429 | int i, j; |
430 | 430 | ||
431 | /* Not much of a test, but the alternatives are messy */ | 431 | /* Not much of a test, but the alternatives are messy */ |
@@ -521,7 +521,7 @@ static int pcnet_config(struct pcmcia_device *link) | |||
521 | int i, last_ret, last_fn, start_pg, stop_pg, cm_offset; | 521 | int i, last_ret, last_fn, start_pg, stop_pg, cm_offset; |
522 | int has_shmem = 0; | 522 | int has_shmem = 0; |
523 | u_short buf[64]; | 523 | u_short buf[64]; |
524 | hw_info_t *hw_info; | 524 | hw_info_t *local_hw_info; |
525 | DECLARE_MAC_BUF(mac); | 525 | DECLARE_MAC_BUF(mac); |
526 | 526 | ||
527 | DEBUG(0, "pcnet_config(0x%p)\n", link); | 527 | DEBUG(0, "pcnet_config(0x%p)\n", link); |
@@ -590,23 +590,23 @@ static int pcnet_config(struct pcmcia_device *link) | |||
590 | dev->if_port = 0; | 590 | dev->if_port = 0; |
591 | } | 591 | } |
592 | 592 | ||
593 | hw_info = get_hwinfo(link); | 593 | local_hw_info = get_hwinfo(link); |
594 | if (hw_info == NULL) | 594 | if (local_hw_info == NULL) |
595 | hw_info = get_prom(link); | 595 | local_hw_info = get_prom(link); |
596 | if (hw_info == NULL) | 596 | if (local_hw_info == NULL) |
597 | hw_info = get_dl10019(link); | 597 | local_hw_info = get_dl10019(link); |
598 | if (hw_info == NULL) | 598 | if (local_hw_info == NULL) |
599 | hw_info = get_ax88190(link); | 599 | local_hw_info = get_ax88190(link); |
600 | if (hw_info == NULL) | 600 | if (local_hw_info == NULL) |
601 | hw_info = get_hwired(link); | 601 | local_hw_info = get_hwired(link); |
602 | 602 | ||
603 | if (hw_info == NULL) { | 603 | if (local_hw_info == NULL) { |
604 | printk(KERN_NOTICE "pcnet_cs: unable to read hardware net" | 604 | printk(KERN_NOTICE "pcnet_cs: unable to read hardware net" |
605 | " address for io base %#3lx\n", dev->base_addr); | 605 | " address for io base %#3lx\n", dev->base_addr); |
606 | goto failed; | 606 | goto failed; |
607 | } | 607 | } |
608 | 608 | ||
609 | info->flags = hw_info->flags; | 609 | info->flags = local_hw_info->flags; |
610 | /* Check for user overrides */ | 610 | /* Check for user overrides */ |
611 | info->flags |= (delay_output) ? DELAY_OUTPUT : 0; | 611 | info->flags |= (delay_output) ? DELAY_OUTPUT : 0; |
612 | if ((link->manf_id == MANFID_SOCKET) && | 612 | if ((link->manf_id == MANFID_SOCKET) && |
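The hw_info to local_hw_info rename in pcnet_config() above avoids shadowing: pcnet_cs also carries file-scope hw_info data (an inference from the rename; the file-scope definition is not visible in this hunk), and a local of the same name hides it and draws -Wshadow warnings. A generic illustration with made-up names:

static int shared_state;        /* file-scope variable */

static void configure(void)
{
    int local_state = 42;       /* renamed local: shared_state stays visible here */

    /* had the local also been called shared_state, every use in this
       function would silently have referred to the local instead */
    shared_state = local_state;
}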
@@ -756,7 +756,7 @@ static int pcnet_resume(struct pcmcia_device *link) | |||
756 | #define MDIO_DATA_READ 0x10 | 756 | #define MDIO_DATA_READ 0x10 |
757 | #define MDIO_MASK 0x0f | 757 | #define MDIO_MASK 0x0f |
758 | 758 | ||
759 | static void mdio_sync(kio_addr_t addr) | 759 | static void mdio_sync(unsigned int addr) |
760 | { | 760 | { |
761 | int bits, mask = inb(addr) & MDIO_MASK; | 761 | int bits, mask = inb(addr) & MDIO_MASK; |
762 | for (bits = 0; bits < 32; bits++) { | 762 | for (bits = 0; bits < 32; bits++) { |
@@ -765,7 +765,7 @@ static void mdio_sync(kio_addr_t addr) | |||
765 | } | 765 | } |
766 | } | 766 | } |
767 | 767 | ||
768 | static int mdio_read(kio_addr_t addr, int phy_id, int loc) | 768 | static int mdio_read(unsigned int addr, int phy_id, int loc) |
769 | { | 769 | { |
770 | u_int cmd = (0x06<<10)|(phy_id<<5)|loc; | 770 | u_int cmd = (0x06<<10)|(phy_id<<5)|loc; |
771 | int i, retval = 0, mask = inb(addr) & MDIO_MASK; | 771 | int i, retval = 0, mask = inb(addr) & MDIO_MASK; |
@@ -784,7 +784,7 @@ static int mdio_read(kio_addr_t addr, int phy_id, int loc) | |||
784 | return (retval>>1) & 0xffff; | 784 | return (retval>>1) & 0xffff; |
785 | } | 785 | } |
786 | 786 | ||
787 | static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value) | 787 | static void mdio_write(unsigned int addr, int phy_id, int loc, int value) |
788 | { | 788 | { |
789 | u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value; | 789 | u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value; |
790 | int i, mask = inb(addr) & MDIO_MASK; | 790 | int i, mask = inb(addr) & MDIO_MASK; |
@@ -818,10 +818,10 @@ static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value) | |||
818 | 818 | ||
819 | #define DL19FDUPLX 0x0400 /* DL10019 Full duplex mode */ | 819 | #define DL19FDUPLX 0x0400 /* DL10019 Full duplex mode */ |
820 | 820 | ||
821 | static int read_eeprom(kio_addr_t ioaddr, int location) | 821 | static int read_eeprom(unsigned int ioaddr, int location) |
822 | { | 822 | { |
823 | int i, retval = 0; | 823 | int i, retval = 0; |
824 | kio_addr_t ee_addr = ioaddr + DLINK_EEPROM; | 824 | unsigned int ee_addr = ioaddr + DLINK_EEPROM; |
825 | int read_cmd = location | (EE_READ_CMD << 8); | 825 | int read_cmd = location | (EE_READ_CMD << 8); |
826 | 826 | ||
827 | outb(0, ee_addr); | 827 | outb(0, ee_addr); |
@@ -852,10 +852,10 @@ static int read_eeprom(kio_addr_t ioaddr, int location) | |||
852 | In ASIC mode, EE_ADOT is used to output the data to the ASIC. | 852 | In ASIC mode, EE_ADOT is used to output the data to the ASIC. |
853 | */ | 853 | */ |
854 | 854 | ||
855 | static void write_asic(kio_addr_t ioaddr, int location, short asic_data) | 855 | static void write_asic(unsigned int ioaddr, int location, short asic_data) |
856 | { | 856 | { |
857 | int i; | 857 | int i; |
858 | kio_addr_t ee_addr = ioaddr + DLINK_EEPROM; | 858 | unsigned int ee_addr = ioaddr + DLINK_EEPROM; |
859 | short dataval; | 859 | short dataval; |
860 | int read_cmd = location | (EE_READ_CMD << 8); | 860 | int read_cmd = location | (EE_READ_CMD << 8); |
861 | 861 | ||
@@ -897,7 +897,7 @@ static void write_asic(kio_addr_t ioaddr, int location, short asic_data) | |||
897 | 897 | ||
898 | static void set_misc_reg(struct net_device *dev) | 898 | static void set_misc_reg(struct net_device *dev) |
899 | { | 899 | { |
900 | kio_addr_t nic_base = dev->base_addr; | 900 | unsigned int nic_base = dev->base_addr; |
901 | pcnet_dev_t *info = PRIV(dev); | 901 | pcnet_dev_t *info = PRIV(dev); |
902 | u_char tmp; | 902 | u_char tmp; |
903 | 903 | ||
@@ -936,7 +936,7 @@ static void set_misc_reg(struct net_device *dev) | |||
936 | static void mii_phy_probe(struct net_device *dev) | 936 | static void mii_phy_probe(struct net_device *dev) |
937 | { | 937 | { |
938 | pcnet_dev_t *info = PRIV(dev); | 938 | pcnet_dev_t *info = PRIV(dev); |
939 | kio_addr_t mii_addr = dev->base_addr + DLINK_GPIO; | 939 | unsigned int mii_addr = dev->base_addr + DLINK_GPIO; |
940 | int i; | 940 | int i; |
941 | u_int tmp, phyid; | 941 | u_int tmp, phyid; |
942 | 942 | ||
@@ -1014,7 +1014,7 @@ static int pcnet_close(struct net_device *dev) | |||
1014 | 1014 | ||
1015 | static void pcnet_reset_8390(struct net_device *dev) | 1015 | static void pcnet_reset_8390(struct net_device *dev) |
1016 | { | 1016 | { |
1017 | kio_addr_t nic_base = dev->base_addr; | 1017 | unsigned int nic_base = dev->base_addr; |
1018 | int i; | 1018 | int i; |
1019 | 1019 | ||
1020 | ei_status.txing = ei_status.dmaing = 0; | 1020 | ei_status.txing = ei_status.dmaing = 0; |
@@ -1074,8 +1074,8 @@ static void ei_watchdog(u_long arg) | |||
1074 | { | 1074 | { |
1075 | struct net_device *dev = (struct net_device *)arg; | 1075 | struct net_device *dev = (struct net_device *)arg; |
1076 | pcnet_dev_t *info = PRIV(dev); | 1076 | pcnet_dev_t *info = PRIV(dev); |
1077 | kio_addr_t nic_base = dev->base_addr; | 1077 | unsigned int nic_base = dev->base_addr; |
1078 | kio_addr_t mii_addr = nic_base + DLINK_GPIO; | 1078 | unsigned int mii_addr = nic_base + DLINK_GPIO; |
1079 | u_short link; | 1079 | u_short link; |
1080 | 1080 | ||
1081 | if (!netif_device_present(dev)) goto reschedule; | 1081 | if (!netif_device_present(dev)) goto reschedule; |
@@ -1177,7 +1177,7 @@ static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
1177 | { | 1177 | { |
1178 | pcnet_dev_t *info = PRIV(dev); | 1178 | pcnet_dev_t *info = PRIV(dev); |
1179 | u16 *data = (u16 *)&rq->ifr_ifru; | 1179 | u16 *data = (u16 *)&rq->ifr_ifru; |
1180 | kio_addr_t mii_addr = dev->base_addr + DLINK_GPIO; | 1180 | unsigned int mii_addr = dev->base_addr + DLINK_GPIO; |
1181 | switch (cmd) { | 1181 | switch (cmd) { |
1182 | case SIOCGMIIPHY: | 1182 | case SIOCGMIIPHY: |
1183 | data[0] = info->phy_id; | 1183 | data[0] = info->phy_id; |
@@ -1199,7 +1199,7 @@ static void dma_get_8390_hdr(struct net_device *dev, | |||
1199 | struct e8390_pkt_hdr *hdr, | 1199 | struct e8390_pkt_hdr *hdr, |
1200 | int ring_page) | 1200 | int ring_page) |
1201 | { | 1201 | { |
1202 | kio_addr_t nic_base = dev->base_addr; | 1202 | unsigned int nic_base = dev->base_addr; |
1203 | 1203 | ||
1204 | if (ei_status.dmaing) { | 1204 | if (ei_status.dmaing) { |
1205 | printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input." | 1205 | printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input." |
@@ -1230,7 +1230,7 @@ static void dma_get_8390_hdr(struct net_device *dev, | |||
1230 | static void dma_block_input(struct net_device *dev, int count, | 1230 | static void dma_block_input(struct net_device *dev, int count, |
1231 | struct sk_buff *skb, int ring_offset) | 1231 | struct sk_buff *skb, int ring_offset) |
1232 | { | 1232 | { |
1233 | kio_addr_t nic_base = dev->base_addr; | 1233 | unsigned int nic_base = dev->base_addr; |
1234 | int xfer_count = count; | 1234 | int xfer_count = count; |
1235 | char *buf = skb->data; | 1235 | char *buf = skb->data; |
1236 | 1236 | ||
@@ -1285,7 +1285,7 @@ static void dma_block_input(struct net_device *dev, int count, | |||
1285 | static void dma_block_output(struct net_device *dev, int count, | 1285 | static void dma_block_output(struct net_device *dev, int count, |
1286 | const u_char *buf, const int start_page) | 1286 | const u_char *buf, const int start_page) |
1287 | { | 1287 | { |
1288 | kio_addr_t nic_base = dev->base_addr; | 1288 | unsigned int nic_base = dev->base_addr; |
1289 | pcnet_dev_t *info = PRIV(dev); | 1289 | pcnet_dev_t *info = PRIV(dev); |
1290 | #ifdef PCMCIA_DEBUG | 1290 | #ifdef PCMCIA_DEBUG |
1291 | int retries = 0; | 1291 | int retries = 0; |
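The pcnet_cs, smc91c92_cs and xirc2ps_cs hunks all make the same mechanical change: the PCMCIA-private kio_addr_t typedef for I/O port numbers is replaced by plain unsigned int. A minimal sketch of the resulting calling convention, assuming the standard port-I/O helpers; the function name and register offset below are illustrative only, not taken from the drivers:

	#include <linux/types.h>
	#include <linux/io.h>

	/* A legacy I/O port number fits comfortably in an unsigned int, so the
	 * PCMCIA-private kio_addr_t typedef is unnecessary; inb()/outb() take
	 * the port number directly. */
	static u8 example_read_csr(unsigned int ioaddr)
	{
		return inb(ioaddr + 0x06);	/* 0x06: made-up register offset */
	}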
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index c9868e9dac4c..f18eca9831e8 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
@@ -295,7 +295,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map); | |||
295 | static void smc_set_xcvr(struct net_device *dev, int if_port); | 295 | static void smc_set_xcvr(struct net_device *dev, int if_port); |
296 | static void smc_reset(struct net_device *dev); | 296 | static void smc_reset(struct net_device *dev); |
297 | static void media_check(u_long arg); | 297 | static void media_check(u_long arg); |
298 | static void mdio_sync(kio_addr_t addr); | 298 | static void mdio_sync(unsigned int addr); |
299 | static int mdio_read(struct net_device *dev, int phy_id, int loc); | 299 | static int mdio_read(struct net_device *dev, int phy_id, int loc); |
300 | static void mdio_write(struct net_device *dev, int phy_id, int loc, int value); | 300 | static void mdio_write(struct net_device *dev, int phy_id, int loc, int value); |
301 | static int smc_link_ok(struct net_device *dev); | 301 | static int smc_link_ok(struct net_device *dev); |
@@ -601,8 +601,8 @@ static void mot_config(struct pcmcia_device *link) | |||
601 | { | 601 | { |
602 | struct net_device *dev = link->priv; | 602 | struct net_device *dev = link->priv; |
603 | struct smc_private *smc = netdev_priv(dev); | 603 | struct smc_private *smc = netdev_priv(dev); |
604 | kio_addr_t ioaddr = dev->base_addr; | 604 | unsigned int ioaddr = dev->base_addr; |
605 | kio_addr_t iouart = link->io.BasePort2; | 605 | unsigned int iouart = link->io.BasePort2; |
606 | 606 | ||
607 | /* Set UART base address and force map with COR bit 1 */ | 607 | /* Set UART base address and force map with COR bit 1 */ |
608 | writeb(iouart & 0xff, smc->base + MOT_UART + CISREG_IOBASE_0); | 608 | writeb(iouart & 0xff, smc->base + MOT_UART + CISREG_IOBASE_0); |
@@ -621,7 +621,7 @@ static void mot_config(struct pcmcia_device *link) | |||
621 | static int mot_setup(struct pcmcia_device *link) | 621 | static int mot_setup(struct pcmcia_device *link) |
622 | { | 622 | { |
623 | struct net_device *dev = link->priv; | 623 | struct net_device *dev = link->priv; |
624 | kio_addr_t ioaddr = dev->base_addr; | 624 | unsigned int ioaddr = dev->base_addr; |
625 | int i, wait, loop; | 625 | int i, wait, loop; |
626 | u_int addr; | 626 | u_int addr; |
627 | 627 | ||
@@ -754,7 +754,7 @@ free_cfg_mem: | |||
754 | static int osi_config(struct pcmcia_device *link) | 754 | static int osi_config(struct pcmcia_device *link) |
755 | { | 755 | { |
756 | struct net_device *dev = link->priv; | 756 | struct net_device *dev = link->priv; |
757 | static const kio_addr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 }; | 757 | static const unsigned int com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 }; |
758 | int i, j; | 758 | int i, j; |
759 | 759 | ||
760 | link->conf.Attributes |= CONF_ENABLE_SPKR; | 760 | link->conf.Attributes |= CONF_ENABLE_SPKR; |
@@ -900,7 +900,7 @@ static int smc91c92_resume(struct pcmcia_device *link) | |||
900 | static int check_sig(struct pcmcia_device *link) | 900 | static int check_sig(struct pcmcia_device *link) |
901 | { | 901 | { |
902 | struct net_device *dev = link->priv; | 902 | struct net_device *dev = link->priv; |
903 | kio_addr_t ioaddr = dev->base_addr; | 903 | unsigned int ioaddr = dev->base_addr; |
904 | int width; | 904 | int width; |
905 | u_short s; | 905 | u_short s; |
906 | 906 | ||
@@ -960,7 +960,7 @@ static int smc91c92_config(struct pcmcia_device *link) | |||
960 | struct smc_private *smc = netdev_priv(dev); | 960 | struct smc_private *smc = netdev_priv(dev); |
961 | char *name; | 961 | char *name; |
962 | int i, j, rev; | 962 | int i, j, rev; |
963 | kio_addr_t ioaddr; | 963 | unsigned int ioaddr; |
964 | u_long mir; | 964 | u_long mir; |
965 | DECLARE_MAC_BUF(mac); | 965 | DECLARE_MAC_BUF(mac); |
966 | 966 | ||
@@ -1136,7 +1136,7 @@ static void smc91c92_release(struct pcmcia_device *link) | |||
1136 | #define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT) | 1136 | #define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT) |
1137 | #define MDIO_DATA_READ 0x02 | 1137 | #define MDIO_DATA_READ 0x02 |
1138 | 1138 | ||
1139 | static void mdio_sync(kio_addr_t addr) | 1139 | static void mdio_sync(unsigned int addr) |
1140 | { | 1140 | { |
1141 | int bits; | 1141 | int bits; |
1142 | for (bits = 0; bits < 32; bits++) { | 1142 | for (bits = 0; bits < 32; bits++) { |
@@ -1147,7 +1147,7 @@ static void mdio_sync(kio_addr_t addr) | |||
1147 | 1147 | ||
1148 | static int mdio_read(struct net_device *dev, int phy_id, int loc) | 1148 | static int mdio_read(struct net_device *dev, int phy_id, int loc) |
1149 | { | 1149 | { |
1150 | kio_addr_t addr = dev->base_addr + MGMT; | 1150 | unsigned int addr = dev->base_addr + MGMT; |
1151 | u_int cmd = (0x06<<10)|(phy_id<<5)|loc; | 1151 | u_int cmd = (0x06<<10)|(phy_id<<5)|loc; |
1152 | int i, retval = 0; | 1152 | int i, retval = 0; |
1153 | 1153 | ||
@@ -1167,7 +1167,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int loc) | |||
1167 | 1167 | ||
1168 | static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) | 1168 | static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) |
1169 | { | 1169 | { |
1170 | kio_addr_t addr = dev->base_addr + MGMT; | 1170 | unsigned int addr = dev->base_addr + MGMT; |
1171 | u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value; | 1171 | u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value; |
1172 | int i; | 1172 | int i; |
1173 | 1173 | ||
@@ -1193,7 +1193,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) | |||
1193 | #ifdef PCMCIA_DEBUG | 1193 | #ifdef PCMCIA_DEBUG |
1194 | static void smc_dump(struct net_device *dev) | 1194 | static void smc_dump(struct net_device *dev) |
1195 | { | 1195 | { |
1196 | kio_addr_t ioaddr = dev->base_addr; | 1196 | unsigned int ioaddr = dev->base_addr; |
1197 | u_short i, w, save; | 1197 | u_short i, w, save; |
1198 | save = inw(ioaddr + BANK_SELECT); | 1198 | save = inw(ioaddr + BANK_SELECT); |
1199 | for (w = 0; w < 4; w++) { | 1199 | for (w = 0; w < 4; w++) { |
@@ -1248,7 +1248,7 @@ static int smc_close(struct net_device *dev) | |||
1248 | { | 1248 | { |
1249 | struct smc_private *smc = netdev_priv(dev); | 1249 | struct smc_private *smc = netdev_priv(dev); |
1250 | struct pcmcia_device *link = smc->p_dev; | 1250 | struct pcmcia_device *link = smc->p_dev; |
1251 | kio_addr_t ioaddr = dev->base_addr; | 1251 | unsigned int ioaddr = dev->base_addr; |
1252 | 1252 | ||
1253 | DEBUG(0, "%s: smc_close(), status %4.4x.\n", | 1253 | DEBUG(0, "%s: smc_close(), status %4.4x.\n", |
1254 | dev->name, inw(ioaddr + BANK_SELECT)); | 1254 | dev->name, inw(ioaddr + BANK_SELECT)); |
@@ -1285,7 +1285,7 @@ static void smc_hardware_send_packet(struct net_device * dev) | |||
1285 | { | 1285 | { |
1286 | struct smc_private *smc = netdev_priv(dev); | 1286 | struct smc_private *smc = netdev_priv(dev); |
1287 | struct sk_buff *skb = smc->saved_skb; | 1287 | struct sk_buff *skb = smc->saved_skb; |
1288 | kio_addr_t ioaddr = dev->base_addr; | 1288 | unsigned int ioaddr = dev->base_addr; |
1289 | u_char packet_no; | 1289 | u_char packet_no; |
1290 | 1290 | ||
1291 | if (!skb) { | 1291 | if (!skb) { |
@@ -1349,7 +1349,7 @@ static void smc_hardware_send_packet(struct net_device * dev) | |||
1349 | static void smc_tx_timeout(struct net_device *dev) | 1349 | static void smc_tx_timeout(struct net_device *dev) |
1350 | { | 1350 | { |
1351 | struct smc_private *smc = netdev_priv(dev); | 1351 | struct smc_private *smc = netdev_priv(dev); |
1352 | kio_addr_t ioaddr = dev->base_addr; | 1352 | unsigned int ioaddr = dev->base_addr; |
1353 | 1353 | ||
1354 | printk(KERN_NOTICE "%s: SMC91c92 transmit timed out, " | 1354 | printk(KERN_NOTICE "%s: SMC91c92 transmit timed out, " |
1355 | "Tx_status %2.2x status %4.4x.\n", | 1355 | "Tx_status %2.2x status %4.4x.\n", |
@@ -1364,7 +1364,7 @@ static void smc_tx_timeout(struct net_device *dev) | |||
1364 | static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1364 | static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1365 | { | 1365 | { |
1366 | struct smc_private *smc = netdev_priv(dev); | 1366 | struct smc_private *smc = netdev_priv(dev); |
1367 | kio_addr_t ioaddr = dev->base_addr; | 1367 | unsigned int ioaddr = dev->base_addr; |
1368 | u_short num_pages; | 1368 | u_short num_pages; |
1369 | short time_out, ir; | 1369 | short time_out, ir; |
1370 | unsigned long flags; | 1370 | unsigned long flags; |
@@ -1434,7 +1434,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1434 | static void smc_tx_err(struct net_device * dev) | 1434 | static void smc_tx_err(struct net_device * dev) |
1435 | { | 1435 | { |
1436 | struct smc_private *smc = netdev_priv(dev); | 1436 | struct smc_private *smc = netdev_priv(dev); |
1437 | kio_addr_t ioaddr = dev->base_addr; | 1437 | unsigned int ioaddr = dev->base_addr; |
1438 | int saved_packet = inw(ioaddr + PNR_ARR) & 0xff; | 1438 | int saved_packet = inw(ioaddr + PNR_ARR) & 0xff; |
1439 | int packet_no = inw(ioaddr + FIFO_PORTS) & 0x7f; | 1439 | int packet_no = inw(ioaddr + FIFO_PORTS) & 0x7f; |
1440 | int tx_status; | 1440 | int tx_status; |
@@ -1478,7 +1478,7 @@ static void smc_tx_err(struct net_device * dev) | |||
1478 | static void smc_eph_irq(struct net_device *dev) | 1478 | static void smc_eph_irq(struct net_device *dev) |
1479 | { | 1479 | { |
1480 | struct smc_private *smc = netdev_priv(dev); | 1480 | struct smc_private *smc = netdev_priv(dev); |
1481 | kio_addr_t ioaddr = dev->base_addr; | 1481 | unsigned int ioaddr = dev->base_addr; |
1482 | u_short card_stats, ephs; | 1482 | u_short card_stats, ephs; |
1483 | 1483 | ||
1484 | SMC_SELECT_BANK(0); | 1484 | SMC_SELECT_BANK(0); |
@@ -1513,7 +1513,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) | |||
1513 | { | 1513 | { |
1514 | struct net_device *dev = dev_id; | 1514 | struct net_device *dev = dev_id; |
1515 | struct smc_private *smc = netdev_priv(dev); | 1515 | struct smc_private *smc = netdev_priv(dev); |
1516 | kio_addr_t ioaddr; | 1516 | unsigned int ioaddr; |
1517 | u_short saved_bank, saved_pointer, mask, status; | 1517 | u_short saved_bank, saved_pointer, mask, status; |
1518 | unsigned int handled = 1; | 1518 | unsigned int handled = 1; |
1519 | char bogus_cnt = INTR_WORK; /* Work we are willing to do. */ | 1519 | char bogus_cnt = INTR_WORK; /* Work we are willing to do. */ |
@@ -1633,7 +1633,7 @@ irq_done: | |||
1633 | static void smc_rx(struct net_device *dev) | 1633 | static void smc_rx(struct net_device *dev) |
1634 | { | 1634 | { |
1635 | struct smc_private *smc = netdev_priv(dev); | 1635 | struct smc_private *smc = netdev_priv(dev); |
1636 | kio_addr_t ioaddr = dev->base_addr; | 1636 | unsigned int ioaddr = dev->base_addr; |
1637 | int rx_status; | 1637 | int rx_status; |
1638 | int packet_length; /* Caution: not frame length, rather words | 1638 | int packet_length; /* Caution: not frame length, rather words |
1639 | to transfer from the chip. */ | 1639 | to transfer from the chip. */ |
@@ -1738,7 +1738,7 @@ static void fill_multicast_tbl(int count, struct dev_mc_list *addrs, | |||
1738 | 1738 | ||
1739 | static void set_rx_mode(struct net_device *dev) | 1739 | static void set_rx_mode(struct net_device *dev) |
1740 | { | 1740 | { |
1741 | kio_addr_t ioaddr = dev->base_addr; | 1741 | unsigned int ioaddr = dev->base_addr; |
1742 | struct smc_private *smc = netdev_priv(dev); | 1742 | struct smc_private *smc = netdev_priv(dev); |
1743 | u_int multicast_table[ 2 ] = { 0, }; | 1743 | u_int multicast_table[ 2 ] = { 0, }; |
1744 | unsigned long flags; | 1744 | unsigned long flags; |
@@ -1804,7 +1804,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map) | |||
1804 | static void smc_set_xcvr(struct net_device *dev, int if_port) | 1804 | static void smc_set_xcvr(struct net_device *dev, int if_port) |
1805 | { | 1805 | { |
1806 | struct smc_private *smc = netdev_priv(dev); | 1806 | struct smc_private *smc = netdev_priv(dev); |
1807 | kio_addr_t ioaddr = dev->base_addr; | 1807 | unsigned int ioaddr = dev->base_addr; |
1808 | u_short saved_bank; | 1808 | u_short saved_bank; |
1809 | 1809 | ||
1810 | saved_bank = inw(ioaddr + BANK_SELECT); | 1810 | saved_bank = inw(ioaddr + BANK_SELECT); |
@@ -1827,7 +1827,7 @@ static void smc_set_xcvr(struct net_device *dev, int if_port) | |||
1827 | 1827 | ||
1828 | static void smc_reset(struct net_device *dev) | 1828 | static void smc_reset(struct net_device *dev) |
1829 | { | 1829 | { |
1830 | kio_addr_t ioaddr = dev->base_addr; | 1830 | unsigned int ioaddr = dev->base_addr; |
1831 | struct smc_private *smc = netdev_priv(dev); | 1831 | struct smc_private *smc = netdev_priv(dev); |
1832 | int i; | 1832 | int i; |
1833 | 1833 | ||
@@ -1904,7 +1904,7 @@ static void media_check(u_long arg) | |||
1904 | { | 1904 | { |
1905 | struct net_device *dev = (struct net_device *) arg; | 1905 | struct net_device *dev = (struct net_device *) arg; |
1906 | struct smc_private *smc = netdev_priv(dev); | 1906 | struct smc_private *smc = netdev_priv(dev); |
1907 | kio_addr_t ioaddr = dev->base_addr; | 1907 | unsigned int ioaddr = dev->base_addr; |
1908 | u_short i, media, saved_bank; | 1908 | u_short i, media, saved_bank; |
1909 | u_short link; | 1909 | u_short link; |
1910 | unsigned long flags; | 1910 | unsigned long flags; |
@@ -2021,7 +2021,7 @@ reschedule: | |||
2021 | 2021 | ||
2022 | static int smc_link_ok(struct net_device *dev) | 2022 | static int smc_link_ok(struct net_device *dev) |
2023 | { | 2023 | { |
2024 | kio_addr_t ioaddr = dev->base_addr; | 2024 | unsigned int ioaddr = dev->base_addr; |
2025 | struct smc_private *smc = netdev_priv(dev); | 2025 | struct smc_private *smc = netdev_priv(dev); |
2026 | 2026 | ||
2027 | if (smc->cfg & CFG_MII_SELECT) { | 2027 | if (smc->cfg & CFG_MII_SELECT) { |
@@ -2035,7 +2035,7 @@ static int smc_link_ok(struct net_device *dev) | |||
2035 | static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) | 2035 | static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) |
2036 | { | 2036 | { |
2037 | u16 tmp; | 2037 | u16 tmp; |
2038 | kio_addr_t ioaddr = dev->base_addr; | 2038 | unsigned int ioaddr = dev->base_addr; |
2039 | 2039 | ||
2040 | ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI | | 2040 | ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI | |
2041 | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full); | 2041 | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full); |
@@ -2057,7 +2057,7 @@ static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2057 | static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) | 2057 | static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) |
2058 | { | 2058 | { |
2059 | u16 tmp; | 2059 | u16 tmp; |
2060 | kio_addr_t ioaddr = dev->base_addr; | 2060 | unsigned int ioaddr = dev->base_addr; |
2061 | 2061 | ||
2062 | if (ecmd->speed != SPEED_10) | 2062 | if (ecmd->speed != SPEED_10) |
2063 | return -EINVAL; | 2063 | return -EINVAL; |
@@ -2100,7 +2100,7 @@ static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info | |||
2100 | static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | 2100 | static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) |
2101 | { | 2101 | { |
2102 | struct smc_private *smc = netdev_priv(dev); | 2102 | struct smc_private *smc = netdev_priv(dev); |
2103 | kio_addr_t ioaddr = dev->base_addr; | 2103 | unsigned int ioaddr = dev->base_addr; |
2104 | u16 saved_bank = inw(ioaddr + BANK_SELECT); | 2104 | u16 saved_bank = inw(ioaddr + BANK_SELECT); |
2105 | int ret; | 2105 | int ret; |
2106 | 2106 | ||
@@ -2118,7 +2118,7 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2118 | static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | 2118 | static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) |
2119 | { | 2119 | { |
2120 | struct smc_private *smc = netdev_priv(dev); | 2120 | struct smc_private *smc = netdev_priv(dev); |
2121 | kio_addr_t ioaddr = dev->base_addr; | 2121 | unsigned int ioaddr = dev->base_addr; |
2122 | u16 saved_bank = inw(ioaddr + BANK_SELECT); | 2122 | u16 saved_bank = inw(ioaddr + BANK_SELECT); |
2123 | int ret; | 2123 | int ret; |
2124 | 2124 | ||
@@ -2136,7 +2136,7 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2136 | static u32 smc_get_link(struct net_device *dev) | 2136 | static u32 smc_get_link(struct net_device *dev) |
2137 | { | 2137 | { |
2138 | struct smc_private *smc = netdev_priv(dev); | 2138 | struct smc_private *smc = netdev_priv(dev); |
2139 | kio_addr_t ioaddr = dev->base_addr; | 2139 | unsigned int ioaddr = dev->base_addr; |
2140 | u16 saved_bank = inw(ioaddr + BANK_SELECT); | 2140 | u16 saved_bank = inw(ioaddr + BANK_SELECT); |
2141 | u32 ret; | 2141 | u32 ret; |
2142 | 2142 | ||
@@ -2164,7 +2164,7 @@ static int smc_nway_reset(struct net_device *dev) | |||
2164 | { | 2164 | { |
2165 | struct smc_private *smc = netdev_priv(dev); | 2165 | struct smc_private *smc = netdev_priv(dev); |
2166 | if (smc->cfg & CFG_MII_SELECT) { | 2166 | if (smc->cfg & CFG_MII_SELECT) { |
2167 | kio_addr_t ioaddr = dev->base_addr; | 2167 | unsigned int ioaddr = dev->base_addr; |
2168 | u16 saved_bank = inw(ioaddr + BANK_SELECT); | 2168 | u16 saved_bank = inw(ioaddr + BANK_SELECT); |
2169 | int res; | 2169 | int res; |
2170 | 2170 | ||
@@ -2196,7 +2196,7 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) | |||
2196 | struct mii_ioctl_data *mii = if_mii(rq); | 2196 | struct mii_ioctl_data *mii = if_mii(rq); |
2197 | int rc = 0; | 2197 | int rc = 0; |
2198 | u16 saved_bank; | 2198 | u16 saved_bank; |
2199 | kio_addr_t ioaddr = dev->base_addr; | 2199 | unsigned int ioaddr = dev->base_addr; |
2200 | 2200 | ||
2201 | if (!netif_running(dev)) | 2201 | if (!netif_running(dev)) |
2202 | return -EINVAL; | 2202 | return -EINVAL; |
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 1f09bea6db5a..d041f831a18d 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c | |||
@@ -273,12 +273,12 @@ INT_MODULE_PARM(lockup_hack, 0); /* anti lockup hack */ | |||
273 | static unsigned maxrx_bytes = 22000; | 273 | static unsigned maxrx_bytes = 22000; |
274 | 274 | ||
275 | /* MII management prototypes */ | 275 | /* MII management prototypes */ |
276 | static void mii_idle(kio_addr_t ioaddr); | 276 | static void mii_idle(unsigned int ioaddr); |
277 | static void mii_putbit(kio_addr_t ioaddr, unsigned data); | 277 | static void mii_putbit(unsigned int ioaddr, unsigned data); |
278 | static int mii_getbit(kio_addr_t ioaddr); | 278 | static int mii_getbit(unsigned int ioaddr); |
279 | static void mii_wbits(kio_addr_t ioaddr, unsigned data, int len); | 279 | static void mii_wbits(unsigned int ioaddr, unsigned data, int len); |
280 | static unsigned mii_rd(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg); | 280 | static unsigned mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg); |
281 | static void mii_wr(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg, | 281 | static void mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg, |
282 | unsigned data, int len); | 282 | unsigned data, int len); |
283 | 283 | ||
284 | /* | 284 | /* |
@@ -403,7 +403,7 @@ next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse) | |||
403 | static void | 403 | static void |
404 | PrintRegisters(struct net_device *dev) | 404 | PrintRegisters(struct net_device *dev) |
405 | { | 405 | { |
406 | kio_addr_t ioaddr = dev->base_addr; | 406 | unsigned int ioaddr = dev->base_addr; |
407 | 407 | ||
408 | if (pc_debug > 1) { | 408 | if (pc_debug > 1) { |
409 | int i, page; | 409 | int i, page; |
@@ -439,7 +439,7 @@ PrintRegisters(struct net_device *dev) | |||
439 | * Turn around for read | 439 | * Turn around for read |
440 | */ | 440 | */ |
441 | static void | 441 | static void |
442 | mii_idle(kio_addr_t ioaddr) | 442 | mii_idle(unsigned int ioaddr) |
443 | { | 443 | { |
444 | PutByte(XIRCREG2_GPR2, 0x04|0); /* drive MDCK low */ | 444 | PutByte(XIRCREG2_GPR2, 0x04|0); /* drive MDCK low */ |
445 | udelay(1); | 445 | udelay(1); |
@@ -451,7 +451,7 @@ mii_idle(kio_addr_t ioaddr) | |||
451 | * Write a bit to MDI/O | 451 | * Write a bit to MDI/O |
452 | */ | 452 | */ |
453 | static void | 453 | static void |
454 | mii_putbit(kio_addr_t ioaddr, unsigned data) | 454 | mii_putbit(unsigned int ioaddr, unsigned data) |
455 | { | 455 | { |
456 | #if 1 | 456 | #if 1 |
457 | if (data) { | 457 | if (data) { |
@@ -484,7 +484,7 @@ mii_putbit(kio_addr_t ioaddr, unsigned data) | |||
484 | * Get a bit from MDI/O | 484 | * Get a bit from MDI/O |
485 | */ | 485 | */ |
486 | static int | 486 | static int |
487 | mii_getbit(kio_addr_t ioaddr) | 487 | mii_getbit(unsigned int ioaddr) |
488 | { | 488 | { |
489 | unsigned d; | 489 | unsigned d; |
490 | 490 | ||
@@ -497,7 +497,7 @@ mii_getbit(kio_addr_t ioaddr) | |||
497 | } | 497 | } |
498 | 498 | ||
499 | static void | 499 | static void |
500 | mii_wbits(kio_addr_t ioaddr, unsigned data, int len) | 500 | mii_wbits(unsigned int ioaddr, unsigned data, int len) |
501 | { | 501 | { |
502 | unsigned m = 1 << (len-1); | 502 | unsigned m = 1 << (len-1); |
503 | for (; m; m >>= 1) | 503 | for (; m; m >>= 1) |
@@ -505,7 +505,7 @@ mii_wbits(kio_addr_t ioaddr, unsigned data, int len) | |||
505 | } | 505 | } |
506 | 506 | ||
507 | static unsigned | 507 | static unsigned |
508 | mii_rd(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg) | 508 | mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg) |
509 | { | 509 | { |
510 | int i; | 510 | int i; |
511 | unsigned data=0, m; | 511 | unsigned data=0, m; |
@@ -527,7 +527,8 @@ mii_rd(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg) | |||
527 | } | 527 | } |
528 | 528 | ||
529 | static void | 529 | static void |
530 | mii_wr(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg, unsigned data, int len) | 530 | mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg, unsigned data, |
531 | int len) | ||
531 | { | 532 | { |
532 | int i; | 533 | int i; |
533 | 534 | ||
@@ -726,7 +727,7 @@ xirc2ps_config(struct pcmcia_device * link) | |||
726 | local_info_t *local = netdev_priv(dev); | 727 | local_info_t *local = netdev_priv(dev); |
727 | tuple_t tuple; | 728 | tuple_t tuple; |
728 | cisparse_t parse; | 729 | cisparse_t parse; |
729 | kio_addr_t ioaddr; | 730 | unsigned int ioaddr; |
730 | int err, i; | 731 | int err, i; |
731 | u_char buf[64]; | 732 | u_char buf[64]; |
732 | cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data; | 733 | cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data; |
@@ -1104,7 +1105,7 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1104 | { | 1105 | { |
1105 | struct net_device *dev = (struct net_device *)dev_id; | 1106 | struct net_device *dev = (struct net_device *)dev_id; |
1106 | local_info_t *lp = netdev_priv(dev); | 1107 | local_info_t *lp = netdev_priv(dev); |
1107 | kio_addr_t ioaddr; | 1108 | unsigned int ioaddr; |
1108 | u_char saved_page; | 1109 | u_char saved_page; |
1109 | unsigned bytes_rcvd; | 1110 | unsigned bytes_rcvd; |
1110 | unsigned int_status, eth_status, rx_status, tx_status; | 1111 | unsigned int_status, eth_status, rx_status, tx_status; |
@@ -1209,7 +1210,7 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1209 | unsigned i; | 1210 | unsigned i; |
1210 | u_long *p = skb_put(skb, pktlen); | 1211 | u_long *p = skb_put(skb, pktlen); |
1211 | register u_long a; | 1212 | register u_long a; |
1212 | kio_addr_t edpreg = ioaddr+XIRCREG_EDP-2; | 1213 | unsigned int edpreg = ioaddr+XIRCREG_EDP-2; |
1213 | for (i=0; i < len ; i += 4, p++) { | 1214 | for (i=0; i < len ; i += 4, p++) { |
1214 | a = inl(edpreg); | 1215 | a = inl(edpreg); |
1215 | __asm__("rorl $16,%0\n\t" | 1216 | __asm__("rorl $16,%0\n\t" |
@@ -1346,7 +1347,7 @@ static int | |||
1346 | do_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1347 | do_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1347 | { | 1348 | { |
1348 | local_info_t *lp = netdev_priv(dev); | 1349 | local_info_t *lp = netdev_priv(dev); |
1349 | kio_addr_t ioaddr = dev->base_addr; | 1350 | unsigned int ioaddr = dev->base_addr; |
1350 | int okay; | 1351 | int okay; |
1351 | unsigned freespace; | 1352 | unsigned freespace; |
1352 | unsigned pktlen = skb->len; | 1353 | unsigned pktlen = skb->len; |
@@ -1415,7 +1416,7 @@ do_get_stats(struct net_device *dev) | |||
1415 | static void | 1416 | static void |
1416 | set_addresses(struct net_device *dev) | 1417 | set_addresses(struct net_device *dev) |
1417 | { | 1418 | { |
1418 | kio_addr_t ioaddr = dev->base_addr; | 1419 | unsigned int ioaddr = dev->base_addr; |
1419 | local_info_t *lp = netdev_priv(dev); | 1420 | local_info_t *lp = netdev_priv(dev); |
1420 | struct dev_mc_list *dmi = dev->mc_list; | 1421 | struct dev_mc_list *dmi = dev->mc_list; |
1421 | unsigned char *addr; | 1422 | unsigned char *addr; |
@@ -1459,7 +1460,7 @@ set_addresses(struct net_device *dev) | |||
1459 | static void | 1460 | static void |
1460 | set_multicast_list(struct net_device *dev) | 1461 | set_multicast_list(struct net_device *dev) |
1461 | { | 1462 | { |
1462 | kio_addr_t ioaddr = dev->base_addr; | 1463 | unsigned int ioaddr = dev->base_addr; |
1463 | 1464 | ||
1464 | SelectPage(0x42); | 1465 | SelectPage(0x42); |
1465 | if (dev->flags & IFF_PROMISC) { /* snoop */ | 1466 | if (dev->flags & IFF_PROMISC) { /* snoop */ |
@@ -1543,7 +1544,7 @@ static int | |||
1543 | do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 1544 | do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
1544 | { | 1545 | { |
1545 | local_info_t *local = netdev_priv(dev); | 1546 | local_info_t *local = netdev_priv(dev); |
1546 | kio_addr_t ioaddr = dev->base_addr; | 1547 | unsigned int ioaddr = dev->base_addr; |
1547 | u16 *data = (u16 *)&rq->ifr_ifru; | 1548 | u16 *data = (u16 *)&rq->ifr_ifru; |
1548 | 1549 | ||
1549 | DEBUG(1, "%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n", | 1550 | DEBUG(1, "%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n", |
@@ -1575,7 +1576,7 @@ static void | |||
1575 | hardreset(struct net_device *dev) | 1576 | hardreset(struct net_device *dev) |
1576 | { | 1577 | { |
1577 | local_info_t *local = netdev_priv(dev); | 1578 | local_info_t *local = netdev_priv(dev); |
1578 | kio_addr_t ioaddr = dev->base_addr; | 1579 | unsigned int ioaddr = dev->base_addr; |
1579 | 1580 | ||
1580 | SelectPage(4); | 1581 | SelectPage(4); |
1581 | udelay(1); | 1582 | udelay(1); |
@@ -1592,7 +1593,7 @@ static void | |||
1592 | do_reset(struct net_device *dev, int full) | 1593 | do_reset(struct net_device *dev, int full) |
1593 | { | 1594 | { |
1594 | local_info_t *local = netdev_priv(dev); | 1595 | local_info_t *local = netdev_priv(dev); |
1595 | kio_addr_t ioaddr = dev->base_addr; | 1596 | unsigned int ioaddr = dev->base_addr; |
1596 | unsigned value; | 1597 | unsigned value; |
1597 | 1598 | ||
1598 | DEBUG(0, "%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full); | 1599 | DEBUG(0, "%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full); |
@@ -1753,7 +1754,7 @@ static int | |||
1753 | init_mii(struct net_device *dev) | 1754 | init_mii(struct net_device *dev) |
1754 | { | 1755 | { |
1755 | local_info_t *local = netdev_priv(dev); | 1756 | local_info_t *local = netdev_priv(dev); |
1756 | kio_addr_t ioaddr = dev->base_addr; | 1757 | unsigned int ioaddr = dev->base_addr; |
1757 | unsigned control, status, linkpartner; | 1758 | unsigned control, status, linkpartner; |
1758 | int i; | 1759 | int i; |
1759 | 1760 | ||
@@ -1826,7 +1827,7 @@ static void | |||
1826 | do_powerdown(struct net_device *dev) | 1827 | do_powerdown(struct net_device *dev) |
1827 | { | 1828 | { |
1828 | 1829 | ||
1829 | kio_addr_t ioaddr = dev->base_addr; | 1830 | unsigned int ioaddr = dev->base_addr; |
1830 | 1831 | ||
1831 | DEBUG(0, "do_powerdown(%p)\n", dev); | 1832 | DEBUG(0, "do_powerdown(%p)\n", dev); |
1832 | 1833 | ||
@@ -1838,7 +1839,7 @@ do_powerdown(struct net_device *dev) | |||
1838 | static int | 1839 | static int |
1839 | do_stop(struct net_device *dev) | 1840 | do_stop(struct net_device *dev) |
1840 | { | 1841 | { |
1841 | kio_addr_t ioaddr = dev->base_addr; | 1842 | unsigned int ioaddr = dev->base_addr; |
1842 | local_info_t *lp = netdev_priv(dev); | 1843 | local_info_t *lp = netdev_priv(dev); |
1843 | struct pcmcia_device *link = lp->p_dev; | 1844 | struct pcmcia_device *link = lp->p_dev; |
1844 | 1845 | ||
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index 1b51bb668d39..5aa0a8089694 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c | |||
@@ -2468,9 +2468,10 @@ static int __init pppol2tp_init(void) | |||
2468 | 2468 | ||
2469 | out: | 2469 | out: |
2470 | return err; | 2470 | return err; |
2471 | | 2471 | #ifdef CONFIG_PROC_FS |
2472 | out_unregister_pppox_proto: | 2472 | out_unregister_pppox_proto: |
2473 | unregister_pppox_proto(PX_PROTO_OL2TP); | 2473 | unregister_pppox_proto(PX_PROTO_OL2TP); |
2474 | #endif | ||
2474 | out_unregister_pppol2tp_proto: | 2475 | out_unregister_pppol2tp_proto: |
2475 | proto_unregister(&pppol2tp_sk_proto); | 2476 | proto_unregister(&pppol2tp_sk_proto); |
2476 | goto out; | 2477 | goto out; |
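The pppol2tp fix keeps the error label paired with the only goto that reaches it: out_unregister_pppox_proto is jumped to solely from code compiled under CONFIG_PROC_FS, so leaving the label unguarded provokes an "unused label" warning when procfs is disabled. A generic sketch of the pattern, with hypothetical helper names:

	int example_init(void)
	{
		int err;

		err = register_main_proto();		/* hypothetical helper */
		if (err)
			goto out;
	#ifdef CONFIG_PROC_FS
		if (!create_example_proc_entry()) {	/* hypothetical helper */
			err = -ENOMEM;
			goto out_unregister_main;
		}
	#endif
	out:
		return err;

	#ifdef CONFIG_PROC_FS
	out_unregister_main:	/* only defined when the goto that uses it exists */
		unregister_main_proto();
		goto out;
	#endif
	}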
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 46339f6bcd00..038c1ef94d2e 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -529,9 +529,13 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr) | |||
529 | 529 | ||
530 | if (ifr->ifr_flags & IFF_NO_PI) | 530 | if (ifr->ifr_flags & IFF_NO_PI) |
531 | tun->flags |= TUN_NO_PI; | 531 | tun->flags |= TUN_NO_PI; |
532 | else | ||
533 | tun->flags &= ~TUN_NO_PI; | ||
532 | 534 | ||
533 | if (ifr->ifr_flags & IFF_ONE_QUEUE) | 535 | if (ifr->ifr_flags & IFF_ONE_QUEUE) |
534 | tun->flags |= TUN_ONE_QUEUE; | 536 | tun->flags |= TUN_ONE_QUEUE; |
537 | else | ||
538 | tun->flags &= ~TUN_ONE_QUEUE; | ||
535 | 539 | ||
536 | file->private_data = tun; | 540 | file->private_data = tun; |
537 | tun->attached = 1; | 541 | tun->attached = 1; |
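The tun change makes TUNSETIFF honour cleared flag bits as well as set ones: previously a second TUNSETIFF on an existing device could enable TUN_NO_PI or TUN_ONE_QUEUE but never disable them again. The idiom being applied, reduced to a helper for illustration (the driver open-codes it):

	static void sync_flag(unsigned int *flags, unsigned int bit, int requested)
	{
		if (requested)
			*flags |= bit;		/* userspace asked for the feature */
		else
			*flags &= ~bit;		/* userspace asked to turn it back off */
	}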
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index 32a24f5c4fa6..08a011f0834a 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h | |||
@@ -724,6 +724,7 @@ struct b43_wldev { | |||
724 | bool short_preamble; /* TRUE, if short preamble is enabled. */ | 724 | bool short_preamble; /* TRUE, if short preamble is enabled. */ |
725 | bool short_slot; /* TRUE, if short slot timing is enabled. */ | 725 | bool short_slot; /* TRUE, if short slot timing is enabled. */ |
726 | bool radio_hw_enable; /* saved state of radio hardware enabled state */ | 726 | bool radio_hw_enable; /* saved state of radio hardware enabled state */ |
727 | bool suspend_in_progress; /* TRUE, if we are in a suspend/resume cycle */ | ||
727 | 728 | ||
728 | /* PHY/Radio device. */ | 729 | /* PHY/Radio device. */ |
729 | struct b43_phy phy; | 730 | struct b43_phy phy; |
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 8a708b77925d..3dfb28a34be9 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c | |||
@@ -337,7 +337,7 @@ static inline int txring_to_priority(struct b43_dmaring *ring) | |||
337 | return idx_to_prio[index]; | 337 | return idx_to_prio[index]; |
338 | } | 338 | } |
339 | 339 | ||
340 | u16 b43_dmacontroller_base(int dma64bit, int controller_idx) | 340 | static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx) |
341 | { | 341 | { |
342 | static const u16 map64[] = { | 342 | static const u16 map64[] = { |
343 | B43_MMIO_DMA64_BASE0, | 343 | B43_MMIO_DMA64_BASE0, |
@@ -356,7 +356,7 @@ u16 b43_dmacontroller_base(int dma64bit, int controller_idx) | |||
356 | B43_MMIO_DMA32_BASE5, | 356 | B43_MMIO_DMA32_BASE5, |
357 | }; | 357 | }; |
358 | 358 | ||
359 | if (dma64bit) { | 359 | if (type == B43_DMA_64BIT) { |
360 | B43_WARN_ON(!(controller_idx >= 0 && | 360 | B43_WARN_ON(!(controller_idx >= 0 && |
361 | controller_idx < ARRAY_SIZE(map64))); | 361 | controller_idx < ARRAY_SIZE(map64))); |
362 | return map64[controller_idx]; | 362 | return map64[controller_idx]; |
@@ -437,7 +437,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring) | |||
437 | * 02, which uses 64-bit DMA, needs the ring buffer in very low memory, | 437 | * 02, which uses 64-bit DMA, needs the ring buffer in very low memory, |
438 | * which accounts for the GFP_DMA flag below. | 438 | * which accounts for the GFP_DMA flag below. |
439 | */ | 439 | */ |
440 | if (ring->dma64) | 440 | if (ring->type == B43_DMA_64BIT) |
441 | flags |= GFP_DMA; | 441 | flags |= GFP_DMA; |
442 | ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE, | 442 | ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE, |
443 | &(ring->dmabase), flags); | 443 | &(ring->dmabase), flags); |
@@ -459,7 +459,8 @@ static void free_ringmemory(struct b43_dmaring *ring) | |||
459 | } | 459 | } |
460 | 460 | ||
461 | /* Reset the RX DMA channel */ | 461 | /* Reset the RX DMA channel */ |
462 | int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64) | 462 | static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, |
463 | enum b43_dmatype type) | ||
463 | { | 464 | { |
464 | int i; | 465 | int i; |
465 | u32 value; | 466 | u32 value; |
@@ -467,12 +468,13 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64) | |||
467 | 468 | ||
468 | might_sleep(); | 469 | might_sleep(); |
469 | 470 | ||
470 | offset = dma64 ? B43_DMA64_RXCTL : B43_DMA32_RXCTL; | 471 | offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL; |
471 | b43_write32(dev, mmio_base + offset, 0); | 472 | b43_write32(dev, mmio_base + offset, 0); |
472 | for (i = 0; i < 10; i++) { | 473 | for (i = 0; i < 10; i++) { |
473 | offset = dma64 ? B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS; | 474 | offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS : |
475 | B43_DMA32_RXSTATUS; | ||
474 | value = b43_read32(dev, mmio_base + offset); | 476 | value = b43_read32(dev, mmio_base + offset); |
475 | if (dma64) { | 477 | if (type == B43_DMA_64BIT) { |
476 | value &= B43_DMA64_RXSTAT; | 478 | value &= B43_DMA64_RXSTAT; |
477 | if (value == B43_DMA64_RXSTAT_DISABLED) { | 479 | if (value == B43_DMA64_RXSTAT_DISABLED) { |
478 | i = -1; | 480 | i = -1; |
@@ -496,7 +498,8 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64) | |||
496 | } | 498 | } |
497 | 499 | ||
498 | /* Reset the TX DMA channel */ | 500 | /* Reset the TX DMA channel */ |
499 | int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64) | 501 | static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, |
502 | enum b43_dmatype type) | ||
500 | { | 503 | { |
501 | int i; | 504 | int i; |
502 | u32 value; | 505 | u32 value; |
@@ -505,9 +508,10 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64) | |||
505 | might_sleep(); | 508 | might_sleep(); |
506 | 509 | ||
507 | for (i = 0; i < 10; i++) { | 510 | for (i = 0; i < 10; i++) { |
508 | offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; | 511 | offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS : |
512 | B43_DMA32_TXSTATUS; | ||
509 | value = b43_read32(dev, mmio_base + offset); | 513 | value = b43_read32(dev, mmio_base + offset); |
510 | if (dma64) { | 514 | if (type == B43_DMA_64BIT) { |
511 | value &= B43_DMA64_TXSTAT; | 515 | value &= B43_DMA64_TXSTAT; |
512 | if (value == B43_DMA64_TXSTAT_DISABLED || | 516 | if (value == B43_DMA64_TXSTAT_DISABLED || |
513 | value == B43_DMA64_TXSTAT_IDLEWAIT || | 517 | value == B43_DMA64_TXSTAT_IDLEWAIT || |
@@ -522,12 +526,13 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64) | |||
522 | } | 526 | } |
523 | msleep(1); | 527 | msleep(1); |
524 | } | 528 | } |
525 | offset = dma64 ? B43_DMA64_TXCTL : B43_DMA32_TXCTL; | 529 | offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL; |
526 | b43_write32(dev, mmio_base + offset, 0); | 530 | b43_write32(dev, mmio_base + offset, 0); |
527 | for (i = 0; i < 10; i++) { | 531 | for (i = 0; i < 10; i++) { |
528 | offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; | 532 | offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS : |
533 | B43_DMA32_TXSTATUS; | ||
529 | value = b43_read32(dev, mmio_base + offset); | 534 | value = b43_read32(dev, mmio_base + offset); |
530 | if (dma64) { | 535 | if (type == B43_DMA_64BIT) { |
531 | value &= B43_DMA64_TXSTAT; | 536 | value &= B43_DMA64_TXSTAT; |
532 | if (value == B43_DMA64_TXSTAT_DISABLED) { | 537 | if (value == B43_DMA64_TXSTAT_DISABLED) { |
533 | i = -1; | 538 | i = -1; |
@@ -552,6 +557,33 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64) | |||
552 | return 0; | 557 | return 0; |
553 | } | 558 | } |
554 | 559 | ||
560 | /* Check if a DMA mapping address is invalid. */ | ||
561 | static bool b43_dma_mapping_error(struct b43_dmaring *ring, | ||
562 | dma_addr_t addr, | ||
563 | size_t buffersize) | ||
564 | { | ||
565 | if (unlikely(dma_mapping_error(addr))) | ||
566 | return 1; | ||
567 | |||
568 | switch (ring->type) { | ||
569 | case B43_DMA_30BIT: | ||
570 | if ((u64)addr + buffersize > (1ULL << 30)) | ||
571 | return 1; | ||
572 | break; | ||
573 | case B43_DMA_32BIT: | ||
574 | if ((u64)addr + buffersize > (1ULL << 32)) | ||
575 | return 1; | ||
576 | break; | ||
577 | case B43_DMA_64BIT: | ||
578 | /* Currently we can't have addresses beyond | ||
579 | * 64bit in the kernel. */ | ||
580 | break; | ||
581 | } | ||
582 | |||
583 | /* The address is OK. */ | ||
584 | return 0; | ||
585 | } | ||
586 | |||
555 | static int setup_rx_descbuffer(struct b43_dmaring *ring, | 587 | static int setup_rx_descbuffer(struct b43_dmaring *ring, |
556 | struct b43_dmadesc_generic *desc, | 588 | struct b43_dmadesc_generic *desc, |
557 | struct b43_dmadesc_meta *meta, gfp_t gfp_flags) | 589 | struct b43_dmadesc_meta *meta, gfp_t gfp_flags) |
@@ -567,7 +599,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring, | |||
567 | if (unlikely(!skb)) | 599 | if (unlikely(!skb)) |
568 | return -ENOMEM; | 600 | return -ENOMEM; |
569 | dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); | 601 | dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); |
570 | if (dma_mapping_error(dmaaddr)) { | 602 | if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) { |
571 | /* ugh. try to realloc in zone_dma */ | 603 | /* ugh. try to realloc in zone_dma */ |
572 | gfp_flags |= GFP_DMA; | 604 | gfp_flags |= GFP_DMA; |
573 | 605 | ||
@@ -580,7 +612,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring, | |||
580 | ring->rx_buffersize, 0); | 612 | ring->rx_buffersize, 0); |
581 | } | 613 | } |
582 | 614 | ||
583 | if (dma_mapping_error(dmaaddr)) { | 615 | if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) { |
584 | dev_kfree_skb_any(skb); | 616 | dev_kfree_skb_any(skb); |
585 | return -EIO; | 617 | return -EIO; |
586 | } | 618 | } |
@@ -645,7 +677,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring) | |||
645 | u32 trans = ssb_dma_translation(ring->dev->dev); | 677 | u32 trans = ssb_dma_translation(ring->dev->dev); |
646 | 678 | ||
647 | if (ring->tx) { | 679 | if (ring->tx) { |
648 | if (ring->dma64) { | 680 | if (ring->type == B43_DMA_64BIT) { |
649 | u64 ringbase = (u64) (ring->dmabase); | 681 | u64 ringbase = (u64) (ring->dmabase); |
650 | 682 | ||
651 | addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) | 683 | addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) |
@@ -677,7 +709,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring) | |||
677 | err = alloc_initial_descbuffers(ring); | 709 | err = alloc_initial_descbuffers(ring); |
678 | if (err) | 710 | if (err) |
679 | goto out; | 711 | goto out; |
680 | if (ring->dma64) { | 712 | if (ring->type == B43_DMA_64BIT) { |
681 | u64 ringbase = (u64) (ring->dmabase); | 713 | u64 ringbase = (u64) (ring->dmabase); |
682 | 714 | ||
683 | addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) | 715 | addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) |
@@ -722,16 +754,16 @@ static void dmacontroller_cleanup(struct b43_dmaring *ring) | |||
722 | { | 754 | { |
723 | if (ring->tx) { | 755 | if (ring->tx) { |
724 | b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base, | 756 | b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base, |
725 | ring->dma64); | 757 | ring->type); |
726 | if (ring->dma64) { | 758 | if (ring->type == B43_DMA_64BIT) { |
727 | b43_dma_write(ring, B43_DMA64_TXRINGLO, 0); | 759 | b43_dma_write(ring, B43_DMA64_TXRINGLO, 0); |
728 | b43_dma_write(ring, B43_DMA64_TXRINGHI, 0); | 760 | b43_dma_write(ring, B43_DMA64_TXRINGHI, 0); |
729 | } else | 761 | } else |
730 | b43_dma_write(ring, B43_DMA32_TXRING, 0); | 762 | b43_dma_write(ring, B43_DMA32_TXRING, 0); |
731 | } else { | 763 | } else { |
732 | b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base, | 764 | b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base, |
733 | ring->dma64); | 765 | ring->type); |
734 | if (ring->dma64) { | 766 | if (ring->type == B43_DMA_64BIT) { |
735 | b43_dma_write(ring, B43_DMA64_RXRINGLO, 0); | 767 | b43_dma_write(ring, B43_DMA64_RXRINGLO, 0); |
736 | b43_dma_write(ring, B43_DMA64_RXRINGHI, 0); | 768 | b43_dma_write(ring, B43_DMA64_RXRINGHI, 0); |
737 | } else | 769 | } else |
@@ -786,7 +818,8 @@ static u64 supported_dma_mask(struct b43_wldev *dev) | |||
786 | static | 818 | static |
787 | struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, | 819 | struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, |
788 | int controller_index, | 820 | int controller_index, |
789 | int for_tx, int dma64) | 821 | int for_tx, |
822 | enum b43_dmatype type) | ||
790 | { | 823 | { |
791 | struct b43_dmaring *ring; | 824 | struct b43_dmaring *ring; |
792 | int err; | 825 | int err; |
@@ -796,6 +829,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, | |||
796 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); | 829 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); |
797 | if (!ring) | 830 | if (!ring) |
798 | goto out; | 831 | goto out; |
832 | ring->type = type; | ||
799 | 833 | ||
800 | nr_slots = B43_RXRING_SLOTS; | 834 | nr_slots = B43_RXRING_SLOTS; |
801 | if (for_tx) | 835 | if (for_tx) |
@@ -818,7 +852,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, | |||
818 | b43_txhdr_size(dev), | 852 | b43_txhdr_size(dev), |
819 | DMA_TO_DEVICE); | 853 | DMA_TO_DEVICE); |
820 | 854 | ||
821 | if (dma_mapping_error(dma_test)) { | 855 | if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev))) { |
822 | /* ugh realloc */ | 856 | /* ugh realloc */ |
823 | kfree(ring->txhdr_cache); | 857 | kfree(ring->txhdr_cache); |
824 | ring->txhdr_cache = kcalloc(nr_slots, | 858 | ring->txhdr_cache = kcalloc(nr_slots, |
@@ -832,7 +866,8 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, | |||
832 | b43_txhdr_size(dev), | 866 | b43_txhdr_size(dev), |
833 | DMA_TO_DEVICE); | 867 | DMA_TO_DEVICE); |
834 | 868 | ||
835 | if (dma_mapping_error(dma_test)) | 869 | if (b43_dma_mapping_error(ring, dma_test, |
870 | b43_txhdr_size(dev))) | ||
836 | goto err_kfree_txhdr_cache; | 871 | goto err_kfree_txhdr_cache; |
837 | } | 872 | } |
838 | 873 | ||
@@ -843,10 +878,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, | |||
843 | 878 | ||
844 | ring->dev = dev; | 879 | ring->dev = dev; |
845 | ring->nr_slots = nr_slots; | 880 | ring->nr_slots = nr_slots; |
846 | ring->mmio_base = b43_dmacontroller_base(dma64, controller_index); | 881 | ring->mmio_base = b43_dmacontroller_base(type, controller_index); |
847 | ring->index = controller_index; | 882 | ring->index = controller_index; |
848 | ring->dma64 = !!dma64; | 883 | if (type == B43_DMA_64BIT) |
849 | if (dma64) | ||
850 | ring->ops = &dma64_ops; | 884 | ring->ops = &dma64_ops; |
851 | else | 885 | else |
852 | ring->ops = &dma32_ops; | 886 | ring->ops = &dma32_ops; |
@@ -896,8 +930,8 @@ static void b43_destroy_dmaring(struct b43_dmaring *ring) | |||
896 | if (!ring) | 930 | if (!ring) |
897 | return; | 931 | return; |
898 | 932 | ||
899 | b43dbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots: %d/%d\n", | 933 | b43dbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots: %d/%d\n", |
900 | (ring->dma64) ? "64" : "32", | 934 | (unsigned int)(ring->type), |
901 | ring->mmio_base, | 935 | ring->mmio_base, |
902 | (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots); | 936 | (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots); |
903 | /* Device IRQs are disabled prior entering this function, | 937 | /* Device IRQs are disabled prior entering this function, |
@@ -941,12 +975,22 @@ int b43_dma_init(struct b43_wldev *dev) | |||
941 | struct b43_dmaring *ring; | 975 | struct b43_dmaring *ring; |
942 | int err; | 976 | int err; |
943 | u64 dmamask; | 977 | u64 dmamask; |
944 | int dma64 = 0; | 978 | enum b43_dmatype type; |
945 | 979 | ||
946 | dmamask = supported_dma_mask(dev); | 980 | dmamask = supported_dma_mask(dev); |
947 | if (dmamask == DMA_64BIT_MASK) | 981 | switch (dmamask) { |
948 | dma64 = 1; | 982 | default: |
949 | | 983 | B43_WARN_ON(1); |
984 | case DMA_30BIT_MASK: | ||
985 | type = B43_DMA_30BIT; | ||
986 | break; | ||
987 | case DMA_32BIT_MASK: | ||
988 | type = B43_DMA_32BIT; | ||
989 | break; | ||
990 | case DMA_64BIT_MASK: | ||
991 | type = B43_DMA_64BIT; | ||
992 | break; | ||
993 | } | ||
950 | err = ssb_dma_set_mask(dev->dev, dmamask); | 994 | err = ssb_dma_set_mask(dev->dev, dmamask); |
951 | if (err) { | 995 | if (err) { |
952 | b43err(dev->wl, "The machine/kernel does not support " | 996 | b43err(dev->wl, "The machine/kernel does not support " |
@@ -958,52 +1002,51 @@ int b43_dma_init(struct b43_wldev *dev) | |||
958 | 1002 | ||
959 | err = -ENOMEM; | 1003 | err = -ENOMEM; |
960 | /* setup TX DMA channels. */ | 1004 | /* setup TX DMA channels. */ |
961 | ring = b43_setup_dmaring(dev, 0, 1, dma64); | 1005 | ring = b43_setup_dmaring(dev, 0, 1, type); |
962 | if (!ring) | 1006 | if (!ring) |
963 | goto out; | 1007 | goto out; |
964 | dma->tx_ring0 = ring; | 1008 | dma->tx_ring0 = ring; |
965 | 1009 | ||
966 | ring = b43_setup_dmaring(dev, 1, 1, dma64); | 1010 | ring = b43_setup_dmaring(dev, 1, 1, type); |
967 | if (!ring) | 1011 | if (!ring) |
968 | goto err_destroy_tx0; | 1012 | goto err_destroy_tx0; |
969 | dma->tx_ring1 = ring; | 1013 | dma->tx_ring1 = ring; |
970 | 1014 | ||
971 | ring = b43_setup_dmaring(dev, 2, 1, dma64); | 1015 | ring = b43_setup_dmaring(dev, 2, 1, type); |
972 | if (!ring) | 1016 | if (!ring) |
973 | goto err_destroy_tx1; | 1017 | goto err_destroy_tx1; |
974 | dma->tx_ring2 = ring; | 1018 | dma->tx_ring2 = ring; |
975 | 1019 | ||
976 | ring = b43_setup_dmaring(dev, 3, 1, dma64); | 1020 | ring = b43_setup_dmaring(dev, 3, 1, type); |
977 | if (!ring) | 1021 | if (!ring) |
978 | goto err_destroy_tx2; | 1022 | goto err_destroy_tx2; |
979 | dma->tx_ring3 = ring; | 1023 | dma->tx_ring3 = ring; |
980 | 1024 | ||
981 | ring = b43_setup_dmaring(dev, 4, 1, dma64); | 1025 | ring = b43_setup_dmaring(dev, 4, 1, type); |
982 | if (!ring) | 1026 | if (!ring) |
983 | goto err_destroy_tx3; | 1027 | goto err_destroy_tx3; |
984 | dma->tx_ring4 = ring; | 1028 | dma->tx_ring4 = ring; |
985 | 1029 | ||
986 | ring = b43_setup_dmaring(dev, 5, 1, dma64); | 1030 | ring = b43_setup_dmaring(dev, 5, 1, type); |
987 | if (!ring) | 1031 | if (!ring) |
988 | goto err_destroy_tx4; | 1032 | goto err_destroy_tx4; |
989 | dma->tx_ring5 = ring; | 1033 | dma->tx_ring5 = ring; |
990 | 1034 | ||
991 | /* setup RX DMA channels. */ | 1035 | /* setup RX DMA channels. */ |
992 | ring = b43_setup_dmaring(dev, 0, 0, dma64); | 1036 | ring = b43_setup_dmaring(dev, 0, 0, type); |
993 | if (!ring) | 1037 | if (!ring) |
994 | goto err_destroy_tx5; | 1038 | goto err_destroy_tx5; |
995 | dma->rx_ring0 = ring; | 1039 | dma->rx_ring0 = ring; |
996 | 1040 | ||
997 | if (dev->dev->id.revision < 5) { | 1041 | if (dev->dev->id.revision < 5) { |
998 | ring = b43_setup_dmaring(dev, 3, 0, dma64); | 1042 | ring = b43_setup_dmaring(dev, 3, 0, type); |
999 | if (!ring) | 1043 | if (!ring) |
1000 | goto err_destroy_rx0; | 1044 | goto err_destroy_rx0; |
1001 | dma->rx_ring3 = ring; | 1045 | dma->rx_ring3 = ring; |
1002 | } | 1046 | } |
1003 | 1047 | ||
1004 | b43dbg(dev->wl, "%d-bit DMA initialized\n", | 1048 | b43dbg(dev->wl, "%u-bit DMA initialized\n", |
1005 | (dmamask == DMA_64BIT_MASK) ? 64 : | 1049 | (unsigned int)type); |
1006 | (dmamask == DMA_32BIT_MASK) ? 32 : 30); | ||
1007 | err = 0; | 1050 | err = 0; |
1008 | out: | 1051 | out: |
1009 | return err; | 1052 | return err; |
@@ -1146,7 +1189,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring, | |||
1146 | 1189 | ||
1147 | meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, | 1190 | meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, |
1148 | hdrsize, 1); | 1191 | hdrsize, 1); |
1149 | if (dma_mapping_error(meta_hdr->dmaaddr)) { | 1192 | if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize)) { |
1150 | ring->current_slot = old_top_slot; | 1193 | ring->current_slot = old_top_slot; |
1151 | ring->used_slots = old_used_slots; | 1194 | ring->used_slots = old_used_slots; |
1152 | return -EIO; | 1195 | return -EIO; |
@@ -1165,7 +1208,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring, | |||
1165 | 1208 | ||
1166 | meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); | 1209 | meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); |
1167 | /* create a bounce buffer in zone_dma on mapping failure. */ | 1210 | /* create a bounce buffer in zone_dma on mapping failure. */ |
1168 | if (dma_mapping_error(meta->dmaaddr)) { | 1211 | if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) { |
1169 | bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); | 1212 | bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); |
1170 | if (!bounce_skb) { | 1213 | if (!bounce_skb) { |
1171 | ring->current_slot = old_top_slot; | 1214 | ring->current_slot = old_top_slot; |
@@ -1179,7 +1222,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring, | |||
1179 | skb = bounce_skb; | 1222 | skb = bounce_skb; |
1180 | meta->skb = skb; | 1223 | meta->skb = skb; |
1181 | meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); | 1224 | meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); |
1182 | if (dma_mapping_error(meta->dmaaddr)) { | 1225 | if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) { |
1183 | ring->current_slot = old_top_slot; | 1226 | ring->current_slot = old_top_slot; |
1184 | ring->used_slots = old_used_slots; | 1227 | ring->used_slots = old_used_slots; |
1185 | err = -EIO; | 1228 | err = -EIO; |
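Taken together, the dma.c hunks replace the old 32/64-bit boolean with the three-valued b43_dmatype and route every fresh DMA mapping through b43_dma_mapping_error(), which also rejects mappings a 30-bit or 32-bit engine cannot address. A condensed illustration of the caller side, simplified from setup_rx_descbuffer() above; the zero return value is only a sketch-level failure sentinel:

	static dma_addr_t example_map_rx(struct b43_dmaring *ring, void *buf, size_t len)
	{
		dma_addr_t addr = map_descbuffer(ring, buf, len, 0);

		/* dma_mapping_error() alone is no longer sufficient: a mapping may
		 * be valid yet lie above the 1 GiB (30-bit) or 4 GiB (32-bit)
		 * boundary this particular engine can reach, so the ring type is
		 * checked as well. */
		if (b43_dma_mapping_error(ring, addr, len))
			return 0;	/* caller falls back to GFP_DMA memory and retries */

		return addr;
	}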
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h index 58db03ac536e..c0d6b69e6501 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/b43/dma.h | |||
@@ -203,6 +203,12 @@ struct b43_dma_ops { | |||
203 | void (*set_current_rxslot) (struct b43_dmaring * ring, int slot); | 203 | void (*set_current_rxslot) (struct b43_dmaring * ring, int slot); |
204 | }; | 204 | }; |
205 | 205 | ||
206 | enum b43_dmatype { | ||
207 | B43_DMA_30BIT = 30, | ||
208 | B43_DMA_32BIT = 32, | ||
209 | B43_DMA_64BIT = 64, | ||
210 | }; | ||
211 | |||
206 | struct b43_dmaring { | 212 | struct b43_dmaring { |
207 | /* Lowlevel DMA ops. */ | 213 | /* Lowlevel DMA ops. */ |
208 | const struct b43_dma_ops *ops; | 214 | const struct b43_dma_ops *ops; |
@@ -235,8 +241,8 @@ struct b43_dmaring { | |||
235 | int index; | 241 | int index; |
236 | /* Boolean. Is this a TX ring? */ | 242 | /* Boolean. Is this a TX ring? */ |
237 | bool tx; | 243 | bool tx; |
238 | /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */ | 244 | /* The type of DMA engine used. */ |
239 | bool dma64; | 245 | enum b43_dmatype type; |
240 | /* Boolean. Is this ring stopped at ieee80211 level? */ | 246 | /* Boolean. Is this ring stopped at ieee80211 level? */ |
241 | bool stopped; | 247 | bool stopped; |
242 | /* Lock, only used for TX. */ | 248 | /* Lock, only used for TX. */ |
@@ -255,8 +261,7 @@ static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset) | |||
255 | return b43_read32(ring->dev, ring->mmio_base + offset); | 261 | return b43_read32(ring->dev, ring->mmio_base + offset); |
256 | } | 262 | } |
257 | 263 | ||
258 | static inline | 264 | static inline void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value) |
259 | void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value) | ||
260 | { | 265 | { |
261 | b43_write32(ring->dev, ring->mmio_base + offset, value); | 266 | b43_write32(ring->dev, ring->mmio_base + offset, value); |
262 | } | 267 | } |
@@ -264,13 +269,6 @@ static inline | |||
264 | int b43_dma_init(struct b43_wldev *dev); | 269 | int b43_dma_init(struct b43_wldev *dev); |
265 | void b43_dma_free(struct b43_wldev *dev); | 270 | void b43_dma_free(struct b43_wldev *dev); |
266 | 271 | ||
267 | int b43_dmacontroller_rx_reset(struct b43_wldev *dev, | ||
268 | u16 dmacontroller_mmio_base, int dma64); | ||
269 | int b43_dmacontroller_tx_reset(struct b43_wldev *dev, | ||
270 | u16 dmacontroller_mmio_base, int dma64); | ||
271 | |||
272 | u16 b43_dmacontroller_base(int dma64bit, int dmacontroller_idx); | ||
273 | |||
274 | void b43_dma_tx_suspend(struct b43_wldev *dev); | 272 | void b43_dma_tx_suspend(struct b43_wldev *dev); |
275 | void b43_dma_tx_resume(struct b43_wldev *dev); | 273 | void b43_dma_tx_resume(struct b43_wldev *dev); |
276 | 274 | ||
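[Annotation] With the bool dma64 flag replaced by enum b43_dmatype, the probe path can derive the engine type directly from the DMA mask it negotiated. A hypothetical helper (not part of this hunk) illustrating that mapping:

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
        if (dmamask == DMA_30BIT_MASK)
                return B43_DMA_30BIT;
        if (dmamask == DMA_32BIT_MASK)
                return B43_DMA_32BIT;
        if (dmamask == DMA_64BIT_MASK)
                return B43_DMA_64BIT;
        /* Should not happen; fall back to the most restrictive engine. */
        return B43_DMA_30BIT;
}

Because the enum values are the bit widths themselves, the "%u-bit DMA initialized" message in dma.c can print the type directly.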
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c index 4b590d8c65ff..0908335892db 100644 --- a/drivers/net/wireless/b43/leds.c +++ b/drivers/net/wireless/b43/leds.c | |||
@@ -116,7 +116,10 @@ static void b43_unregister_led(struct b43_led *led) | |||
116 | { | 116 | { |
117 | if (!led->dev) | 117 | if (!led->dev) |
118 | return; | 118 | return; |
119 | led_classdev_unregister(&led->led_dev); | 119 | if (led->dev->suspend_in_progress) |
120 | led_classdev_unregister_suspended(&led->led_dev); | ||
121 | else | ||
122 | led_classdev_unregister(&led->led_dev); | ||
120 | b43_led_turn_off(led->dev, led->index, led->activelow); | 123 | b43_led_turn_off(led->dev, led->index, led->activelow); |
121 | led->dev = NULL; | 124 | led->dev = NULL; |
122 | } | 125 | } |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 64c154d080d8..ef65c41af00f 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/wireless.h> | 38 | #include <linux/wireless.h> |
39 | #include <linux/workqueue.h> | 39 | #include <linux/workqueue.h> |
40 | #include <linux/skbuff.h> | 40 | #include <linux/skbuff.h> |
41 | #include <linux/io.h> | ||
41 | #include <linux/dma-mapping.h> | 42 | #include <linux/dma-mapping.h> |
42 | #include <asm/unaligned.h> | 43 | #include <asm/unaligned.h> |
43 | 44 | ||
@@ -2554,10 +2555,10 @@ static int b43_rng_read(struct hwrng *rng, u32 * data) | |||
2554 | return (sizeof(u16)); | 2555 | return (sizeof(u16)); |
2555 | } | 2556 | } |
2556 | 2557 | ||
2557 | static void b43_rng_exit(struct b43_wl *wl) | 2558 | static void b43_rng_exit(struct b43_wl *wl, bool suspended) |
2558 | { | 2559 | { |
2559 | if (wl->rng_initialized) | 2560 | if (wl->rng_initialized) |
2560 | hwrng_unregister(&wl->rng); | 2561 | __hwrng_unregister(&wl->rng, suspended); |
2561 | } | 2562 | } |
2562 | 2563 | ||
2563 | static int b43_rng_init(struct b43_wl *wl) | 2564 | static int b43_rng_init(struct b43_wl *wl) |
@@ -3417,8 +3418,10 @@ static void b43_wireless_core_exit(struct b43_wldev *dev) | |||
3417 | macctl |= B43_MACCTL_PSM_JMP0; | 3418 | macctl |= B43_MACCTL_PSM_JMP0; |
3418 | b43_write32(dev, B43_MMIO_MACCTL, macctl); | 3419 | b43_write32(dev, B43_MMIO_MACCTL, macctl); |
3419 | 3420 | ||
3420 | b43_leds_exit(dev); | 3421 | if (!dev->suspend_in_progress) { |
3421 | b43_rng_exit(dev->wl); | 3422 | b43_leds_exit(dev); |
3423 | b43_rng_exit(dev->wl, false); | ||
3424 | } | ||
3422 | b43_dma_free(dev); | 3425 | b43_dma_free(dev); |
3423 | b43_chip_exit(dev); | 3426 | b43_chip_exit(dev); |
3424 | b43_radio_turn_off(dev, 1); | 3427 | b43_radio_turn_off(dev, 1); |
@@ -3534,11 +3537,13 @@ static int b43_wireless_core_init(struct b43_wldev *dev) | |||
3534 | ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */ | 3537 | ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */ |
3535 | b43_upload_card_macaddress(dev); | 3538 | b43_upload_card_macaddress(dev); |
3536 | b43_security_init(dev); | 3539 | b43_security_init(dev); |
3537 | b43_rng_init(wl); | 3540 | if (!dev->suspend_in_progress) |
3541 | b43_rng_init(wl); | ||
3538 | 3542 | ||
3539 | b43_set_status(dev, B43_STAT_INITIALIZED); | 3543 | b43_set_status(dev, B43_STAT_INITIALIZED); |
3540 | 3544 | ||
3541 | b43_leds_init(dev); | 3545 | if (!dev->suspend_in_progress) |
3546 | b43_leds_init(dev); | ||
3542 | out: | 3547 | out: |
3543 | return err; | 3548 | return err; |
3544 | 3549 | ||
@@ -4135,6 +4140,7 @@ static int b43_suspend(struct ssb_device *dev, pm_message_t state) | |||
4135 | b43dbg(wl, "Suspending...\n"); | 4140 | b43dbg(wl, "Suspending...\n"); |
4136 | 4141 | ||
4137 | mutex_lock(&wl->mutex); | 4142 | mutex_lock(&wl->mutex); |
4143 | wldev->suspend_in_progress = true; | ||
4138 | wldev->suspend_init_status = b43_status(wldev); | 4144 | wldev->suspend_init_status = b43_status(wldev); |
4139 | if (wldev->suspend_init_status >= B43_STAT_STARTED) | 4145 | if (wldev->suspend_init_status >= B43_STAT_STARTED) |
4140 | b43_wireless_core_stop(wldev); | 4146 | b43_wireless_core_stop(wldev); |
@@ -4166,15 +4172,17 @@ static int b43_resume(struct ssb_device *dev) | |||
4166 | if (wldev->suspend_init_status >= B43_STAT_STARTED) { | 4172 | if (wldev->suspend_init_status >= B43_STAT_STARTED) { |
4167 | err = b43_wireless_core_start(wldev); | 4173 | err = b43_wireless_core_start(wldev); |
4168 | if (err) { | 4174 | if (err) { |
4175 | b43_leds_exit(wldev); | ||
4176 | b43_rng_exit(wldev->wl, true); | ||
4169 | b43_wireless_core_exit(wldev); | 4177 | b43_wireless_core_exit(wldev); |
4170 | b43err(wl, "Resume failed at core start\n"); | 4178 | b43err(wl, "Resume failed at core start\n"); |
4171 | goto out; | 4179 | goto out; |
4172 | } | 4180 | } |
4173 | } | 4181 | } |
4174 | mutex_unlock(&wl->mutex); | ||
4175 | |||
4176 | b43dbg(wl, "Device resumed.\n"); | 4182 | b43dbg(wl, "Device resumed.\n"); |
4177 | out: | 4183 | out: |
4184 | wldev->suspend_in_progress = false; | ||
4185 | mutex_unlock(&wl->mutex); | ||
4178 | return err; | 4186 | return err; |
4179 | } | 4187 | } |
4180 | 4188 | ||
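[Annotation] The suspend/resume changes bracket the whole operation with a suspend_in_progress flag so that core init/exit can skip re-registering (or unregistering) the LED and HWRNG class devices while the system is suspending. A condensed sketch of the bracket, with the core start/stop calls elided (field names follow the patch, bodies are not literal):

static int b43_suspend_sketch(struct b43_wldev *wldev)
{
        mutex_lock(&wldev->wl->mutex);
        wldev->suspend_in_progress = true;
        /* wireless_core_stop()/exit() run here; because the flag is set,
         * b43_leds_exit() and b43_rng_exit() are skipped. */
        mutex_unlock(&wldev->wl->mutex);
        return 0;
}

static int b43_resume_sketch(struct b43_wldev *wldev)
{
        int err = 0;

        mutex_lock(&wldev->wl->mutex);
        /* wireless_core_init()/start() run here; on failure the LED and
         * HWRNG devices are torn down explicitly before the core exits. */
        wldev->suspend_in_progress = false;
        mutex_unlock(&wldev->wl->mutex);
        return err;
}

Note also that resume now releases the mutex at the common exit label, after clearing the flag, rather than before the out: label as it did previously.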
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c index 83161d9af813..6e08405e8026 100644 --- a/drivers/net/wireless/b43legacy/dma.c +++ b/drivers/net/wireless/b43legacy/dma.c | |||
@@ -1164,7 +1164,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring, | |||
1164 | { | 1164 | { |
1165 | const struct b43legacy_dma_ops *ops = ring->ops; | 1165 | const struct b43legacy_dma_ops *ops = ring->ops; |
1166 | u8 *header; | 1166 | u8 *header; |
1167 | int slot; | 1167 | int slot, old_top_slot, old_used_slots; |
1168 | int err; | 1168 | int err; |
1169 | struct b43legacy_dmadesc_generic *desc; | 1169 | struct b43legacy_dmadesc_generic *desc; |
1170 | struct b43legacy_dmadesc_meta *meta; | 1170 | struct b43legacy_dmadesc_meta *meta; |
@@ -1174,6 +1174,9 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring, | |||
1174 | #define SLOTS_PER_PACKET 2 | 1174 | #define SLOTS_PER_PACKET 2 |
1175 | B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); | 1175 | B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); |
1176 | 1176 | ||
1177 | old_top_slot = ring->current_slot; | ||
1178 | old_used_slots = ring->used_slots; | ||
1179 | |||
1177 | /* Get a slot for the header. */ | 1180 | /* Get a slot for the header. */ |
1178 | slot = request_slot(ring); | 1181 | slot = request_slot(ring); |
1179 | desc = ops->idx2desc(ring, slot, &meta_hdr); | 1182 | desc = ops->idx2desc(ring, slot, &meta_hdr); |
@@ -1181,9 +1184,14 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring, | |||
1181 | 1184 | ||
1182 | header = &(ring->txhdr_cache[slot * sizeof( | 1185 | header = &(ring->txhdr_cache[slot * sizeof( |
1183 | struct b43legacy_txhdr_fw3)]); | 1186 | struct b43legacy_txhdr_fw3)]); |
1184 | b43legacy_generate_txhdr(ring->dev, header, | 1187 | err = b43legacy_generate_txhdr(ring->dev, header, |
1185 | skb->data, skb->len, ctl, | 1188 | skb->data, skb->len, ctl, |
1186 | generate_cookie(ring, slot)); | 1189 | generate_cookie(ring, slot)); |
1190 | if (unlikely(err)) { | ||
1191 | ring->current_slot = old_top_slot; | ||
1192 | ring->used_slots = old_used_slots; | ||
1193 | return err; | ||
1194 | } | ||
1187 | 1195 | ||
1188 | meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, | 1196 | meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, |
1189 | sizeof(struct b43legacy_txhdr_fw3), 1); | 1197 | sizeof(struct b43legacy_txhdr_fw3), 1); |
@@ -1206,6 +1214,8 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring, | |||
1206 | if (dma_mapping_error(meta->dmaaddr)) { | 1214 | if (dma_mapping_error(meta->dmaaddr)) { |
1207 | bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); | 1215 | bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); |
1208 | if (!bounce_skb) { | 1216 | if (!bounce_skb) { |
1217 | ring->current_slot = old_top_slot; | ||
1218 | ring->used_slots = old_used_slots; | ||
1209 | err = -ENOMEM; | 1219 | err = -ENOMEM; |
1210 | goto out_unmap_hdr; | 1220 | goto out_unmap_hdr; |
1211 | } | 1221 | } |
@@ -1216,6 +1226,8 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring, | |||
1216 | meta->skb = skb; | 1226 | meta->skb = skb; |
1217 | meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); | 1227 | meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); |
1218 | if (dma_mapping_error(meta->dmaaddr)) { | 1228 | if (dma_mapping_error(meta->dmaaddr)) { |
1229 | ring->current_slot = old_top_slot; | ||
1230 | ring->used_slots = old_used_slots; | ||
1219 | err = -EIO; | 1231 | err = -EIO; |
1220 | goto out_free_bounce; | 1232 | goto out_free_bounce; |
1221 | } | 1233 | } |
@@ -1282,6 +1294,13 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev, | |||
1282 | B43legacy_BUG_ON(ring->stopped); | 1294 | B43legacy_BUG_ON(ring->stopped); |
1283 | 1295 | ||
1284 | err = dma_tx_fragment(ring, skb, ctl); | 1296 | err = dma_tx_fragment(ring, skb, ctl); |
1297 | if (unlikely(err == -ENOKEY)) { | ||
1298 | /* Drop this packet, as we don't have the encryption key | ||
1299 | * anymore and must not transmit it unencrypted. */ | ||
1300 | dev_kfree_skb_any(skb); | ||
1301 | err = 0; | ||
1302 | goto out_unlock; | ||
1303 | } | ||
1285 | if (unlikely(err)) { | 1304 | if (unlikely(err)) { |
1286 | b43legacyerr(dev->wl, "DMA tx mapping failure\n"); | 1305 | b43legacyerr(dev->wl, "DMA tx mapping failure\n"); |
1287 | goto out_unlock; | 1306 | goto out_unlock; |
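[Annotation] Two behaviours are added to the legacy TX path here. First, dma_tx_fragment() records the ring indices before it starts consuming slots, so any failure (header generation, DMA mapping, bounce-buffer allocation) rolls the ring back instead of leaking slots. Second, b43legacy_generate_txhdr() can now fail with -ENOKEY (see the xmit.c hunks below), and the caller treats that as a silent drop. The caller-side handling, condensed from the hunk above:

err = dma_tx_fragment(ring, skb, ctl);
if (unlikely(err == -ENOKEY)) {
        /* The key vanished (e.g. across resume); never send the frame
         * unencrypted -- free it and report success to mac80211. */
        dev_kfree_skb_any(skb);
        err = 0;
        goto out_unlock;
}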
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index aa20d5d56e2f..53f7f2e97615 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c | |||
@@ -3160,8 +3160,6 @@ static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev) | |||
3160 | b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0414, 0x01F4); | 3160 | b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0414, 0x01F4); |
3161 | 3161 | ||
3162 | ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */ | 3162 | ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */ |
3163 | memset(wl->bssid, 0, ETH_ALEN); | ||
3164 | memset(wl->mac_addr, 0, ETH_ALEN); | ||
3165 | b43legacy_upload_card_macaddress(dev); | 3163 | b43legacy_upload_card_macaddress(dev); |
3166 | b43legacy_security_init(dev); | 3164 | b43legacy_security_init(dev); |
3167 | b43legacy_rng_init(wl); | 3165 | b43legacy_rng_init(wl); |
@@ -3263,6 +3261,13 @@ static int b43legacy_op_start(struct ieee80211_hw *hw) | |||
3263 | * LEDs that are registered later depend on it. */ | 3261 | * LEDs that are registered later depend on it. */ |
3264 | b43legacy_rfkill_init(dev); | 3262 | b43legacy_rfkill_init(dev); |
3265 | 3263 | ||
3264 | /* Kill all old instance specific information to make sure | ||
3265 | * the card won't use it in the short timeframe between start | ||
3266 | * and mac80211 reconfiguring it. */ | ||
3267 | memset(wl->bssid, 0, ETH_ALEN); | ||
3268 | memset(wl->mac_addr, 0, ETH_ALEN); | ||
3269 | wl->filter_flags = 0; | ||
3270 | |||
3266 | mutex_lock(&wl->mutex); | 3271 | mutex_lock(&wl->mutex); |
3267 | 3272 | ||
3268 | if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { | 3273 | if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { |
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c index e4f4c5c39e33..bcdd54eb2edb 100644 --- a/drivers/net/wireless/b43legacy/pio.c +++ b/drivers/net/wireless/b43legacy/pio.c | |||
@@ -181,7 +181,7 @@ union txhdr_union { | |||
181 | struct b43legacy_txhdr_fw3 txhdr_fw3; | 181 | struct b43legacy_txhdr_fw3 txhdr_fw3; |
182 | }; | 182 | }; |
183 | 183 | ||
184 | static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue, | 184 | static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue, |
185 | struct sk_buff *skb, | 185 | struct sk_buff *skb, |
186 | struct b43legacy_pio_txpacket *packet, | 186 | struct b43legacy_pio_txpacket *packet, |
187 | size_t txhdr_size) | 187 | size_t txhdr_size) |
@@ -189,14 +189,17 @@ static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue, | |||
189 | union txhdr_union txhdr_data; | 189 | union txhdr_union txhdr_data; |
190 | u8 *txhdr = NULL; | 190 | u8 *txhdr = NULL; |
191 | unsigned int octets; | 191 | unsigned int octets; |
192 | int err; | ||
192 | 193 | ||
193 | txhdr = (u8 *)(&txhdr_data.txhdr_fw3); | 194 | txhdr = (u8 *)(&txhdr_data.txhdr_fw3); |
194 | 195 | ||
195 | B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); | 196 | B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); |
196 | b43legacy_generate_txhdr(queue->dev, | 197 | err = b43legacy_generate_txhdr(queue->dev, |
197 | txhdr, skb->data, skb->len, | 198 | txhdr, skb->data, skb->len, |
198 | &packet->txstat.control, | 199 | &packet->txstat.control, |
199 | generate_cookie(queue, packet)); | 200 | generate_cookie(queue, packet)); |
201 | if (err) | ||
202 | return err; | ||
200 | 203 | ||
201 | tx_start(queue); | 204 | tx_start(queue); |
202 | octets = skb->len + txhdr_size; | 205 | octets = skb->len + txhdr_size; |
@@ -204,6 +207,8 @@ static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue, | |||
204 | octets--; | 207 | octets--; |
205 | tx_data(queue, txhdr, (u8 *)skb->data, octets); | 208 | tx_data(queue, txhdr, (u8 *)skb->data, octets); |
206 | tx_complete(queue, skb); | 209 | tx_complete(queue, skb); |
210 | |||
211 | return 0; | ||
207 | } | 212 | } |
208 | 213 | ||
209 | static void free_txpacket(struct b43legacy_pio_txpacket *packet, | 214 | static void free_txpacket(struct b43legacy_pio_txpacket *packet, |
@@ -226,6 +231,7 @@ static int pio_tx_packet(struct b43legacy_pio_txpacket *packet) | |||
226 | struct b43legacy_pioqueue *queue = packet->queue; | 231 | struct b43legacy_pioqueue *queue = packet->queue; |
227 | struct sk_buff *skb = packet->skb; | 232 | struct sk_buff *skb = packet->skb; |
228 | u16 octets; | 233 | u16 octets; |
234 | int err; | ||
229 | 235 | ||
230 | octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3); | 236 | octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3); |
231 | if (queue->tx_devq_size < octets) { | 237 | if (queue->tx_devq_size < octets) { |
@@ -247,8 +253,14 @@ static int pio_tx_packet(struct b43legacy_pio_txpacket *packet) | |||
247 | if (queue->tx_devq_used + octets > queue->tx_devq_size) | 253 | if (queue->tx_devq_used + octets > queue->tx_devq_size) |
248 | return -EBUSY; | 254 | return -EBUSY; |
249 | /* Now poke the device. */ | 255 | /* Now poke the device. */ |
250 | pio_tx_write_fragment(queue, skb, packet, | 256 | err = pio_tx_write_fragment(queue, skb, packet, |
251 | sizeof(struct b43legacy_txhdr_fw3)); | 257 | sizeof(struct b43legacy_txhdr_fw3)); |
258 | if (unlikely(err == -ENOKEY)) { | ||
259 | /* Drop this packet, as we don't have the encryption key | ||
260 | * anymore and must not transmit it unencrypted. */ | ||
261 | free_txpacket(packet, 1); | ||
262 | return 0; | ||
263 | } | ||
252 | 264 | ||
253 | /* Account for the packet size. | 265 | /* Account for the packet size. |
254 | * (We must not overflow the device TX queue) | 266 | * (We must not overflow the device TX queue) |
@@ -486,6 +498,9 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, | |||
486 | queue = parse_cookie(dev, status->cookie, &packet); | 498 | queue = parse_cookie(dev, status->cookie, &packet); |
487 | B43legacy_WARN_ON(!queue); | 499 | B43legacy_WARN_ON(!queue); |
488 | 500 | ||
501 | if (!packet->skb) | ||
502 | return; | ||
503 | |||
489 | queue->tx_devq_packets--; | 504 | queue->tx_devq_packets--; |
490 | queue->tx_devq_used -= (packet->skb->len + | 505 | queue->tx_devq_used -= (packet->skb->len + |
491 | sizeof(struct b43legacy_txhdr_fw3)); | 506 | sizeof(struct b43legacy_txhdr_fw3)); |
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c index e20c552442d5..d84408a82db9 100644 --- a/drivers/net/wireless/b43legacy/xmit.c +++ b/drivers/net/wireless/b43legacy/xmit.c | |||
@@ -181,7 +181,7 @@ static u8 b43legacy_calc_fallback_rate(u8 bitrate) | |||
181 | return 0; | 181 | return 0; |
182 | } | 182 | } |
183 | 183 | ||
184 | static void generate_txhdr_fw3(struct b43legacy_wldev *dev, | 184 | static int generate_txhdr_fw3(struct b43legacy_wldev *dev, |
185 | struct b43legacy_txhdr_fw3 *txhdr, | 185 | struct b43legacy_txhdr_fw3 *txhdr, |
186 | const unsigned char *fragment_data, | 186 | const unsigned char *fragment_data, |
187 | unsigned int fragment_len, | 187 | unsigned int fragment_len, |
@@ -252,6 +252,13 @@ static void generate_txhdr_fw3(struct b43legacy_wldev *dev, | |||
252 | iv_len = min((size_t)txctl->iv_len, | 252 | iv_len = min((size_t)txctl->iv_len, |
253 | ARRAY_SIZE(txhdr->iv)); | 253 | ARRAY_SIZE(txhdr->iv)); |
254 | memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len); | 254 | memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len); |
255 | } else { | ||
256 | /* This key is invalid. This might only happen | ||
257 | * in a short timeframe after machine resume before | ||
258 | * we were able to reconfigure keys. | ||
259 | * Drop this packet completely. Do not transmit it | ||
260 | * unencrypted to avoid leaking information. */ | ||
261 | return -ENOKEY; | ||
255 | } | 262 | } |
256 | } | 263 | } |
257 | b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) | 264 | b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) |
@@ -345,16 +352,18 @@ static void generate_txhdr_fw3(struct b43legacy_wldev *dev, | |||
345 | /* Apply the bitfields */ | 352 | /* Apply the bitfields */ |
346 | txhdr->mac_ctl = cpu_to_le32(mac_ctl); | 353 | txhdr->mac_ctl = cpu_to_le32(mac_ctl); |
347 | txhdr->phy_ctl = cpu_to_le16(phy_ctl); | 354 | txhdr->phy_ctl = cpu_to_le16(phy_ctl); |
355 | |||
356 | return 0; | ||
348 | } | 357 | } |
349 | 358 | ||
350 | void b43legacy_generate_txhdr(struct b43legacy_wldev *dev, | 359 | int b43legacy_generate_txhdr(struct b43legacy_wldev *dev, |
351 | u8 *txhdr, | 360 | u8 *txhdr, |
352 | const unsigned char *fragment_data, | 361 | const unsigned char *fragment_data, |
353 | unsigned int fragment_len, | 362 | unsigned int fragment_len, |
354 | const struct ieee80211_tx_control *txctl, | 363 | const struct ieee80211_tx_control *txctl, |
355 | u16 cookie) | 364 | u16 cookie) |
356 | { | 365 | { |
357 | generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr, | 366 | return generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr, |
358 | fragment_data, fragment_len, | 367 | fragment_data, fragment_len, |
359 | txctl, cookie); | 368 | txctl, cookie); |
360 | } | 369 | } |
diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h index 8a155d0a5d1f..bab47928a0c9 100644 --- a/drivers/net/wireless/b43legacy/xmit.h +++ b/drivers/net/wireless/b43legacy/xmit.h | |||
@@ -76,7 +76,7 @@ struct b43legacy_txhdr_fw3 { | |||
76 | 76 | ||
77 | 77 | ||
78 | 78 | ||
79 | void b43legacy_generate_txhdr(struct b43legacy_wldev *dev, | 79 | int b43legacy_generate_txhdr(struct b43legacy_wldev *dev, |
80 | u8 *txhdr, | 80 | u8 *txhdr, |
81 | const unsigned char *fragment_data, | 81 | const unsigned char *fragment_data, |
82 | unsigned int fragment_len, | 82 | unsigned int fragment_len, |
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index 2ab107f45793..5bf9e00b070c 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -162,7 +162,7 @@ that only one external action is invoked at a time. | |||
162 | #include <linux/firmware.h> | 162 | #include <linux/firmware.h> |
163 | #include <linux/acpi.h> | 163 | #include <linux/acpi.h> |
164 | #include <linux/ctype.h> | 164 | #include <linux/ctype.h> |
165 | #include <linux/latency.h> | 165 | #include <linux/pm_qos_params.h> |
166 | 166 | ||
167 | #include "ipw2100.h" | 167 | #include "ipw2100.h" |
168 | 168 | ||
@@ -1701,7 +1701,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) | |||
1701 | /* the ipw2100 hardware really doesn't want power management delays | 1701 | /* the ipw2100 hardware really doesn't want power management delays |
1702 | * longer than 175usec | 1702 | * longer than 175usec |
1703 | */ | 1703 | */ |
1704 | modify_acceptable_latency("ipw2100", 175); | 1704 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 175); |
1705 | 1705 | ||
1706 | /* If the interrupt is enabled, turn it off... */ | 1706 | /* If the interrupt is enabled, turn it off... */ |
1707 | spin_lock_irqsave(&priv->low_lock, flags); | 1707 | spin_lock_irqsave(&priv->low_lock, flags); |
@@ -1856,7 +1856,8 @@ static void ipw2100_down(struct ipw2100_priv *priv) | |||
1856 | ipw2100_disable_interrupts(priv); | 1856 | ipw2100_disable_interrupts(priv); |
1857 | spin_unlock_irqrestore(&priv->low_lock, flags); | 1857 | spin_unlock_irqrestore(&priv->low_lock, flags); |
1858 | 1858 | ||
1859 | modify_acceptable_latency("ipw2100", INFINITE_LATENCY); | 1859 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", |
1860 | PM_QOS_DEFAULT_VALUE); | ||
1860 | 1861 | ||
1861 | /* We have to signal any supplicant if we are disassociating */ | 1862 | /* We have to signal any supplicant if we are disassociating */ |
1862 | if (associated) | 1863 | if (associated) |
@@ -6554,7 +6555,8 @@ static int __init ipw2100_init(void) | |||
6554 | if (ret) | 6555 | if (ret) |
6555 | goto out; | 6556 | goto out; |
6556 | 6557 | ||
6557 | set_acceptable_latency("ipw2100", INFINITE_LATENCY); | 6558 | pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", |
6559 | PM_QOS_DEFAULT_VALUE); | ||
6558 | #ifdef CONFIG_IPW2100_DEBUG | 6560 | #ifdef CONFIG_IPW2100_DEBUG |
6559 | ipw2100_debug_level = debug; | 6561 | ipw2100_debug_level = debug; |
6560 | ret = driver_create_file(&ipw2100_pci_driver.driver, | 6562 | ret = driver_create_file(&ipw2100_pci_driver.driver, |
@@ -6576,7 +6578,7 @@ static void __exit ipw2100_exit(void) | |||
6576 | &driver_attr_debug_level); | 6578 | &driver_attr_debug_level); |
6577 | #endif | 6579 | #endif |
6578 | pci_unregister_driver(&ipw2100_pci_driver); | 6580 | pci_unregister_driver(&ipw2100_pci_driver); |
6579 | remove_acceptable_latency("ipw2100"); | 6581 | pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100"); |
6580 | } | 6582 | } |
6581 | 6583 | ||
6582 | module_init(ipw2100_init); | 6584 | module_init(ipw2100_init); |
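[Annotation] These hunks convert ipw2100 from the old latency.h interface to the PM QoS API. The lifecycle is: register a requirement at module init with no constraint, tighten it to 175 usec while the adapter is up, relax it again in ipw2100_down(), and drop it at module exit. Collected in one place (calls and values exactly as in the hunks above):

#include <linux/pm_qos_params.h>

/* module init: register with no constraint */
pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100",
                       PM_QOS_DEFAULT_VALUE);

/* ipw2100_up(): the hardware cannot tolerate PM delays above 175 usec */
pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 175);

/* ipw2100_down(): back to "don't care" */
pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100",
                          PM_QOS_DEFAULT_VALUE);

/* module exit: drop the requirement entirely */
pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100");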
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index f55c75712b55..5ee1ad69898b 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -4207,13 +4207,13 @@ static u8 ratio2dB[100] = { | |||
4207 | * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ | 4207 | * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ |
4208 | int iwl3945_calc_db_from_ratio(int sig_ratio) | 4208 | int iwl3945_calc_db_from_ratio(int sig_ratio) |
4209 | { | 4209 | { |
4210 | /* Anything above 1000:1 just report as 60 dB */ | 4210 | /* 1000:1 or higher just report as 60 dB */ |
4211 | if (sig_ratio > 1000) | 4211 | if (sig_ratio >= 1000) |
4212 | return 60; | 4212 | return 60; |
4213 | 4213 | ||
4214 | /* Above 100:1, divide by 10 and use table, | 4214 | /* 100:1 or higher, divide by 10 and use table, |
4215 | * add 20 dB to make up for divide by 10 */ | 4215 | * add 20 dB to make up for divide by 10 */ |
4216 | if (sig_ratio > 100) | 4216 | if (sig_ratio >= 100) |
4217 | return (20 + (int)ratio2dB[sig_ratio/10]); | 4217 | return (20 + (int)ratio2dB[sig_ratio/10]); |
4218 | 4218 | ||
4219 | /* We shouldn't see this */ | 4219 | /* We shouldn't see this */ |
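[Annotation] The comparison change above is an off-by-one fix: ratio2dB[] holds 100 entries (indices 0-99), so with the old strict ">" tests a signal ratio of exactly 1000 fell through to the table branch and read ratio2dB[1000/10], i.e. ratio2dB[100], one element past the end of the array. With ">=", a 1000:1 or larger ratio returns 60 dB directly, and exactly 100:1 now uses the table (20 dB + ratio2dB[10]) instead of dropping through to the "shouldn't see this" fallback.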
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c index d2fa079fbc4c..f479c1af6782 100644 --- a/drivers/net/wireless/netwave_cs.c +++ b/drivers/net/wireless/netwave_cs.c | |||
@@ -195,7 +195,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *arg); /* Runs after card | |||
195 | static void netwave_detach(struct pcmcia_device *p_dev); /* Destroy instance */ | 195 | static void netwave_detach(struct pcmcia_device *p_dev); /* Destroy instance */ |
196 | 196 | ||
197 | /* Hardware configuration */ | 197 | /* Hardware configuration */ |
198 | static void netwave_doreset(kio_addr_t iobase, u_char __iomem *ramBase); | 198 | static void netwave_doreset(unsigned int iobase, u_char __iomem *ramBase); |
199 | static void netwave_reset(struct net_device *dev); | 199 | static void netwave_reset(struct net_device *dev); |
200 | 200 | ||
201 | /* Misc device stuff */ | 201 | /* Misc device stuff */ |
@@ -309,7 +309,7 @@ static inline void wait_WOC(unsigned int iobase) | |||
309 | } | 309 | } |
310 | 310 | ||
311 | static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase, | 311 | static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase, |
312 | kio_addr_t iobase) { | 312 | unsigned int iobase) { |
313 | u_short resultBuffer; | 313 | u_short resultBuffer; |
314 | 314 | ||
315 | /* if time since last snapshot is > 1 sec. (100 jiffies?) then take | 315 | /* if time since last snapshot is > 1 sec. (100 jiffies?) then take |
@@ -340,7 +340,7 @@ static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase, | |||
340 | static struct iw_statistics *netwave_get_wireless_stats(struct net_device *dev) | 340 | static struct iw_statistics *netwave_get_wireless_stats(struct net_device *dev) |
341 | { | 341 | { |
342 | unsigned long flags; | 342 | unsigned long flags; |
343 | kio_addr_t iobase = dev->base_addr; | 343 | unsigned int iobase = dev->base_addr; |
344 | netwave_private *priv = netdev_priv(dev); | 344 | netwave_private *priv = netdev_priv(dev); |
345 | u_char __iomem *ramBase = priv->ramBase; | 345 | u_char __iomem *ramBase = priv->ramBase; |
346 | struct iw_statistics* wstats; | 346 | struct iw_statistics* wstats; |
@@ -471,7 +471,7 @@ static int netwave_set_nwid(struct net_device *dev, | |||
471 | char *extra) | 471 | char *extra) |
472 | { | 472 | { |
473 | unsigned long flags; | 473 | unsigned long flags; |
474 | kio_addr_t iobase = dev->base_addr; | 474 | unsigned int iobase = dev->base_addr; |
475 | netwave_private *priv = netdev_priv(dev); | 475 | netwave_private *priv = netdev_priv(dev); |
476 | u_char __iomem *ramBase = priv->ramBase; | 476 | u_char __iomem *ramBase = priv->ramBase; |
477 | 477 | ||
@@ -518,7 +518,7 @@ static int netwave_set_scramble(struct net_device *dev, | |||
518 | char *key) | 518 | char *key) |
519 | { | 519 | { |
520 | unsigned long flags; | 520 | unsigned long flags; |
521 | kio_addr_t iobase = dev->base_addr; | 521 | unsigned int iobase = dev->base_addr; |
522 | netwave_private *priv = netdev_priv(dev); | 522 | netwave_private *priv = netdev_priv(dev); |
523 | u_char __iomem *ramBase = priv->ramBase; | 523 | u_char __iomem *ramBase = priv->ramBase; |
524 | 524 | ||
@@ -621,7 +621,7 @@ static int netwave_get_snap(struct net_device *dev, | |||
621 | char *extra) | 621 | char *extra) |
622 | { | 622 | { |
623 | unsigned long flags; | 623 | unsigned long flags; |
624 | kio_addr_t iobase = dev->base_addr; | 624 | unsigned int iobase = dev->base_addr; |
625 | netwave_private *priv = netdev_priv(dev); | 625 | netwave_private *priv = netdev_priv(dev); |
626 | u_char __iomem *ramBase = priv->ramBase; | 626 | u_char __iomem *ramBase = priv->ramBase; |
627 | 627 | ||
@@ -874,7 +874,7 @@ static int netwave_resume(struct pcmcia_device *link) | |||
874 | * | 874 | * |
875 | * Proper hardware reset of the card. | 875 | * Proper hardware reset of the card. |
876 | */ | 876 | */ |
877 | static void netwave_doreset(kio_addr_t ioBase, u_char __iomem *ramBase) | 877 | static void netwave_doreset(unsigned int ioBase, u_char __iomem *ramBase) |
878 | { | 878 | { |
879 | /* Reset card */ | 879 | /* Reset card */ |
880 | wait_WOC(ioBase); | 880 | wait_WOC(ioBase); |
@@ -892,7 +892,7 @@ static void netwave_reset(struct net_device *dev) { | |||
892 | /* u_char state; */ | 892 | /* u_char state; */ |
893 | netwave_private *priv = netdev_priv(dev); | 893 | netwave_private *priv = netdev_priv(dev); |
894 | u_char __iomem *ramBase = priv->ramBase; | 894 | u_char __iomem *ramBase = priv->ramBase; |
895 | kio_addr_t iobase = dev->base_addr; | 895 | unsigned int iobase = dev->base_addr; |
896 | 896 | ||
897 | DEBUG(0, "netwave_reset: Done with hardware reset\n"); | 897 | DEBUG(0, "netwave_reset: Done with hardware reset\n"); |
898 | 898 | ||
@@ -973,7 +973,7 @@ static int netwave_hw_xmit(unsigned char* data, int len, | |||
973 | 973 | ||
974 | netwave_private *priv = netdev_priv(dev); | 974 | netwave_private *priv = netdev_priv(dev); |
975 | u_char __iomem * ramBase = priv->ramBase; | 975 | u_char __iomem * ramBase = priv->ramBase; |
976 | kio_addr_t iobase = dev->base_addr; | 976 | unsigned int iobase = dev->base_addr; |
977 | 977 | ||
978 | /* Disable interrupts & save flags */ | 978 | /* Disable interrupts & save flags */ |
979 | spin_lock_irqsave(&priv->spinlock, flags); | 979 | spin_lock_irqsave(&priv->spinlock, flags); |
@@ -1065,7 +1065,7 @@ static int netwave_start_xmit(struct sk_buff *skb, struct net_device *dev) { | |||
1065 | */ | 1065 | */ |
1066 | static irqreturn_t netwave_interrupt(int irq, void* dev_id) | 1066 | static irqreturn_t netwave_interrupt(int irq, void* dev_id) |
1067 | { | 1067 | { |
1068 | kio_addr_t iobase; | 1068 | unsigned int iobase; |
1069 | u_char __iomem *ramBase; | 1069 | u_char __iomem *ramBase; |
1070 | struct net_device *dev = (struct net_device *)dev_id; | 1070 | struct net_device *dev = (struct net_device *)dev_id; |
1071 | struct netwave_private *priv = netdev_priv(dev); | 1071 | struct netwave_private *priv = netdev_priv(dev); |
@@ -1235,7 +1235,7 @@ static int netwave_rx(struct net_device *dev) | |||
1235 | { | 1235 | { |
1236 | netwave_private *priv = netdev_priv(dev); | 1236 | netwave_private *priv = netdev_priv(dev); |
1237 | u_char __iomem *ramBase = priv->ramBase; | 1237 | u_char __iomem *ramBase = priv->ramBase; |
1238 | kio_addr_t iobase = dev->base_addr; | 1238 | unsigned int iobase = dev->base_addr; |
1239 | u_char rxStatus; | 1239 | u_char rxStatus; |
1240 | struct sk_buff *skb = NULL; | 1240 | struct sk_buff *skb = NULL; |
1241 | unsigned int curBuffer, | 1241 | unsigned int curBuffer, |
@@ -1388,7 +1388,7 @@ module_exit(exit_netwave_cs); | |||
1388 | */ | 1388 | */ |
1389 | static void set_multicast_list(struct net_device *dev) | 1389 | static void set_multicast_list(struct net_device *dev) |
1390 | { | 1390 | { |
1391 | kio_addr_t iobase = dev->base_addr; | 1391 | unsigned int iobase = dev->base_addr; |
1392 | netwave_private *priv = netdev_priv(dev); | 1392 | netwave_private *priv = netdev_priv(dev); |
1393 | u_char __iomem * ramBase = priv->ramBase; | 1393 | u_char __iomem * ramBase = priv->ramBase; |
1394 | u_char rcvMode = 0; | 1394 | u_char rcvMode = 0; |
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index c2037b2a05bf..06eea6ab7bf0 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c | |||
@@ -149,7 +149,7 @@ psa_write(struct net_device * dev, | |||
149 | net_local *lp = netdev_priv(dev); | 149 | net_local *lp = netdev_priv(dev); |
150 | u_char __iomem *ptr = lp->mem + PSA_ADDR + (o << 1); | 150 | u_char __iomem *ptr = lp->mem + PSA_ADDR + (o << 1); |
151 | int count = 0; | 151 | int count = 0; |
152 | kio_addr_t base = dev->base_addr; | 152 | unsigned int base = dev->base_addr; |
153 | /* As there seem to have no flag PSA_BUSY as in the ISA model, we are | 153 | /* As there seem to have no flag PSA_BUSY as in the ISA model, we are |
154 | * oblige to verify this address to know when the PSA is ready... */ | 154 | * oblige to verify this address to know when the PSA is ready... */ |
155 | volatile u_char __iomem *verify = lp->mem + PSA_ADDR + | 155 | volatile u_char __iomem *verify = lp->mem + PSA_ADDR + |
@@ -708,7 +708,7 @@ static void wl_update_history(wavepoint_history *wavepoint, unsigned char sigqua | |||
708 | /* Perform a handover to a new WavePoint */ | 708 | /* Perform a handover to a new WavePoint */ |
709 | static void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp) | 709 | static void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp) |
710 | { | 710 | { |
711 | kio_addr_t base = lp->dev->base_addr; | 711 | unsigned int base = lp->dev->base_addr; |
712 | mm_t m; | 712 | mm_t m; |
713 | unsigned long flags; | 713 | unsigned long flags; |
714 | 714 | ||
@@ -821,7 +821,7 @@ wv_82593_cmd(struct net_device * dev, | |||
821 | int cmd, | 821 | int cmd, |
822 | int result) | 822 | int result) |
823 | { | 823 | { |
824 | kio_addr_t base = dev->base_addr; | 824 | unsigned int base = dev->base_addr; |
825 | int status; | 825 | int status; |
826 | int wait_completed; | 826 | int wait_completed; |
827 | long spin; | 827 | long spin; |
@@ -945,7 +945,7 @@ read_ringbuf(struct net_device * dev, | |||
945 | char * buf, | 945 | char * buf, |
946 | int len) | 946 | int len) |
947 | { | 947 | { |
948 | kio_addr_t base = dev->base_addr; | 948 | unsigned int base = dev->base_addr; |
949 | int ring_ptr = addr; | 949 | int ring_ptr = addr; |
950 | int chunk_len; | 950 | int chunk_len; |
951 | char * buf_ptr = buf; | 951 | char * buf_ptr = buf; |
@@ -1096,7 +1096,7 @@ wv_psa_show(psa_t * p) | |||
1096 | static void | 1096 | static void |
1097 | wv_mmc_show(struct net_device * dev) | 1097 | wv_mmc_show(struct net_device * dev) |
1098 | { | 1098 | { |
1099 | kio_addr_t base = dev->base_addr; | 1099 | unsigned int base = dev->base_addr; |
1100 | net_local * lp = netdev_priv(dev); | 1100 | net_local * lp = netdev_priv(dev); |
1101 | mmr_t m; | 1101 | mmr_t m; |
1102 | 1102 | ||
@@ -1275,7 +1275,7 @@ wv_packet_info(u_char * p, /* Packet to dump */ | |||
1275 | static inline void | 1275 | static inline void |
1276 | wv_init_info(struct net_device * dev) | 1276 | wv_init_info(struct net_device * dev) |
1277 | { | 1277 | { |
1278 | kio_addr_t base = dev->base_addr; | 1278 | unsigned int base = dev->base_addr; |
1279 | psa_t psa; | 1279 | psa_t psa; |
1280 | DECLARE_MAC_BUF(mac); | 1280 | DECLARE_MAC_BUF(mac); |
1281 | 1281 | ||
@@ -1294,7 +1294,7 @@ wv_init_info(struct net_device * dev) | |||
1294 | 1294 | ||
1295 | #ifdef DEBUG_BASIC_SHOW | 1295 | #ifdef DEBUG_BASIC_SHOW |
1296 | /* Now, let's go for the basic stuff */ | 1296 | /* Now, let's go for the basic stuff */ |
1297 | printk(KERN_NOTICE "%s: WaveLAN: port %#lx, irq %d, " | 1297 | printk(KERN_NOTICE "%s: WaveLAN: port %#x, irq %d, " |
1298 | "hw_addr %s", | 1298 | "hw_addr %s", |
1299 | dev->name, base, dev->irq, | 1299 | dev->name, base, dev->irq, |
1300 | print_mac(mac, dev->dev_addr)); | 1300 | print_mac(mac, dev->dev_addr)); |
@@ -1828,7 +1828,7 @@ static int wavelan_set_nwid(struct net_device *dev, | |||
1828 | union iwreq_data *wrqu, | 1828 | union iwreq_data *wrqu, |
1829 | char *extra) | 1829 | char *extra) |
1830 | { | 1830 | { |
1831 | kio_addr_t base = dev->base_addr; | 1831 | unsigned int base = dev->base_addr; |
1832 | net_local *lp = netdev_priv(dev); | 1832 | net_local *lp = netdev_priv(dev); |
1833 | psa_t psa; | 1833 | psa_t psa; |
1834 | mm_t m; | 1834 | mm_t m; |
@@ -1918,7 +1918,7 @@ static int wavelan_set_freq(struct net_device *dev, | |||
1918 | union iwreq_data *wrqu, | 1918 | union iwreq_data *wrqu, |
1919 | char *extra) | 1919 | char *extra) |
1920 | { | 1920 | { |
1921 | kio_addr_t base = dev->base_addr; | 1921 | unsigned int base = dev->base_addr; |
1922 | net_local *lp = netdev_priv(dev); | 1922 | net_local *lp = netdev_priv(dev); |
1923 | unsigned long flags; | 1923 | unsigned long flags; |
1924 | int ret; | 1924 | int ret; |
@@ -1948,7 +1948,7 @@ static int wavelan_get_freq(struct net_device *dev, | |||
1948 | union iwreq_data *wrqu, | 1948 | union iwreq_data *wrqu, |
1949 | char *extra) | 1949 | char *extra) |
1950 | { | 1950 | { |
1951 | kio_addr_t base = dev->base_addr; | 1951 | unsigned int base = dev->base_addr; |
1952 | net_local *lp = netdev_priv(dev); | 1952 | net_local *lp = netdev_priv(dev); |
1953 | psa_t psa; | 1953 | psa_t psa; |
1954 | unsigned long flags; | 1954 | unsigned long flags; |
@@ -1994,7 +1994,7 @@ static int wavelan_set_sens(struct net_device *dev, | |||
1994 | union iwreq_data *wrqu, | 1994 | union iwreq_data *wrqu, |
1995 | char *extra) | 1995 | char *extra) |
1996 | { | 1996 | { |
1997 | kio_addr_t base = dev->base_addr; | 1997 | unsigned int base = dev->base_addr; |
1998 | net_local *lp = netdev_priv(dev); | 1998 | net_local *lp = netdev_priv(dev); |
1999 | psa_t psa; | 1999 | psa_t psa; |
2000 | unsigned long flags; | 2000 | unsigned long flags; |
@@ -2060,7 +2060,7 @@ static int wavelan_set_encode(struct net_device *dev, | |||
2060 | union iwreq_data *wrqu, | 2060 | union iwreq_data *wrqu, |
2061 | char *extra) | 2061 | char *extra) |
2062 | { | 2062 | { |
2063 | kio_addr_t base = dev->base_addr; | 2063 | unsigned int base = dev->base_addr; |
2064 | net_local *lp = netdev_priv(dev); | 2064 | net_local *lp = netdev_priv(dev); |
2065 | unsigned long flags; | 2065 | unsigned long flags; |
2066 | psa_t psa; | 2066 | psa_t psa; |
@@ -2130,7 +2130,7 @@ static int wavelan_get_encode(struct net_device *dev, | |||
2130 | union iwreq_data *wrqu, | 2130 | union iwreq_data *wrqu, |
2131 | char *extra) | 2131 | char *extra) |
2132 | { | 2132 | { |
2133 | kio_addr_t base = dev->base_addr; | 2133 | unsigned int base = dev->base_addr; |
2134 | net_local *lp = netdev_priv(dev); | 2134 | net_local *lp = netdev_priv(dev); |
2135 | psa_t psa; | 2135 | psa_t psa; |
2136 | unsigned long flags; | 2136 | unsigned long flags; |
@@ -2349,7 +2349,7 @@ static int wavelan_get_range(struct net_device *dev, | |||
2349 | union iwreq_data *wrqu, | 2349 | union iwreq_data *wrqu, |
2350 | char *extra) | 2350 | char *extra) |
2351 | { | 2351 | { |
2352 | kio_addr_t base = dev->base_addr; | 2352 | unsigned int base = dev->base_addr; |
2353 | net_local *lp = netdev_priv(dev); | 2353 | net_local *lp = netdev_priv(dev); |
2354 | struct iw_range *range = (struct iw_range *) extra; | 2354 | struct iw_range *range = (struct iw_range *) extra; |
2355 | unsigned long flags; | 2355 | unsigned long flags; |
@@ -2425,7 +2425,7 @@ static int wavelan_set_qthr(struct net_device *dev, | |||
2425 | union iwreq_data *wrqu, | 2425 | union iwreq_data *wrqu, |
2426 | char *extra) | 2426 | char *extra) |
2427 | { | 2427 | { |
2428 | kio_addr_t base = dev->base_addr; | 2428 | unsigned int base = dev->base_addr; |
2429 | net_local *lp = netdev_priv(dev); | 2429 | net_local *lp = netdev_priv(dev); |
2430 | psa_t psa; | 2430 | psa_t psa; |
2431 | unsigned long flags; | 2431 | unsigned long flags; |
@@ -2701,7 +2701,7 @@ static const struct iw_handler_def wavelan_handler_def = | |||
2701 | static iw_stats * | 2701 | static iw_stats * |
2702 | wavelan_get_wireless_stats(struct net_device * dev) | 2702 | wavelan_get_wireless_stats(struct net_device * dev) |
2703 | { | 2703 | { |
2704 | kio_addr_t base = dev->base_addr; | 2704 | unsigned int base = dev->base_addr; |
2705 | net_local * lp = netdev_priv(dev); | 2705 | net_local * lp = netdev_priv(dev); |
2706 | mmr_t m; | 2706 | mmr_t m; |
2707 | iw_stats * wstats; | 2707 | iw_stats * wstats; |
@@ -2764,7 +2764,7 @@ wv_start_of_frame(struct net_device * dev, | |||
2764 | int rfp, /* end of frame */ | 2764 | int rfp, /* end of frame */ |
2765 | int wrap) /* start of buffer */ | 2765 | int wrap) /* start of buffer */ |
2766 | { | 2766 | { |
2767 | kio_addr_t base = dev->base_addr; | 2767 | unsigned int base = dev->base_addr; |
2768 | int rp; | 2768 | int rp; |
2769 | int len; | 2769 | int len; |
2770 | 2770 | ||
@@ -2925,7 +2925,7 @@ wv_packet_read(struct net_device * dev, | |||
2925 | static inline void | 2925 | static inline void |
2926 | wv_packet_rcv(struct net_device * dev) | 2926 | wv_packet_rcv(struct net_device * dev) |
2927 | { | 2927 | { |
2928 | kio_addr_t base = dev->base_addr; | 2928 | unsigned int base = dev->base_addr; |
2929 | net_local * lp = netdev_priv(dev); | 2929 | net_local * lp = netdev_priv(dev); |
2930 | int newrfp; | 2930 | int newrfp; |
2931 | int rp; | 2931 | int rp; |
@@ -3062,7 +3062,7 @@ wv_packet_write(struct net_device * dev, | |||
3062 | short length) | 3062 | short length) |
3063 | { | 3063 | { |
3064 | net_local * lp = netdev_priv(dev); | 3064 | net_local * lp = netdev_priv(dev); |
3065 | kio_addr_t base = dev->base_addr; | 3065 | unsigned int base = dev->base_addr; |
3066 | unsigned long flags; | 3066 | unsigned long flags; |
3067 | int clen = length; | 3067 | int clen = length; |
3068 | register u_short xmtdata_base = TX_BASE; | 3068 | register u_short xmtdata_base = TX_BASE; |
@@ -3183,7 +3183,7 @@ wavelan_packet_xmit(struct sk_buff * skb, | |||
3183 | static inline int | 3183 | static inline int |
3184 | wv_mmc_init(struct net_device * dev) | 3184 | wv_mmc_init(struct net_device * dev) |
3185 | { | 3185 | { |
3186 | kio_addr_t base = dev->base_addr; | 3186 | unsigned int base = dev->base_addr; |
3187 | psa_t psa; | 3187 | psa_t psa; |
3188 | mmw_t m; | 3188 | mmw_t m; |
3189 | int configured; | 3189 | int configured; |
@@ -3377,7 +3377,7 @@ wv_mmc_init(struct net_device * dev) | |||
3377 | static int | 3377 | static int |
3378 | wv_ru_stop(struct net_device * dev) | 3378 | wv_ru_stop(struct net_device * dev) |
3379 | { | 3379 | { |
3380 | kio_addr_t base = dev->base_addr; | 3380 | unsigned int base = dev->base_addr; |
3381 | net_local * lp = netdev_priv(dev); | 3381 | net_local * lp = netdev_priv(dev); |
3382 | unsigned long flags; | 3382 | unsigned long flags; |
3383 | int status; | 3383 | int status; |
@@ -3440,7 +3440,7 @@ wv_ru_stop(struct net_device * dev) | |||
3440 | static int | 3440 | static int |
3441 | wv_ru_start(struct net_device * dev) | 3441 | wv_ru_start(struct net_device * dev) |
3442 | { | 3442 | { |
3443 | kio_addr_t base = dev->base_addr; | 3443 | unsigned int base = dev->base_addr; |
3444 | net_local * lp = netdev_priv(dev); | 3444 | net_local * lp = netdev_priv(dev); |
3445 | unsigned long flags; | 3445 | unsigned long flags; |
3446 | 3446 | ||
@@ -3528,7 +3528,7 @@ wv_ru_start(struct net_device * dev) | |||
3528 | static int | 3528 | static int |
3529 | wv_82593_config(struct net_device * dev) | 3529 | wv_82593_config(struct net_device * dev) |
3530 | { | 3530 | { |
3531 | kio_addr_t base = dev->base_addr; | 3531 | unsigned int base = dev->base_addr; |
3532 | net_local * lp = netdev_priv(dev); | 3532 | net_local * lp = netdev_priv(dev); |
3533 | struct i82593_conf_block cfblk; | 3533 | struct i82593_conf_block cfblk; |
3534 | int ret = TRUE; | 3534 | int ret = TRUE; |
@@ -3765,7 +3765,7 @@ static int | |||
3765 | wv_hw_config(struct net_device * dev) | 3765 | wv_hw_config(struct net_device * dev) |
3766 | { | 3766 | { |
3767 | net_local * lp = netdev_priv(dev); | 3767 | net_local * lp = netdev_priv(dev); |
3768 | kio_addr_t base = dev->base_addr; | 3768 | unsigned int base = dev->base_addr; |
3769 | unsigned long flags; | 3769 | unsigned long flags; |
3770 | int ret = FALSE; | 3770 | int ret = FALSE; |
3771 | 3771 | ||
@@ -4047,7 +4047,7 @@ wavelan_interrupt(int irq, | |||
4047 | { | 4047 | { |
4048 | struct net_device * dev = dev_id; | 4048 | struct net_device * dev = dev_id; |
4049 | net_local * lp; | 4049 | net_local * lp; |
4050 | kio_addr_t base; | 4050 | unsigned int base; |
4051 | int status0; | 4051 | int status0; |
4052 | u_int tx_status; | 4052 | u_int tx_status; |
4053 | 4053 | ||
@@ -4306,7 +4306,7 @@ static void | |||
4306 | wavelan_watchdog(struct net_device * dev) | 4306 | wavelan_watchdog(struct net_device * dev) |
4307 | { | 4307 | { |
4308 | net_local * lp = netdev_priv(dev); | 4308 | net_local * lp = netdev_priv(dev); |
4309 | kio_addr_t base = dev->base_addr; | 4309 | unsigned int base = dev->base_addr; |
4310 | unsigned long flags; | 4310 | unsigned long flags; |
4311 | int aborted = FALSE; | 4311 | int aborted = FALSE; |
4312 | 4312 | ||
@@ -4382,7 +4382,7 @@ wavelan_open(struct net_device * dev) | |||
4382 | { | 4382 | { |
4383 | net_local * lp = netdev_priv(dev); | 4383 | net_local * lp = netdev_priv(dev); |
4384 | struct pcmcia_device * link = lp->link; | 4384 | struct pcmcia_device * link = lp->link; |
4385 | kio_addr_t base = dev->base_addr; | 4385 | unsigned int base = dev->base_addr; |
4386 | 4386 | ||
4387 | #ifdef DEBUG_CALLBACK_TRACE | 4387 | #ifdef DEBUG_CALLBACK_TRACE |
4388 | printk(KERN_DEBUG "%s: ->wavelan_open(dev=0x%x)\n", dev->name, | 4388 | printk(KERN_DEBUG "%s: ->wavelan_open(dev=0x%x)\n", dev->name, |
@@ -4436,7 +4436,7 @@ static int | |||
4436 | wavelan_close(struct net_device * dev) | 4436 | wavelan_close(struct net_device * dev) |
4437 | { | 4437 | { |
4438 | struct pcmcia_device * link = ((net_local *)netdev_priv(dev))->link; | 4438 | struct pcmcia_device * link = ((net_local *)netdev_priv(dev))->link; |
4439 | kio_addr_t base = dev->base_addr; | 4439 | unsigned int base = dev->base_addr; |
4440 | 4440 | ||
4441 | #ifdef DEBUG_CALLBACK_TRACE | 4441 | #ifdef DEBUG_CALLBACK_TRACE |
4442 | printk(KERN_DEBUG "%s: ->wavelan_close(dev=0x%x)\n", dev->name, | 4442 | printk(KERN_DEBUG "%s: ->wavelan_close(dev=0x%x)\n", dev->name, |
diff --git a/drivers/nubus/Makefile b/drivers/nubus/Makefile index f5ef03cf9879..21bda2031e7e 100644 --- a/drivers/nubus/Makefile +++ b/drivers/nubus/Makefile | |||
@@ -4,5 +4,4 @@ | |||
4 | 4 | ||
5 | obj-y := nubus.o | 5 | obj-y := nubus.o |
6 | 6 | ||
7 | obj-$(CONFIG_MODULES) += nubus_syms.o | ||
8 | obj-$(CONFIG_PROC_FS) += proc.o | 7 | obj-$(CONFIG_PROC_FS) += proc.o |
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c index f4076aeb2098..2f047e573d86 100644 --- a/drivers/nubus/nubus.c +++ b/drivers/nubus/nubus.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/module.h> | ||
17 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
18 | #include <asm/system.h> | 19 | #include <asm/system.h> |
19 | #include <asm/page.h> | 20 | #include <asm/page.h> |
@@ -186,6 +187,7 @@ void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent* dirent, | |||
186 | len--; | 187 | len--; |
187 | } | 188 | } |
188 | } | 189 | } |
190 | EXPORT_SYMBOL(nubus_get_rsrc_mem); | ||
189 | 191 | ||
190 | void nubus_get_rsrc_str(void *dest, const struct nubus_dirent* dirent, | 192 | void nubus_get_rsrc_str(void *dest, const struct nubus_dirent* dirent, |
191 | int len) | 193 | int len) |
@@ -200,6 +202,7 @@ void nubus_get_rsrc_str(void *dest, const struct nubus_dirent* dirent, | |||
200 | len--; | 202 | len--; |
201 | } | 203 | } |
202 | } | 204 | } |
205 | EXPORT_SYMBOL(nubus_get_rsrc_str); | ||
203 | 206 | ||
204 | int nubus_get_root_dir(const struct nubus_board* board, | 207 | int nubus_get_root_dir(const struct nubus_board* board, |
205 | struct nubus_dir* dir) | 208 | struct nubus_dir* dir) |
@@ -209,6 +212,7 @@ int nubus_get_root_dir(const struct nubus_board* board, | |||
209 | dir->mask = board->lanes; | 212 | dir->mask = board->lanes; |
210 | return 0; | 213 | return 0; |
211 | } | 214 | } |
215 | EXPORT_SYMBOL(nubus_get_root_dir); | ||
212 | 216 | ||
213 | /* This is a slyly renamed version of the above */ | 217 | /* This is a slyly renamed version of the above */ |
214 | int nubus_get_func_dir(const struct nubus_dev* dev, | 218 | int nubus_get_func_dir(const struct nubus_dev* dev, |
@@ -219,6 +223,7 @@ int nubus_get_func_dir(const struct nubus_dev* dev, | |||
219 | dir->mask = dev->board->lanes; | 223 | dir->mask = dev->board->lanes; |
220 | return 0; | 224 | return 0; |
221 | } | 225 | } |
226 | EXPORT_SYMBOL(nubus_get_func_dir); | ||
222 | 227 | ||
223 | int nubus_get_board_dir(const struct nubus_board* board, | 228 | int nubus_get_board_dir(const struct nubus_board* board, |
224 | struct nubus_dir* dir) | 229 | struct nubus_dir* dir) |
@@ -237,6 +242,7 @@ int nubus_get_board_dir(const struct nubus_board* board, | |||
237 | return -1; | 242 | return -1; |
238 | return 0; | 243 | return 0; |
239 | } | 244 | } |
245 | EXPORT_SYMBOL(nubus_get_board_dir); | ||
240 | 246 | ||
241 | int nubus_get_subdir(const struct nubus_dirent *ent, | 247 | int nubus_get_subdir(const struct nubus_dirent *ent, |
242 | struct nubus_dir *dir) | 248 | struct nubus_dir *dir) |
@@ -246,6 +252,7 @@ int nubus_get_subdir(const struct nubus_dirent *ent, | |||
246 | dir->mask = ent->mask; | 252 | dir->mask = ent->mask; |
247 | return 0; | 253 | return 0; |
248 | } | 254 | } |
255 | EXPORT_SYMBOL(nubus_get_subdir); | ||
249 | 256 | ||
250 | int nubus_readdir(struct nubus_dir *nd, struct nubus_dirent *ent) | 257 | int nubus_readdir(struct nubus_dir *nd, struct nubus_dirent *ent) |
251 | { | 258 | { |
@@ -274,12 +281,14 @@ int nubus_readdir(struct nubus_dir *nd, struct nubus_dirent *ent) | |||
274 | ent->mask = nd->mask; | 281 | ent->mask = nd->mask; |
275 | return 0; | 282 | return 0; |
276 | } | 283 | } |
284 | EXPORT_SYMBOL(nubus_readdir); | ||
277 | 285 | ||
278 | int nubus_rewinddir(struct nubus_dir* dir) | 286 | int nubus_rewinddir(struct nubus_dir* dir) |
279 | { | 287 | { |
280 | dir->ptr = dir->base; | 288 | dir->ptr = dir->base; |
281 | return 0; | 289 | return 0; |
282 | } | 290 | } |
291 | EXPORT_SYMBOL(nubus_rewinddir); | ||
283 | 292 | ||
284 | /* Driver interface functions, more or less like in pci.c */ | 293 | /* Driver interface functions, more or less like in pci.c */ |
285 | 294 | ||
@@ -303,6 +312,7 @@ nubus_find_device(unsigned short category, | |||
303 | } | 312 | } |
304 | return NULL; | 313 | return NULL; |
305 | } | 314 | } |
315 | EXPORT_SYMBOL(nubus_find_device); | ||
306 | 316 | ||
307 | struct nubus_dev* | 317 | struct nubus_dev* |
308 | nubus_find_type(unsigned short category, | 318 | nubus_find_type(unsigned short category, |
@@ -320,6 +330,7 @@ nubus_find_type(unsigned short category, | |||
320 | } | 330 | } |
321 | return NULL; | 331 | return NULL; |
322 | } | 332 | } |
333 | EXPORT_SYMBOL(nubus_find_type); | ||
323 | 334 | ||
324 | struct nubus_dev* | 335 | struct nubus_dev* |
325 | nubus_find_slot(unsigned int slot, | 336 | nubus_find_slot(unsigned int slot, |
@@ -335,6 +346,7 @@ nubus_find_slot(unsigned int slot, | |||
335 | } | 346 | } |
336 | return NULL; | 347 | return NULL; |
337 | } | 348 | } |
349 | EXPORT_SYMBOL(nubus_find_slot); | ||
338 | 350 | ||
339 | int | 351 | int |
340 | nubus_find_rsrc(struct nubus_dir* dir, unsigned char rsrc_type, | 352 | nubus_find_rsrc(struct nubus_dir* dir, unsigned char rsrc_type, |
@@ -346,6 +358,7 @@ nubus_find_rsrc(struct nubus_dir* dir, unsigned char rsrc_type, | |||
346 | } | 358 | } |
347 | return -1; | 359 | return -1; |
348 | } | 360 | } |
361 | EXPORT_SYMBOL(nubus_find_rsrc); | ||
349 | 362 | ||
350 | /* Initialization functions - decide which slots contain stuff worth | 363 | /* Initialization functions - decide which slots contain stuff worth |
351 | looking at, and print out lots and lots of information from the | 364 | looking at, and print out lots and lots of information from the |
diff --git a/drivers/nubus/nubus_syms.c b/drivers/nubus/nubus_syms.c deleted file mode 100644 index 9204f04fbf0b..000000000000 --- a/drivers/nubus/nubus_syms.c +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | /* Exported symbols for NuBus services | ||
2 | |||
3 | (c) 1999 David Huggins-Daines <dhd@debian.org> */ | ||
4 | |||
5 | #include <linux/module.h> | ||
6 | #include <linux/types.h> | ||
7 | #include <linux/nubus.h> | ||
8 | |||
9 | #ifdef CONFIG_PROC_FS | ||
10 | EXPORT_SYMBOL(nubus_proc_attach_device); | ||
11 | EXPORT_SYMBOL(nubus_proc_detach_device); | ||
12 | #endif | ||
13 | |||
14 | MODULE_LICENSE("GPL"); | ||
15 | |||
16 | EXPORT_SYMBOL(nubus_find_device); | ||
17 | EXPORT_SYMBOL(nubus_find_type); | ||
18 | EXPORT_SYMBOL(nubus_find_slot); | ||
19 | EXPORT_SYMBOL(nubus_get_root_dir); | ||
20 | EXPORT_SYMBOL(nubus_get_board_dir); | ||
21 | EXPORT_SYMBOL(nubus_get_func_dir); | ||
22 | EXPORT_SYMBOL(nubus_readdir); | ||
23 | EXPORT_SYMBOL(nubus_find_rsrc); | ||
24 | EXPORT_SYMBOL(nubus_rewinddir); | ||
25 | EXPORT_SYMBOL(nubus_get_subdir); | ||
26 | EXPORT_SYMBOL(nubus_get_rsrc_mem); | ||
27 | EXPORT_SYMBOL(nubus_get_rsrc_str); | ||
28 | |||
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c index 5271a4a7af26..e07492be1f4a 100644 --- a/drivers/nubus/proc.c +++ b/drivers/nubus/proc.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include <linux/nubus.h> | 22 | #include <linux/nubus.h> |
23 | #include <linux/proc_fs.h> | 23 | #include <linux/proc_fs.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/module.h> | ||
26 | |||
25 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
26 | #include <asm/byteorder.h> | 28 | #include <asm/byteorder.h> |
27 | 29 | ||
@@ -140,6 +142,7 @@ int nubus_proc_attach_device(struct nubus_dev *dev) | |||
140 | 142 | ||
141 | return 0; | 143 | return 0; |
142 | } | 144 | } |
145 | EXPORT_SYMBOL(nubus_proc_attach_device); | ||
143 | 146 | ||
144 | /* FIXME: this is certainly broken! */ | 147 | /* FIXME: this is certainly broken! */ |
145 | int nubus_proc_detach_device(struct nubus_dev *dev) | 148 | int nubus_proc_detach_device(struct nubus_dev *dev) |
@@ -154,6 +157,7 @@ int nubus_proc_detach_device(struct nubus_dev *dev) | |||
154 | } | 157 | } |
155 | return 0; | 158 | return 0; |
156 | } | 159 | } |
160 | EXPORT_SYMBOL(nubus_proc_detach_device); | ||
157 | 161 | ||
158 | void __init proc_bus_nubus_add_devices(void) | 162 | void __init proc_bus_nubus_add_devices(void) |
159 | { | 163 | { |
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index ca52307b8f40..d08b284de196 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -941,7 +941,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
941 | ** w/o this association, we wouldn't have coherent DMA! | 941 | ** w/o this association, we wouldn't have coherent DMA! |
942 | ** Access to the virtual address is what forces a two pass algorithm. | 942 | ** Access to the virtual address is what forces a two pass algorithm. |
943 | */ | 943 | */ |
944 | coalesced = iommu_coalesce_chunks(ioc, sglist, nents, ccio_alloc_range); | 944 | coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range); |
945 | 945 | ||
946 | /* | 946 | /* |
947 | ** Program the I/O Pdir | 947 | ** Program the I/O Pdir |
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h index 0a1f99a2e93e..97ba8286c596 100644 --- a/drivers/parisc/iommu-helpers.h +++ b/drivers/parisc/iommu-helpers.h | |||
@@ -95,12 +95,14 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents, | |||
95 | */ | 95 | */ |
96 | 96 | ||
97 | static inline unsigned int | 97 | static inline unsigned int |
98 | iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents, | 98 | iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, |
99 | struct scatterlist *startsg, int nents, | ||
99 | int (*iommu_alloc_range)(struct ioc *, size_t)) | 100 | int (*iommu_alloc_range)(struct ioc *, size_t)) |
100 | { | 101 | { |
101 | struct scatterlist *contig_sg; /* contig chunk head */ | 102 | struct scatterlist *contig_sg; /* contig chunk head */ |
102 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ | 103 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ |
103 | unsigned int n_mappings = 0; | 104 | unsigned int n_mappings = 0; |
105 | unsigned int max_seg_size = dma_get_max_seg_size(dev); | ||
104 | 106 | ||
105 | while (nents > 0) { | 107 | while (nents > 0) { |
106 | 108 | ||
@@ -142,6 +144,9 @@ iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents, | |||
142 | IOVP_SIZE) > DMA_CHUNK_SIZE)) | 144 | IOVP_SIZE) > DMA_CHUNK_SIZE)) |
143 | break; | 145 | break; |
144 | 146 | ||
147 | if (startsg->length + dma_len > max_seg_size) | ||
148 | break; | ||
149 | |||
145 | /* | 150 | /* |
146 | ** Next see if we can append the next chunk (i.e. | 151 | ** Next see if we can append the next chunk (i.e. |
147 | ** it must end on one page and begin on another | 152 | ** it must end on one page and begin on another |
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index e527a0e1d6c0..d06627c3f353 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -946,7 +946,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
946 | ** w/o this association, we wouldn't have coherent DMA! | 946 | ** w/o this association, we wouldn't have coherent DMA! |
947 | ** Access to the virtual address is what forces a two pass algorithm. | 947 | ** Access to the virtual address is what forces a two pass algorithm. |
948 | */ | 948 | */ |
949 | coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range); | 949 | coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range); |
950 | 950 | ||
951 | /* | 951 | /* |
952 | ** Program the I/O Pdir | 952 | ** Program the I/O Pdir |
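A minimal sketch, not the ccio/sba code itself: it only illustrates the rule the parisc coalescing change adds, namely that merged scatter-gather segments are capped at the device's dma_get_max_seg_size(). The address-contiguity and DMA_CHUNK_SIZE checks the real iommu_coalesce_chunks() also performs are left out, and the function name is illustrative.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static unsigned int count_capped_segments(struct device *dev,
                                          struct scatterlist *sglist, int nents)
{
        unsigned int max_seg = dma_get_max_seg_size(dev);
        unsigned int seg_len = 0, n_segs = 0;
        struct scatterlist *sg;
        int i;

        for_each_sg(sglist, sg, nents, i) {
                /* Close the current segment if appending this chunk would
                 * exceed the device's maximum DMA segment size. */
                if (n_segs == 0 || seg_len + sg->length > max_seg) {
                        n_segs++;
                        seg_len = 0;
                }
                seg_len += sg->length;
        }
        return n_segs;
}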
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 04aac7782468..ae3df46eaabf 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -1451,6 +1451,22 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) | |||
1451 | } | 1451 | } |
1452 | #endif | 1452 | #endif |
1453 | 1453 | ||
1454 | #ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE | ||
1455 | int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size) | ||
1456 | { | ||
1457 | return dma_set_max_seg_size(&dev->dev, size); | ||
1458 | } | ||
1459 | EXPORT_SYMBOL(pci_set_dma_max_seg_size); | ||
1460 | #endif | ||
1461 | |||
1462 | #ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY | ||
1463 | int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask) | ||
1464 | { | ||
1465 | return dma_set_seg_boundary(&dev->dev, mask); | ||
1466 | } | ||
1467 | EXPORT_SYMBOL(pci_set_dma_seg_boundary); | ||
1468 | #endif | ||
1469 | |||
1454 | /** | 1470 | /** |
1455 | * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count | 1471 | * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count |
1456 | * @dev: PCI device to query | 1472 | * @dev: PCI device to query |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 7f5dab34d315..4d23b9fb551b 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -933,8 +933,12 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) | |||
933 | 933 | ||
934 | set_dev_node(&dev->dev, pcibus_to_node(bus)); | 934 | set_dev_node(&dev->dev, pcibus_to_node(bus)); |
935 | dev->dev.dma_mask = &dev->dma_mask; | 935 | dev->dev.dma_mask = &dev->dma_mask; |
936 | dev->dev.dma_parms = &dev->dma_parms; | ||
936 | dev->dev.coherent_dma_mask = 0xffffffffull; | 937 | dev->dev.coherent_dma_mask = 0xffffffffull; |
937 | 938 | ||
939 | pci_set_dma_max_seg_size(dev, 65536); | ||
940 | pci_set_dma_seg_boundary(dev, 0xffffffff); | ||
941 | |||
938 | /* Fix up broken headers */ | 942 | /* Fix up broken headers */ |
939 | pci_fixup_device(pci_fixup_header, dev); | 943 | pci_fixup_device(pci_fixup_header, dev); |
940 | 944 | ||
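For drivers, a hypothetical probe routine could override the defaults set in pci_device_add() with the new helpers; the 64 KiB segment size and 4 GiB boundary below are illustrative values, not requirements of any real device.

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        /* Tell the DMA layer the largest segment this device accepts and
         * the boundary a segment must not cross. */
        err = pci_set_dma_max_seg_size(pdev, 0x10000);
        if (err)
                goto out_disable;
        err = pci_set_dma_seg_boundary(pdev, 0xffffffff);
        if (err)
                goto out_disable;

        return 0;

out_disable:
        pci_disable_device(pdev);
        return err;
}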
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c index eb6abd3f9221..385e145e1acc 100644 --- a/drivers/pcmcia/at91_cf.c +++ b/drivers/pcmcia/at91_cf.c | |||
@@ -21,9 +21,9 @@ | |||
21 | #include <asm/hardware.h> | 21 | #include <asm/hardware.h> |
22 | #include <asm/io.h> | 22 | #include <asm/io.h> |
23 | #include <asm/sizes.h> | 23 | #include <asm/sizes.h> |
24 | #include <asm/gpio.h> | ||
24 | 25 | ||
25 | #include <asm/arch/board.h> | 26 | #include <asm/arch/board.h> |
26 | #include <asm/arch/gpio.h> | ||
27 | #include <asm/arch/at91rm9200_mc.h> | 27 | #include <asm/arch/at91rm9200_mc.h> |
28 | 28 | ||
29 | 29 | ||
@@ -56,7 +56,7 @@ struct at91_cf_socket { | |||
56 | 56 | ||
57 | static inline int at91_cf_present(struct at91_cf_socket *cf) | 57 | static inline int at91_cf_present(struct at91_cf_socket *cf) |
58 | { | 58 | { |
59 | return !at91_get_gpio_value(cf->board->det_pin); | 59 | return !gpio_get_value(cf->board->det_pin); |
60 | } | 60 | } |
61 | 61 | ||
62 | /*--------------------------------------------------------------------------*/ | 62 | /*--------------------------------------------------------------------------*/ |
@@ -100,9 +100,9 @@ static int at91_cf_get_status(struct pcmcia_socket *s, u_int *sp) | |||
100 | int vcc = cf->board->vcc_pin; | 100 | int vcc = cf->board->vcc_pin; |
101 | 101 | ||
102 | *sp = SS_DETECT | SS_3VCARD; | 102 | *sp = SS_DETECT | SS_3VCARD; |
103 | if (!rdy || at91_get_gpio_value(rdy)) | 103 | if (!rdy || gpio_get_value(rdy)) |
104 | *sp |= SS_READY; | 104 | *sp |= SS_READY; |
105 | if (!vcc || at91_get_gpio_value(vcc)) | 105 | if (!vcc || gpio_get_value(vcc)) |
106 | *sp |= SS_POWERON; | 106 | *sp |= SS_POWERON; |
107 | } else | 107 | } else |
108 | *sp = 0; | 108 | *sp = 0; |
@@ -121,10 +121,10 @@ at91_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s) | |||
121 | if (cf->board->vcc_pin) { | 121 | if (cf->board->vcc_pin) { |
122 | switch (s->Vcc) { | 122 | switch (s->Vcc) { |
123 | case 0: | 123 | case 0: |
124 | at91_set_gpio_value(cf->board->vcc_pin, 0); | 124 | gpio_set_value(cf->board->vcc_pin, 0); |
125 | break; | 125 | break; |
126 | case 33: | 126 | case 33: |
127 | at91_set_gpio_value(cf->board->vcc_pin, 1); | 127 | gpio_set_value(cf->board->vcc_pin, 1); |
128 | break; | 128 | break; |
129 | default: | 129 | default: |
130 | return -EINVAL; | 130 | return -EINVAL; |
@@ -132,7 +132,7 @@ at91_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s) | |||
132 | } | 132 | } |
133 | 133 | ||
134 | /* toggle reset if needed */ | 134 | /* toggle reset if needed */ |
135 | at91_set_gpio_value(cf->board->rst_pin, s->flags & SS_RESET); | 135 | gpio_set_value(cf->board->rst_pin, s->flags & SS_RESET); |
136 | 136 | ||
137 | pr_debug("%s: Vcc %d, io_irq %d, flags %04x csc %04x\n", | 137 | pr_debug("%s: Vcc %d, io_irq %d, flags %04x csc %04x\n", |
138 | driver_name, s->Vcc, s->io_irq, s->flags, s->csc_mask); | 138 | driver_name, s->Vcc, s->io_irq, s->flags, s->csc_mask); |
@@ -239,11 +239,24 @@ static int __init at91_cf_probe(struct platform_device *pdev) | |||
239 | platform_set_drvdata(pdev, cf); | 239 | platform_set_drvdata(pdev, cf); |
240 | 240 | ||
241 | /* must be a GPIO; ergo must trigger on both edges */ | 241 | /* must be a GPIO; ergo must trigger on both edges */ |
242 | status = request_irq(board->det_pin, at91_cf_irq, 0, driver_name, cf); | 242 | status = gpio_request(board->det_pin, "cf_det"); |
243 | if (status < 0) | 243 | if (status < 0) |
244 | goto fail0; | 244 | goto fail0; |
245 | status = request_irq(board->det_pin, at91_cf_irq, 0, driver_name, cf); | ||
246 | if (status < 0) | ||
247 | goto fail00; | ||
245 | device_init_wakeup(&pdev->dev, 1); | 248 | device_init_wakeup(&pdev->dev, 1); |
246 | 249 | ||
250 | status = gpio_request(board->rst_pin, "cf_rst"); | ||
251 | if (status < 0) | ||
252 | goto fail0a; | ||
253 | |||
254 | if (board->vcc_pin) { | ||
255 | status = gpio_request(board->vcc_pin, "cf_vcc"); | ||
256 | if (status < 0) | ||
257 | goto fail0b; | ||
258 | } | ||
259 | |||
247 | /* | 260 | /* |
248 | * The card driver will request this irq later as needed. | 261 | * The card driver will request this irq later as needed. |
249 | * but it causes lots of "irqNN: nobody cared" messages | 262 | * but it causes lots of "irqNN: nobody cared" messages |
@@ -251,16 +264,20 @@ static int __init at91_cf_probe(struct platform_device *pdev) | |||
251 | * (Note: DK board doesn't wire the IRQ pin...) | 264 | * (Note: DK board doesn't wire the IRQ pin...) |
252 | */ | 265 | */ |
253 | if (board->irq_pin) { | 266 | if (board->irq_pin) { |
267 | status = gpio_request(board->irq_pin, "cf_irq"); | ||
268 | if (status < 0) | ||
269 | goto fail0c; | ||
254 | status = request_irq(board->irq_pin, at91_cf_irq, | 270 | status = request_irq(board->irq_pin, at91_cf_irq, |
255 | IRQF_SHARED, driver_name, cf); | 271 | IRQF_SHARED, driver_name, cf); |
256 | if (status < 0) | 272 | if (status < 0) |
257 | goto fail0a; | 273 | goto fail0d; |
258 | cf->socket.pci_irq = board->irq_pin; | 274 | cf->socket.pci_irq = board->irq_pin; |
259 | } else | 275 | } else |
260 | cf->socket.pci_irq = NR_IRQS + 1; | 276 | cf->socket.pci_irq = NR_IRQS + 1; |
261 | 277 | ||
262 | /* pcmcia layer only remaps "real" memory not iospace */ | 278 | /* pcmcia layer only remaps "real" memory not iospace */ |
263 | cf->socket.io_offset = (unsigned long) ioremap(cf->phys_baseaddr + CF_IO_PHYS, SZ_2K); | 279 | cf->socket.io_offset = (unsigned long) |
280 | ioremap(cf->phys_baseaddr + CF_IO_PHYS, SZ_2K); | ||
264 | if (!cf->socket.io_offset) { | 281 | if (!cf->socket.io_offset) { |
265 | status = -ENXIO; | 282 | status = -ENXIO; |
266 | goto fail1; | 283 | goto fail1; |
@@ -296,11 +313,21 @@ fail2: | |||
296 | fail1: | 313 | fail1: |
297 | if (cf->socket.io_offset) | 314 | if (cf->socket.io_offset) |
298 | iounmap((void __iomem *) cf->socket.io_offset); | 315 | iounmap((void __iomem *) cf->socket.io_offset); |
299 | if (board->irq_pin) | 316 | if (board->irq_pin) { |
300 | free_irq(board->irq_pin, cf); | 317 | free_irq(board->irq_pin, cf); |
318 | fail0d: | ||
319 | gpio_free(board->irq_pin); | ||
320 | } | ||
321 | fail0c: | ||
322 | if (board->vcc_pin) | ||
323 | gpio_free(board->vcc_pin); | ||
324 | fail0b: | ||
325 | gpio_free(board->rst_pin); | ||
301 | fail0a: | 326 | fail0a: |
302 | device_init_wakeup(&pdev->dev, 0); | 327 | device_init_wakeup(&pdev->dev, 0); |
303 | free_irq(board->det_pin, cf); | 328 | free_irq(board->det_pin, cf); |
329 | fail00: | ||
330 | gpio_free(board->det_pin); | ||
304 | fail0: | 331 | fail0: |
305 | kfree(cf); | 332 | kfree(cf); |
306 | return status; | 333 | return status; |
@@ -313,13 +340,18 @@ static int __exit at91_cf_remove(struct platform_device *pdev) | |||
313 | struct resource *io = cf->socket.io[0].res; | 340 | struct resource *io = cf->socket.io[0].res; |
314 | 341 | ||
315 | pcmcia_unregister_socket(&cf->socket); | 342 | pcmcia_unregister_socket(&cf->socket); |
316 | if (board->irq_pin) | 343 | release_mem_region(io->start, io->end + 1 - io->start); |
344 | iounmap((void __iomem *) cf->socket.io_offset); | ||
345 | if (board->irq_pin) { | ||
317 | free_irq(board->irq_pin, cf); | 346 | free_irq(board->irq_pin, cf); |
347 | gpio_free(board->irq_pin); | ||
348 | } | ||
349 | if (board->vcc_pin) | ||
350 | gpio_free(board->vcc_pin); | ||
351 | gpio_free(board->rst_pin); | ||
318 | device_init_wakeup(&pdev->dev, 0); | 352 | device_init_wakeup(&pdev->dev, 0); |
319 | free_irq(board->det_pin, cf); | 353 | free_irq(board->det_pin, cf); |
320 | iounmap((void __iomem *) cf->socket.io_offset); | 354 | gpio_free(board->det_pin); |
321 | release_mem_region(io->start, io->end + 1 - io->start); | ||
322 | |||
323 | kfree(cf); | 355 | kfree(cf); |
324 | return 0; | 356 | return 0; |
325 | } | 357 | } |
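A minimal sketch of the generic GPIO calls the at91_cf conversion switches to; the pin number, the "cf_det" label and the active-low card-detect assumption are illustrative, not taken from real board data.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/gpio.h>

static int example_check_card(unsigned int det_pin)
{
        int status;

        /* Reserve the pin before using it; the label aids diagnostics. */
        status = gpio_request(det_pin, "cf_det");
        if (status < 0)
                return status;

        /* Card detect is assumed active low on this hypothetical board. */
        if (!gpio_get_value(det_pin))
                printk(KERN_INFO "example: card present\n");

        gpio_free(det_pin);
        return 0;
}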
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c index a1bd763b4e33..714baaeb6da1 100644 --- a/drivers/pcmcia/cardbus.c +++ b/drivers/pcmcia/cardbus.c | |||
@@ -143,7 +143,7 @@ int read_cb_mem(struct pcmcia_socket * s, int space, u_int addr, u_int len, void | |||
143 | /* Config space? */ | 143 | /* Config space? */ |
144 | if (space == 0) { | 144 | if (space == 0) { |
145 | if (addr + len > 0x100) | 145 | if (addr + len > 0x100) |
146 | goto fail; | 146 | goto failput; |
147 | for (; len; addr++, ptr++, len--) | 147 | for (; len; addr++, ptr++, len--) |
148 | pci_read_config_byte(dev, addr, ptr); | 148 | pci_read_config_byte(dev, addr, ptr); |
149 | return 0; | 149 | return 0; |
@@ -171,6 +171,8 @@ int read_cb_mem(struct pcmcia_socket * s, int space, u_int addr, u_int len, void | |||
171 | memcpy_fromio(ptr, s->cb_cis_virt + addr, len); | 171 | memcpy_fromio(ptr, s->cb_cis_virt + addr, len); |
172 | return 0; | 172 | return 0; |
173 | 173 | ||
174 | failput: | ||
175 | pci_dev_put(dev); | ||
174 | fail: | 176 | fail: |
175 | memset(ptr, 0xff, len); | 177 | memset(ptr, 0xff, len); |
176 | return -1; | 178 | return -1; |
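The rule the cardbus fix enforces, shown as a hypothetical helper: every reference taken with pci_get_slot() must be dropped with pci_dev_put() on every exit path, including early error returns.

#include <linux/pci.h>

static int example_read_config_byte(struct pci_bus *bus, unsigned int devfn,
                                    int addr, u8 *val)
{
        struct pci_dev *dev = pci_get_slot(bus, devfn);
        int ret = 0;

        if (!dev)
                return -ENODEV;

        if (addr > 0xff) {
                ret = -EINVAL;          /* error path still drops the ref */
                goto out_put;
        }

        pci_read_config_byte(dev, addr, val);
out_put:
        pci_dev_put(dev);
        return ret;
}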
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 15c18f5246d6..5a85871f5ee9 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c | |||
@@ -865,11 +865,12 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) | |||
865 | ds_dbg(1, "trying to load CIS file %s\n", filename); | 865 | ds_dbg(1, "trying to load CIS file %s\n", filename); |
866 | 866 | ||
867 | if (strlen(filename) > 14) { | 867 | if (strlen(filename) > 14) { |
868 | printk(KERN_WARNING "pcmcia: CIS filename is too long\n"); | 868 | printk(KERN_WARNING "pcmcia: CIS filename is too long [%s]\n", |
869 | filename); | ||
869 | return -EINVAL; | 870 | return -EINVAL; |
870 | } | 871 | } |
871 | 872 | ||
872 | snprintf(path, 20, "%s", filename); | 873 | snprintf(path, sizeof(path), "%s", filename); |
873 | 874 | ||
874 | if (request_firmware(&fw, path, &dev->dev) == 0) { | 875 | if (request_firmware(&fw, path, &dev->dev) == 0) { |
875 | if (fw->size >= CISTPL_MAX_CIS_SIZE) { | 876 | if (fw->size >= CISTPL_MAX_CIS_SIZE) { |
@@ -1130,8 +1131,6 @@ static int runtime_suspend(struct device *dev) | |||
1130 | down(&dev->sem); | 1131 | down(&dev->sem); |
1131 | rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND); | 1132 | rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND); |
1132 | up(&dev->sem); | 1133 | up(&dev->sem); |
1133 | if (!rc) | ||
1134 | dev->power.power_state.event = PM_EVENT_SUSPEND; | ||
1135 | return rc; | 1134 | return rc; |
1136 | } | 1135 | } |
1137 | 1136 | ||
@@ -1142,8 +1141,6 @@ static void runtime_resume(struct device *dev) | |||
1142 | down(&dev->sem); | 1141 | down(&dev->sem); |
1143 | rc = pcmcia_dev_resume(dev); | 1142 | rc = pcmcia_dev_resume(dev); |
1144 | up(&dev->sem); | 1143 | up(&dev->sem); |
1145 | if (!rc) | ||
1146 | dev->power.power_state.event = PM_EVENT_ON; | ||
1147 | } | 1144 | } |
1148 | 1145 | ||
1149 | /************************ per-device sysfs output ***************************/ | 1146 | /************************ per-device sysfs output ***************************/ |
@@ -1265,6 +1262,9 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state) | |||
1265 | struct pcmcia_driver *p_drv = NULL; | 1262 | struct pcmcia_driver *p_drv = NULL; |
1266 | int ret = 0; | 1263 | int ret = 0; |
1267 | 1264 | ||
1265 | if (p_dev->suspended) | ||
1266 | return 0; | ||
1267 | |||
1268 | ds_dbg(2, "suspending %s\n", dev->bus_id); | 1268 | ds_dbg(2, "suspending %s\n", dev->bus_id); |
1269 | 1269 | ||
1270 | if (dev->driver) | 1270 | if (dev->driver) |
@@ -1301,6 +1301,9 @@ static int pcmcia_dev_resume(struct device * dev) | |||
1301 | struct pcmcia_driver *p_drv = NULL; | 1301 | struct pcmcia_driver *p_drv = NULL; |
1302 | int ret = 0; | 1302 | int ret = 0; |
1303 | 1303 | ||
1304 | if (!p_dev->suspended) | ||
1305 | return 0; | ||
1306 | |||
1304 | ds_dbg(2, "resuming %s\n", dev->bus_id); | 1307 | ds_dbg(2, "resuming %s\n", dev->bus_id); |
1305 | 1308 | ||
1306 | if (dev->driver) | 1309 | if (dev->driver) |
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c index df21e2d16f87..749515534cc0 100644 --- a/drivers/pcmcia/i82092.c +++ b/drivers/pcmcia/i82092.c | |||
@@ -82,7 +82,7 @@ struct socket_info { | |||
82 | 1 = empty socket, | 82 | 1 = empty socket, |
83 | 2 = card but not initialized, | 83 | 2 = card but not initialized, |
84 | 3 = operational card */ | 84 | 3 = operational card */ |
85 | kio_addr_t io_base; /* base io address of the socket */ | 85 | unsigned int io_base; /* base io address of the socket */ |
86 | 86 | ||
87 | struct pcmcia_socket socket; | 87 | struct pcmcia_socket socket; |
88 | struct pci_dev *dev; /* The PCI device for the socket */ | 88 | struct pci_dev *dev; /* The PCI device for the socket */ |
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c index 839bb1c0db58..32a2ab119798 100644 --- a/drivers/pcmcia/i82365.c +++ b/drivers/pcmcia/i82365.c | |||
@@ -164,7 +164,7 @@ struct i82365_socket { | |||
164 | u_short type, flags; | 164 | u_short type, flags; |
165 | struct pcmcia_socket socket; | 165 | struct pcmcia_socket socket; |
166 | unsigned int number; | 166 | unsigned int number; |
167 | kio_addr_t ioaddr; | 167 | unsigned int ioaddr; |
168 | u_short psock; | 168 | u_short psock; |
169 | u_char cs_irq, intr; | 169 | u_char cs_irq, intr; |
170 | union { | 170 | union { |
@@ -238,7 +238,7 @@ static u_char i365_get(u_short sock, u_short reg) | |||
238 | unsigned long flags; | 238 | unsigned long flags; |
239 | spin_lock_irqsave(&bus_lock,flags); | 239 | spin_lock_irqsave(&bus_lock,flags); |
240 | { | 240 | { |
241 | kio_addr_t port = socket[sock].ioaddr; | 241 | unsigned int port = socket[sock].ioaddr; |
242 | u_char val; | 242 | u_char val; |
243 | reg = I365_REG(socket[sock].psock, reg); | 243 | reg = I365_REG(socket[sock].psock, reg); |
244 | outb(reg, port); val = inb(port+1); | 244 | outb(reg, port); val = inb(port+1); |
@@ -252,7 +252,7 @@ static void i365_set(u_short sock, u_short reg, u_char data) | |||
252 | unsigned long flags; | 252 | unsigned long flags; |
253 | spin_lock_irqsave(&bus_lock,flags); | 253 | spin_lock_irqsave(&bus_lock,flags); |
254 | { | 254 | { |
255 | kio_addr_t port = socket[sock].ioaddr; | 255 | unsigned int port = socket[sock].ioaddr; |
256 | u_char val = I365_REG(socket[sock].psock, reg); | 256 | u_char val = I365_REG(socket[sock].psock, reg); |
257 | outb(val, port); outb(data, port+1); | 257 | outb(val, port); outb(data, port+1); |
258 | spin_unlock_irqrestore(&bus_lock,flags); | 258 | spin_unlock_irqrestore(&bus_lock,flags); |
@@ -588,7 +588,7 @@ static int to_cycles(int ns) | |||
588 | 588 | ||
589 | /*====================================================================*/ | 589 | /*====================================================================*/ |
590 | 590 | ||
591 | static int __init identify(kio_addr_t port, u_short sock) | 591 | static int __init identify(unsigned int port, u_short sock) |
592 | { | 592 | { |
593 | u_char val; | 593 | u_char val; |
594 | int type = -1; | 594 | int type = -1; |
@@ -659,7 +659,7 @@ static int __init identify(kio_addr_t port, u_short sock) | |||
659 | static int __init is_alive(u_short sock) | 659 | static int __init is_alive(u_short sock) |
660 | { | 660 | { |
661 | u_char stat; | 661 | u_char stat; |
662 | kio_addr_t start, stop; | 662 | unsigned int start, stop; |
663 | 663 | ||
664 | stat = i365_get(sock, I365_STATUS); | 664 | stat = i365_get(sock, I365_STATUS); |
665 | start = i365_get_pair(sock, I365_IO(0)+I365_W_START); | 665 | start = i365_get_pair(sock, I365_IO(0)+I365_W_START); |
@@ -678,7 +678,7 @@ static int __init is_alive(u_short sock) | |||
678 | 678 | ||
679 | /*====================================================================*/ | 679 | /*====================================================================*/ |
680 | 680 | ||
681 | static void __init add_socket(kio_addr_t port, int psock, int type) | 681 | static void __init add_socket(unsigned int port, int psock, int type) |
682 | { | 682 | { |
683 | socket[sockets].ioaddr = port; | 683 | socket[sockets].ioaddr = port; |
684 | socket[sockets].psock = psock; | 684 | socket[sockets].psock = psock; |
@@ -698,7 +698,7 @@ static void __init add_pcic(int ns, int type) | |||
698 | base = sockets-ns; | 698 | base = sockets-ns; |
699 | if (base == 0) printk("\n"); | 699 | if (base == 0) printk("\n"); |
700 | printk(KERN_INFO " %s", pcic[type].name); | 700 | printk(KERN_INFO " %s", pcic[type].name); |
701 | printk(" ISA-to-PCMCIA at port %#lx ofs 0x%02x", | 701 | printk(" ISA-to-PCMCIA at port %#x ofs 0x%02x", |
702 | t->ioaddr, t->psock*0x40); | 702 | t->ioaddr, t->psock*0x40); |
703 | printk(", %d socket%s\n", ns, ((ns > 1) ? "s" : "")); | 703 | printk(", %d socket%s\n", ns, ((ns > 1) ? "s" : "")); |
704 | 704 | ||
@@ -772,7 +772,7 @@ static struct pnp_dev *i82365_pnpdev; | |||
772 | static void __init isa_probe(void) | 772 | static void __init isa_probe(void) |
773 | { | 773 | { |
774 | int i, j, sock, k, ns, id; | 774 | int i, j, sock, k, ns, id; |
775 | kio_addr_t port; | 775 | unsigned int port; |
776 | #ifdef CONFIG_PNP | 776 | #ifdef CONFIG_PNP |
777 | struct isapnp_device_id *devid; | 777 | struct isapnp_device_id *devid; |
778 | struct pnp_dev *dev; | 778 | struct pnp_dev *dev; |
@@ -1053,7 +1053,7 @@ static int i365_set_io_map(u_short sock, struct pccard_io_map *io) | |||
1053 | u_char map, ioctl; | 1053 | u_char map, ioctl; |
1054 | 1054 | ||
1055 | debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " | 1055 | debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " |
1056 | "%#lx-%#lx)\n", sock, io->map, io->flags, | 1056 | "%#x-%#x)\n", sock, io->map, io->flags, |
1057 | io->speed, io->start, io->stop); | 1057 | io->speed, io->start, io->stop); |
1058 | map = io->map; | 1058 | map = io->map; |
1059 | if ((map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || | 1059 | if ((map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || |
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c index 91da15b5a81e..3616da227152 100644 --- a/drivers/pcmcia/m32r_cfc.c +++ b/drivers/pcmcia/m32r_cfc.c | |||
@@ -58,7 +58,7 @@ typedef struct pcc_socket { | |||
58 | u_short type, flags; | 58 | u_short type, flags; |
59 | struct pcmcia_socket socket; | 59 | struct pcmcia_socket socket; |
60 | unsigned int number; | 60 | unsigned int number; |
61 | kio_addr_t ioaddr; | 61 | unsigned int ioaddr; |
62 | u_long mapaddr; | 62 | u_long mapaddr; |
63 | u_long base; /* PCC register base */ | 63 | u_long base; /* PCC register base */ |
64 | u_char cs_irq1, cs_irq2, intr; | 64 | u_char cs_irq1, cs_irq2, intr; |
@@ -298,7 +298,8 @@ static int __init is_alive(u_short sock) | |||
298 | return 0; | 298 | return 0; |
299 | } | 299 | } |
300 | 300 | ||
301 | static void add_pcc_socket(ulong base, int irq, ulong mapaddr, kio_addr_t ioaddr) | 301 | static void add_pcc_socket(ulong base, int irq, ulong mapaddr, |
302 | unsigned int ioaddr) | ||
302 | { | 303 | { |
303 | pcc_socket_t *t = &socket[pcc_sockets]; | 304 | pcc_socket_t *t = &socket[pcc_sockets]; |
304 | 305 | ||
@@ -738,7 +739,7 @@ static int __init init_m32r_pcc(void) | |||
738 | #else /* CONFIG_PLAT_USRV */ | 739 | #else /* CONFIG_PLAT_USRV */ |
739 | { | 740 | { |
740 | ulong base, mapaddr; | 741 | ulong base, mapaddr; |
741 | kio_addr_t ioaddr; | 742 | unsigned int ioaddr; |
742 | 743 | ||
743 | for (i = 0 ; i < M32R_MAX_PCC ; i++) { | 744 | for (i = 0 ; i < M32R_MAX_PCC ; i++) { |
744 | base = (ulong)PLD_CFRSTCR; | 745 | base = (ulong)PLD_CFRSTCR; |
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c index ec4c1253ebbb..2b42b7155e34 100644 --- a/drivers/pcmcia/m32r_pcc.c +++ b/drivers/pcmcia/m32r_pcc.c | |||
@@ -65,7 +65,7 @@ typedef struct pcc_socket { | |||
65 | u_short type, flags; | 65 | u_short type, flags; |
66 | struct pcmcia_socket socket; | 66 | struct pcmcia_socket socket; |
67 | unsigned int number; | 67 | unsigned int number; |
68 | kio_addr_t ioaddr; | 68 | unsigned int ioaddr; |
69 | u_long mapaddr; | 69 | u_long mapaddr; |
70 | u_long base; /* PCC register base */ | 70 | u_long base; /* PCC register base */ |
71 | u_char cs_irq, intr; | 71 | u_char cs_irq, intr; |
@@ -310,7 +310,8 @@ static int __init is_alive(u_short sock) | |||
310 | return 0; | 310 | return 0; |
311 | } | 311 | } |
312 | 312 | ||
313 | static void add_pcc_socket(ulong base, int irq, ulong mapaddr, kio_addr_t ioaddr) | 313 | static void add_pcc_socket(ulong base, int irq, ulong mapaddr, |
314 | unsigned int ioaddr) | ||
314 | { | 315 | { |
315 | pcc_socket_t *t = &socket[pcc_sockets]; | 316 | pcc_socket_t *t = &socket[pcc_sockets]; |
316 | 317 | ||
@@ -491,7 +492,7 @@ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io) | |||
491 | u_char map; | 492 | u_char map; |
492 | 493 | ||
493 | debug(3, "m32r-pcc: SetIOMap(%d, %d, %#2.2x, %d ns, " | 494 | debug(3, "m32r-pcc: SetIOMap(%d, %d, %#2.2x, %d ns, " |
494 | "%#lx-%#lx)\n", sock, io->map, io->flags, | 495 | "%#x-%#x)\n", sock, io->map, io->flags, |
495 | io->speed, io->start, io->stop); | 496 | io->speed, io->start, io->stop); |
496 | map = io->map; | 497 | map = io->map; |
497 | 498 | ||
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c index 4ea426a25909..ac70d2cb7dd4 100644 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ b/drivers/pcmcia/m8xx_pcmcia.c | |||
@@ -1174,8 +1174,10 @@ static int __init m8xx_probe(struct of_device *ofdev, | |||
1174 | 1174 | ||
1175 | pcmcia_schlvl = irq_of_parse_and_map(np, 0); | 1175 | pcmcia_schlvl = irq_of_parse_and_map(np, 0); |
1176 | hwirq = irq_map[pcmcia_schlvl].hwirq; | 1176 | hwirq = irq_map[pcmcia_schlvl].hwirq; |
1177 | if (pcmcia_schlvl < 0) | 1177 | if (pcmcia_schlvl < 0) { |
1178 | iounmap(pcmcia); | ||
1178 | return -EINVAL; | 1179 | return -EINVAL; |
1180 | } | ||
1179 | 1181 | ||
1180 | m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra; | 1182 | m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra; |
1181 | m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb; | 1183 | m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb; |
@@ -1189,6 +1191,7 @@ static int __init m8xx_probe(struct of_device *ofdev, | |||
1189 | driver_name, socket)) { | 1191 | driver_name, socket)) { |
1190 | pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n", | 1192 | pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n", |
1191 | pcmcia_schlvl); | 1193 | pcmcia_schlvl); |
1194 | iounmap(pcmcia); | ||
1192 | return -1; | 1195 | return -1; |
1193 | } | 1196 | } |
1194 | 1197 | ||
@@ -1284,6 +1287,7 @@ static int m8xx_remove(struct of_device *ofdev) | |||
1284 | } | 1287 | } |
1285 | for (i = 0; i < PCMCIA_SOCKETS_NO; i++) | 1288 | for (i = 0; i < PCMCIA_SOCKETS_NO; i++) |
1286 | pcmcia_unregister_socket(&socket[i].socket); | 1289 | pcmcia_unregister_socket(&socket[i].socket); |
1290 | iounmap(pcmcia); | ||
1287 | 1291 | ||
1288 | free_irq(pcmcia_schlvl, NULL); | 1292 | free_irq(pcmcia_schlvl, NULL); |
1289 | 1293 | ||
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index 0ce39de834c4..1d128fbd1a92 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c | |||
@@ -65,23 +65,23 @@ extern int ds_pc_debug; | |||
65 | * Special stuff for managing IO windows, because they are scarce | 65 | * Special stuff for managing IO windows, because they are scarce |
66 | */ | 66 | */ |
67 | 67 | ||
68 | static int alloc_io_space(struct pcmcia_socket *s, u_int attr, ioaddr_t *base, | 68 | static int alloc_io_space(struct pcmcia_socket *s, u_int attr, |
69 | ioaddr_t num, u_int lines) | 69 | unsigned int *base, unsigned int num, u_int lines) |
70 | { | 70 | { |
71 | int i; | 71 | int i; |
72 | kio_addr_t try, align; | 72 | unsigned int try, align; |
73 | 73 | ||
74 | align = (*base) ? (lines ? 1<<lines : 0) : 1; | 74 | align = (*base) ? (lines ? 1<<lines : 0) : 1; |
75 | if (align && (align < num)) { | 75 | if (align && (align < num)) { |
76 | if (*base) { | 76 | if (*base) { |
77 | ds_dbg(s, 0, "odd IO request: num %#x align %#lx\n", | 77 | ds_dbg(s, 0, "odd IO request: num %#x align %#x\n", |
78 | num, align); | 78 | num, align); |
79 | align = 0; | 79 | align = 0; |
80 | } else | 80 | } else |
81 | while (align && (align < num)) align <<= 1; | 81 | while (align && (align < num)) align <<= 1; |
82 | } | 82 | } |
83 | if (*base & ~(align-1)) { | 83 | if (*base & ~(align-1)) { |
84 | ds_dbg(s, 0, "odd IO request: base %#x align %#lx\n", | 84 | ds_dbg(s, 0, "odd IO request: base %#x align %#x\n", |
85 | *base, align); | 85 | *base, align); |
86 | align = 0; | 86 | align = 0; |
87 | } | 87 | } |
@@ -132,8 +132,8 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr, ioaddr_t *base, | |||
132 | } /* alloc_io_space */ | 132 | } /* alloc_io_space */ |
133 | 133 | ||
134 | 134 | ||
135 | static void release_io_space(struct pcmcia_socket *s, ioaddr_t base, | 135 | static void release_io_space(struct pcmcia_socket *s, unsigned int base, |
136 | ioaddr_t num) | 136 | unsigned int num) |
137 | { | 137 | { |
138 | int i; | 138 | int i; |
139 | 139 | ||
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index bfcaad6021cf..a8d100707721 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c | |||
@@ -186,15 +186,16 @@ static int sub_interval(struct resource_map *map, u_long base, u_long num) | |||
186 | ======================================================================*/ | 186 | ======================================================================*/ |
187 | 187 | ||
188 | #ifdef CONFIG_PCMCIA_PROBE | 188 | #ifdef CONFIG_PCMCIA_PROBE |
189 | static void do_io_probe(struct pcmcia_socket *s, kio_addr_t base, kio_addr_t num) | 189 | static void do_io_probe(struct pcmcia_socket *s, unsigned int base, |
190 | unsigned int num) | ||
190 | { | 191 | { |
191 | struct resource *res; | 192 | struct resource *res; |
192 | struct socket_data *s_data = s->resource_data; | 193 | struct socket_data *s_data = s->resource_data; |
193 | kio_addr_t i, j, bad; | 194 | unsigned int i, j, bad; |
194 | int any; | 195 | int any; |
195 | u_char *b, hole, most; | 196 | u_char *b, hole, most; |
196 | 197 | ||
197 | printk(KERN_INFO "cs: IO port probe %#lx-%#lx:", | 198 | printk(KERN_INFO "cs: IO port probe %#x-%#x:", |
198 | base, base+num-1); | 199 | base, base+num-1); |
199 | 200 | ||
200 | /* First, what does a floating port look like? */ | 201 | /* First, what does a floating port look like? */ |
@@ -233,7 +234,7 @@ static void do_io_probe(struct pcmcia_socket *s, kio_addr_t base, kio_addr_t num | |||
233 | } else { | 234 | } else { |
234 | if (bad) { | 235 | if (bad) { |
235 | sub_interval(&s_data->io_db, bad, i-bad); | 236 | sub_interval(&s_data->io_db, bad, i-bad); |
236 | printk(" %#lx-%#lx", bad, i-1); | 237 | printk(" %#x-%#x", bad, i-1); |
237 | bad = 0; | 238 | bad = 0; |
238 | } | 239 | } |
239 | } | 240 | } |
@@ -244,7 +245,7 @@ static void do_io_probe(struct pcmcia_socket *s, kio_addr_t base, kio_addr_t num | |||
244 | return; | 245 | return; |
245 | } else { | 246 | } else { |
246 | sub_interval(&s_data->io_db, bad, i-bad); | 247 | sub_interval(&s_data->io_db, bad, i-bad); |
247 | printk(" %#lx-%#lx", bad, i-1); | 248 | printk(" %#x-%#x", bad, i-1); |
248 | } | 249 | } |
249 | } | 250 | } |
250 | 251 | ||
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c index 749ac3710914..5792bd5c54f9 100644 --- a/drivers/pcmcia/tcic.c +++ b/drivers/pcmcia/tcic.c | |||
@@ -719,7 +719,7 @@ static int tcic_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) | |||
719 | u_short base, len, ioctl; | 719 | u_short base, len, ioctl; |
720 | 720 | ||
721 | debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " | 721 | debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " |
722 | "%#lx-%#lx)\n", psock, io->map, io->flags, | 722 | "%#x-%#x)\n", psock, io->map, io->flags, |
723 | io->speed, io->start, io->stop); | 723 | io->speed, io->start, io->stop); |
724 | if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || | 724 | if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || |
725 | (io->stop < io->start)) return -EINVAL; | 725 | (io->stop < io->start)) return -EINVAL; |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index d640427c74c8..d984e0fae630 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1057,12 +1057,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1057 | if (device->features & DASD_FEATURE_ERPLOG) { | 1057 | if (device->features & DASD_FEATURE_ERPLOG) { |
1058 | dasd_log_sense(cqr, irb); | 1058 | dasd_log_sense(cqr, irb); |
1059 | } | 1059 | } |
1060 | /* If we have no sense data, or we just don't want complex ERP | 1060 | /* |
1061 | * for this request, but if we have retries left, then just | 1061 | * If we don't want complex ERP for this request, then just |
1062 | * reset this request and retry it in the fastpath | 1062 | * reset this and retry it in the fastpath |
1063 | */ | 1063 | */ |
1064 | if (!(cqr->irb.esw.esw0.erw.cons && | 1064 | if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && |
1065 | test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) && | ||
1066 | cqr->retries > 0) { | 1065 | cqr->retries > 0) { |
1067 | DEV_MESSAGE(KERN_DEBUG, device, | 1066 | DEV_MESSAGE(KERN_DEBUG, device, |
1068 | "default ERP in fastpath (%i retries left)", | 1067 | "default ERP in fastpath (%i retries left)", |
@@ -1707,7 +1706,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) | |||
1707 | 1706 | ||
1708 | req = (struct request *) cqr->callback_data; | 1707 | req = (struct request *) cqr->callback_data; |
1709 | dasd_profile_end(cqr->block, cqr, req); | 1708 | dasd_profile_end(cqr->block, cqr, req); |
1710 | status = cqr->memdev->discipline->free_cp(cqr, req); | 1709 | status = cqr->block->base->discipline->free_cp(cqr, req); |
1711 | if (status <= 0) | 1710 | if (status <= 0) |
1712 | error = status ? status : -EIO; | 1711 | error = status ? status : -EIO; |
1713 | dasd_end_request(req, error); | 1712 | dasd_end_request(req, error); |
@@ -1742,12 +1741,8 @@ restart: | |||
1742 | 1741 | ||
1743 | /* Process requests that may be recovered */ | 1742 | /* Process requests that may be recovered */ |
1744 | if (cqr->status == DASD_CQR_NEED_ERP) { | 1743 | if (cqr->status == DASD_CQR_NEED_ERP) { |
1745 | if (cqr->irb.esw.esw0.erw.cons && | 1744 | erp_fn = base->discipline->erp_action(cqr); |
1746 | test_bit(DASD_CQR_FLAGS_USE_ERP, | 1745 | erp_fn(cqr); |
1747 | &cqr->flags)) { | ||
1748 | erp_fn = base->discipline->erp_action(cqr); | ||
1749 | erp_fn(cqr); | ||
1750 | } | ||
1751 | goto restart; | 1746 | goto restart; |
1752 | } | 1747 | } |
1753 | 1748 | ||
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index c361ab69ec00..f69714a0e9e7 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -164,7 +164,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp) | |||
164 | 164 | ||
165 | /* reset status to submit the request again... */ | 165 | /* reset status to submit the request again... */ |
166 | erp->status = DASD_CQR_FILLED; | 166 | erp->status = DASD_CQR_FILLED; |
167 | erp->retries = 1; | 167 | erp->retries = 10; |
168 | } else { | 168 | } else { |
169 | DEV_MESSAGE(KERN_ERR, device, | 169 | DEV_MESSAGE(KERN_ERR, device, |
170 | "No alternate channel path left (lpum=%x / " | 170 | "No alternate channel path left (lpum=%x / " |
@@ -301,8 +301,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense) | |||
301 | erp->function = dasd_3990_erp_action_4; | 301 | erp->function = dasd_3990_erp_action_4; |
302 | 302 | ||
303 | } else { | 303 | } else { |
304 | 304 | if (sense && (sense[25] == 0x1D)) { /* state change pending */ | |
305 | if (sense[25] == 0x1D) { /* state change pending */ | ||
306 | 305 | ||
307 | DEV_MESSAGE(KERN_INFO, device, | 306 | DEV_MESSAGE(KERN_INFO, device, |
308 | "waiting for state change pending " | 307 | "waiting for state change pending " |
@@ -311,7 +310,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense) | |||
311 | 310 | ||
312 | dasd_3990_erp_block_queue(erp, 30*HZ); | 311 | dasd_3990_erp_block_queue(erp, 30*HZ); |
313 | 312 | ||
314 | } else if (sense[25] == 0x1E) { /* busy */ | 313 | } else if (sense && (sense[25] == 0x1E)) { /* busy */ |
315 | DEV_MESSAGE(KERN_INFO, device, | 314 | DEV_MESSAGE(KERN_INFO, device, |
316 | "busy - redriving request later, " | 315 | "busy - redriving request later, " |
317 | "%d retries left", | 316 | "%d retries left", |
@@ -2120,6 +2119,34 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) | |||
2120 | */ | 2119 | */ |
2121 | 2120 | ||
2122 | /* | 2121 | /* |
2122 | * DASD_3990_ERP_CONTROL_CHECK | ||
2123 | * | ||
2124 | * DESCRIPTION | ||
2125 | * Does a generic inspection if a control check occurred and sets up | ||
2126 | * the related error recovery procedure | ||
2127 | * | ||
2128 | * PARAMETER | ||
2129 | * erp pointer to the currently created default ERP | ||
2130 | * | ||
2131 | * RETURN VALUES | ||
2132 | * erp_filled pointer to the erp | ||
2133 | */ | ||
2134 | |||
2135 | static struct dasd_ccw_req * | ||
2136 | dasd_3990_erp_control_check(struct dasd_ccw_req *erp) | ||
2137 | { | ||
2138 | struct dasd_device *device = erp->startdev; | ||
2139 | |||
2140 | if (erp->refers->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | ||
2141 | | SCHN_STAT_CHN_CTRL_CHK)) { | ||
2142 | DEV_MESSAGE(KERN_DEBUG, device, "%s", | ||
2143 | "channel or interface control check"); | ||
2144 | erp = dasd_3990_erp_action_4(erp, NULL); | ||
2145 | } | ||
2146 | return erp; | ||
2147 | } | ||
2148 | |||
2149 | /* | ||
2123 | * DASD_3990_ERP_INSPECT | 2150 | * DASD_3990_ERP_INSPECT |
2124 | * | 2151 | * |
2125 | * DESCRIPTION | 2152 | * DESCRIPTION |
@@ -2145,8 +2172,11 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp) | |||
2145 | if (erp_new) | 2172 | if (erp_new) |
2146 | return erp_new; | 2173 | return erp_new; |
2147 | 2174 | ||
2175 | /* check if no concurrent sense data is available */ | ||
2176 | if (!erp->refers->irb.esw.esw0.erw.cons) | ||
2177 | erp_new = dasd_3990_erp_control_check(erp); | ||
2148 | /* distinguish between 24 and 32 byte sense data */ | 2178 | /* distinguish between 24 and 32 byte sense data */ |
2149 | if (sense[27] & DASD_SENSE_BIT_0) { | 2179 | else if (sense[27] & DASD_SENSE_BIT_0) { |
2150 | 2180 | ||
2151 | /* inspect the 24 byte sense data */ | 2181 | /* inspect the 24 byte sense data */ |
2152 | erp_new = dasd_3990_erp_inspect_24(erp, sense); | 2182 | erp_new = dasd_3990_erp_inspect_24(erp, sense); |
@@ -2285,6 +2315,17 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2) | |||
2285 | // return 0; /* CCW doesn't match */ | 2315 | // return 0; /* CCW doesn't match */ |
2286 | } | 2316 | } |
2287 | 2317 | ||
2318 | if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons) | ||
2319 | return 0; | ||
2320 | |||
2321 | if ((cqr1->irb.esw.esw0.erw.cons == 0) && | ||
2322 | (cqr2->irb.esw.esw0.erw.cons == 0)) { | ||
2323 | if ((cqr1->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | | ||
2324 | SCHN_STAT_CHN_CTRL_CHK)) == | ||
2325 | (cqr2->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | | ||
2326 | SCHN_STAT_CHN_CTRL_CHK))) | ||
2327 | return 1; /* match with ifcc */ | ||
2328 | } | ||
2288 | /* check sense data; byte 0-2,25,27 */ | 2329 | /* check sense data; byte 0-2,25,27 */ |
2289 | if (!((memcmp (cqr1->irb.ecw, cqr2->irb.ecw, 3) == 0) && | 2330 | if (!((memcmp (cqr1->irb.ecw, cqr2->irb.ecw, 3) == 0) && |
2290 | (cqr1->irb.ecw[27] == cqr2->irb.ecw[27]) && | 2331 | (cqr1->irb.ecw[27] == cqr2->irb.ecw[27]) && |
@@ -2560,17 +2601,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) | |||
2560 | 2601 | ||
2561 | return cqr; | 2602 | return cqr; |
2562 | } | 2603 | } |
2563 | /* check if sense data are available */ | ||
2564 | if (!cqr->irb.ecw) { | ||
2565 | DEV_MESSAGE(KERN_DEBUG, device, | ||
2566 | "ERP called witout sense data avail ..." | ||
2567 | "request %p - NO ERP possible", cqr); | ||
2568 | |||
2569 | cqr->status = DASD_CQR_FAILED; | ||
2570 | |||
2571 | return cqr; | ||
2572 | |||
2573 | } | ||
2574 | 2604 | ||
2575 | /* check if error happened before */ | 2605 | /* check if error happened before */ |
2576 | erp = dasd_3990_erp_in_erp(cqr); | 2606 | erp = dasd_3990_erp_in_erp(cqr); |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 7779bfce1c31..3faf0538b328 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -415,6 +415,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
415 | dev_info->gd->queue = dev_info->dcssblk_queue; | 415 | dev_info->gd->queue = dev_info->dcssblk_queue; |
416 | dev_info->gd->private_data = dev_info; | 416 | dev_info->gd->private_data = dev_info; |
417 | dev_info->gd->driverfs_dev = &dev_info->dev; | 417 | dev_info->gd->driverfs_dev = &dev_info->dev; |
418 | blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); | ||
419 | blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096); | ||
418 | /* | 420 | /* |
419 | * load the segment | 421 | * load the segment |
420 | */ | 422 | */ |
@@ -472,9 +474,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
472 | if (rc) | 474 | if (rc) |
473 | goto unregister_dev; | 475 | goto unregister_dev; |
474 | 476 | ||
475 | blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); | ||
476 | blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096); | ||
477 | |||
478 | add_disk(dev_info->gd); | 477 | add_disk(dev_info->gd); |
479 | 478 | ||
480 | switch (dev_info->segment_type) { | 479 | switch (dev_info->segment_type) { |
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index e3b3d390b4a3..2e616e33891d 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c | |||
@@ -332,7 +332,7 @@ sclp_tty_write_string(const unsigned char *str, int count) | |||
332 | if (sclp_ttybuf == NULL) { | 332 | if (sclp_ttybuf == NULL) { |
333 | while (list_empty(&sclp_tty_pages)) { | 333 | while (list_empty(&sclp_tty_pages)) { |
334 | spin_unlock_irqrestore(&sclp_tty_lock, flags); | 334 | spin_unlock_irqrestore(&sclp_tty_lock, flags); |
335 | if (in_interrupt()) | 335 | if (in_atomic()) |
336 | sclp_sync_wait(); | 336 | sclp_sync_wait(); |
337 | else | 337 | else |
338 | wait_event(sclp_tty_waitq, | 338 | wait_event(sclp_tty_waitq, |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 40cd21bc5cc4..68071622d4bb 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -400,7 +400,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, | |||
400 | while (list_empty(&sclp_vt220_empty)) { | 400 | while (list_empty(&sclp_vt220_empty)) { |
401 | spin_unlock_irqrestore(&sclp_vt220_lock, | 401 | spin_unlock_irqrestore(&sclp_vt220_lock, |
402 | flags); | 402 | flags); |
403 | if (in_interrupt()) | 403 | if (in_atomic()) |
404 | sclp_sync_wait(); | 404 | sclp_sync_wait(); |
405 | else | 405 | else |
406 | wait_event(sclp_vt220_waitq, | 406 | wait_event(sclp_vt220_waitq, |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 3964056a9a47..03914fa81174 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -391,12 +391,24 @@ ccwgroup_remove (struct device *dev) | |||
391 | return 0; | 391 | return 0; |
392 | } | 392 | } |
393 | 393 | ||
394 | static void ccwgroup_shutdown(struct device *dev) | ||
395 | { | ||
396 | struct ccwgroup_device *gdev; | ||
397 | struct ccwgroup_driver *gdrv; | ||
398 | |||
399 | gdev = to_ccwgroupdev(dev); | ||
400 | gdrv = to_ccwgroupdrv(dev->driver); | ||
401 | if (gdrv && gdrv->shutdown) | ||
402 | gdrv->shutdown(gdev); | ||
403 | } | ||
404 | |||
394 | static struct bus_type ccwgroup_bus_type = { | 405 | static struct bus_type ccwgroup_bus_type = { |
395 | .name = "ccwgroup", | 406 | .name = "ccwgroup", |
396 | .match = ccwgroup_bus_match, | 407 | .match = ccwgroup_bus_match, |
397 | .uevent = ccwgroup_uevent, | 408 | .uevent = ccwgroup_uevent, |
398 | .probe = ccwgroup_probe, | 409 | .probe = ccwgroup_probe, |
399 | .remove = ccwgroup_remove, | 410 | .remove = ccwgroup_remove, |
411 | .shutdown = ccwgroup_shutdown, | ||
400 | }; | 412 | }; |
401 | 413 | ||
402 | /** | 414 | /** |
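A hypothetical ccwgroup driver would hook the new bus-level shutdown path roughly like this; the driver name and the quiesce step are only sketched, and the header location assumes the asm/ccwgroup.h layout of this kernel generation.

#include <asm/ccwgroup.h>

static void example_shutdown(struct ccwgroup_device *gdev)
{
        /* Quiesce the grouped device so it stops I/O across reboot/kexec. */
}

static struct ccwgroup_driver example_group_driver = {
        .name     = "example",
        .shutdown = example_shutdown,
};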
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index e7ba16a74ef7..007aaeb4f532 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -26,6 +26,25 @@ | |||
26 | 26 | ||
27 | static void *sei_page; | 27 | static void *sei_page; |
28 | 28 | ||
29 | static int chsc_error_from_response(int response) | ||
30 | { | ||
31 | switch (response) { | ||
32 | case 0x0001: | ||
33 | return 0; | ||
34 | case 0x0002: | ||
35 | case 0x0003: | ||
36 | case 0x0006: | ||
37 | case 0x0007: | ||
38 | case 0x0008: | ||
39 | case 0x000a: | ||
40 | return -EINVAL; | ||
41 | case 0x0004: | ||
42 | return -EOPNOTSUPP; | ||
43 | default: | ||
44 | return -EIO; | ||
45 | } | ||
46 | } | ||
47 | |||
29 | struct chsc_ssd_area { | 48 | struct chsc_ssd_area { |
30 | struct chsc_header request; | 49 | struct chsc_header request; |
31 | u16 :10; | 50 | u16 :10; |
@@ -75,11 +94,11 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) | |||
75 | ret = (ccode == 3) ? -ENODEV : -EBUSY; | 94 | ret = (ccode == 3) ? -ENODEV : -EBUSY; |
76 | goto out_free; | 95 | goto out_free; |
77 | } | 96 | } |
78 | if (ssd_area->response.code != 0x0001) { | 97 | ret = chsc_error_from_response(ssd_area->response.code); |
98 | if (ret != 0) { | ||
79 | CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", | 99 | CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", |
80 | schid.ssid, schid.sch_no, | 100 | schid.ssid, schid.sch_no, |
81 | ssd_area->response.code); | 101 | ssd_area->response.code); |
82 | ret = -EIO; | ||
83 | goto out_free; | 102 | goto out_free; |
84 | } | 103 | } |
85 | if (!ssd_area->sch_valid) { | 104 | if (!ssd_area->sch_valid) { |
@@ -717,36 +736,15 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | |||
717 | return (ccode == 3) ? -ENODEV : -EBUSY; | 736 | return (ccode == 3) ? -ENODEV : -EBUSY; |
718 | 737 | ||
719 | switch (secm_area->response.code) { | 738 | switch (secm_area->response.code) { |
720 | case 0x0001: /* Success. */ | 739 | case 0x0102: |
721 | ret = 0; | 740 | case 0x0103: |
722 | break; | ||
723 | case 0x0003: /* Invalid block. */ | ||
724 | case 0x0007: /* Invalid format. */ | ||
725 | case 0x0008: /* Other invalid block. */ | ||
726 | CIO_CRW_EVENT(2, "Error in chsc request block!\n"); | ||
727 | ret = -EINVAL; | ||
728 | break; | ||
729 | case 0x0004: /* Command not provided in model. */ | ||
730 | CIO_CRW_EVENT(2, "Model does not provide secm\n"); | ||
731 | ret = -EOPNOTSUPP; | ||
732 | break; | ||
733 | case 0x0102: /* cub adresses incorrect */ | ||
734 | CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n"); | ||
735 | ret = -EINVAL; | ||
736 | break; | ||
737 | case 0x0103: /* key error */ | ||
738 | CIO_CRW_EVENT(2, "Access key error in secm\n"); | ||
739 | ret = -EINVAL; | 741 | ret = -EINVAL; |
740 | break; | ||
741 | case 0x0105: /* error while starting */ | ||
742 | CIO_CRW_EVENT(2, "Error while starting channel measurement\n"); | ||
743 | ret = -EIO; | ||
744 | break; | ||
745 | default: | 742 | default: |
746 | CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", | 743 | ret = chsc_error_from_response(secm_area->response.code); |
747 | secm_area->response.code); | ||
748 | ret = -EIO; | ||
749 | } | 744 | } |
745 | if (ret != 0) | ||
746 | CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", | ||
747 | secm_area->response.code); | ||
750 | return ret; | 748 | return ret; |
751 | } | 749 | } |
752 | 750 | ||
@@ -827,27 +825,14 @@ int chsc_determine_channel_path_description(struct chp_id chpid, | |||
827 | goto out; | 825 | goto out; |
828 | } | 826 | } |
829 | 827 | ||
830 | switch (scpd_area->response.code) { | 828 | ret = chsc_error_from_response(scpd_area->response.code); |
831 | case 0x0001: /* Success. */ | 829 | if (ret == 0) |
830 | /* Success. */ | ||
832 | memcpy(desc, &scpd_area->desc, | 831 | memcpy(desc, &scpd_area->desc, |
833 | sizeof(struct channel_path_desc)); | 832 | sizeof(struct channel_path_desc)); |
834 | ret = 0; | 833 | else |
835 | break; | 834 | CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", |
836 | case 0x0003: /* Invalid block. */ | ||
837 | case 0x0007: /* Invalid format. */ | ||
838 | case 0x0008: /* Other invalid block. */ | ||
839 | CIO_CRW_EVENT(2, "Error in chsc request block!\n"); | ||
840 | ret = -EINVAL; | ||
841 | break; | ||
842 | case 0x0004: /* Command not provided in model. */ | ||
843 | CIO_CRW_EVENT(2, "Model does not provide scpd\n"); | ||
844 | ret = -EOPNOTSUPP; | ||
845 | break; | ||
846 | default: | ||
847 | CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", | ||
848 | scpd_area->response.code); | 835 | scpd_area->response.code); |
849 | ret = -EIO; | ||
850 | } | ||
851 | out: | 836 | out: |
852 | free_page((unsigned long)scpd_area); | 837 | free_page((unsigned long)scpd_area); |
853 | return ret; | 838 | return ret; |
@@ -923,8 +908,9 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) | |||
923 | goto out; | 908 | goto out; |
924 | } | 909 | } |
925 | 910 | ||
926 | switch (scmc_area->response.code) { | 911 | ret = chsc_error_from_response(scmc_area->response.code); |
927 | case 0x0001: /* Success. */ | 912 | if (ret == 0) { |
913 | /* Success. */ | ||
928 | if (!scmc_area->not_valid) { | 914 | if (!scmc_area->not_valid) { |
929 | chp->cmg = scmc_area->cmg; | 915 | chp->cmg = scmc_area->cmg; |
930 | chp->shared = scmc_area->shared; | 916 | chp->shared = scmc_area->shared; |
@@ -935,22 +921,9 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) | |||
935 | chp->cmg = -1; | 921 | chp->cmg = -1; |
936 | chp->shared = -1; | 922 | chp->shared = -1; |
937 | } | 923 | } |
938 | ret = 0; | 924 | } else { |
939 | break; | 925 | CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n", |
940 | case 0x0003: /* Invalid block. */ | ||
941 | case 0x0007: /* Invalid format. */ | ||
942 | case 0x0008: /* Invalid bit combination. */ | ||
943 | CIO_CRW_EVENT(2, "Error in chsc request block!\n"); | ||
944 | ret = -EINVAL; | ||
945 | break; | ||
946 | case 0x0004: /* Command not provided. */ | ||
947 | CIO_CRW_EVENT(2, "Model does not provide scmc\n"); | ||
948 | ret = -EOPNOTSUPP; | ||
949 | break; | ||
950 | default: | ||
951 | CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", | ||
952 | scmc_area->response.code); | 926 | scmc_area->response.code); |
953 | ret = -EIO; | ||
954 | } | 927 | } |
955 | out: | 928 | out: |
956 | free_page((unsigned long)scmc_area); | 929 | free_page((unsigned long)scmc_area); |
@@ -1002,21 +975,17 @@ chsc_enable_facility(int operation_code) | |||
1002 | ret = (ret == 3) ? -ENODEV : -EBUSY; | 975 | ret = (ret == 3) ? -ENODEV : -EBUSY; |
1003 | goto out; | 976 | goto out; |
1004 | } | 977 | } |
978 | |||
1005 | switch (sda_area->response.code) { | 979 | switch (sda_area->response.code) { |
1006 | case 0x0001: /* everything ok */ | 980 | case 0x0101: |
1007 | ret = 0; | ||
1008 | break; | ||
1009 | case 0x0003: /* invalid request block */ | ||
1010 | case 0x0007: | ||
1011 | ret = -EINVAL; | ||
1012 | break; | ||
1013 | case 0x0004: /* command not provided */ | ||
1014 | case 0x0101: /* facility not provided */ | ||
1015 | ret = -EOPNOTSUPP; | 981 | ret = -EOPNOTSUPP; |
1016 | break; | 982 | break; |
1017 | default: /* something went wrong */ | 983 | default: |
1018 | ret = -EIO; | 984 | ret = chsc_error_from_response(sda_area->response.code); |
1019 | } | 985 | } |
986 | if (ret != 0) | ||
987 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", | ||
988 | operation_code, sda_area->response.code); | ||
1020 | out: | 989 | out: |
1021 | free_page((unsigned long)sda_area); | 990 | free_page((unsigned long)sda_area); |
1022 | return ret; | 991 | return ret; |
@@ -1041,33 +1010,27 @@ chsc_determine_css_characteristics(void) | |||
1041 | } __attribute__ ((packed)) *scsc_area; | 1010 | } __attribute__ ((packed)) *scsc_area; |
1042 | 1011 | ||
1043 | scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 1012 | scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
1044 | if (!scsc_area) { | 1013 | if (!scsc_area) |
1045 | CIO_MSG_EVENT(0, "Was not able to determine available " | ||
1046 | "CHSCs due to no memory.\n"); | ||
1047 | return -ENOMEM; | 1014 | return -ENOMEM; |
1048 | } | ||
1049 | 1015 | ||
1050 | scsc_area->request.length = 0x0010; | 1016 | scsc_area->request.length = 0x0010; |
1051 | scsc_area->request.code = 0x0010; | 1017 | scsc_area->request.code = 0x0010; |
1052 | 1018 | ||
1053 | result = chsc(scsc_area); | 1019 | result = chsc(scsc_area); |
1054 | if (result) { | 1020 | if (result) { |
1055 | CIO_MSG_EVENT(0, "Was not able to determine available CHSCs, " | 1021 | result = (result == 3) ? -ENODEV : -EBUSY; |
1056 | "cc=%i.\n", result); | ||
1057 | result = -EIO; | ||
1058 | goto exit; | 1022 | goto exit; |
1059 | } | 1023 | } |
1060 | 1024 | ||
1061 | if (scsc_area->response.code != 1) { | 1025 | result = chsc_error_from_response(scsc_area->response.code); |
1062 | CIO_MSG_EVENT(0, "Was not able to determine " | 1026 | if (result == 0) { |
1063 | "available CHSCs.\n"); | 1027 | memcpy(&css_general_characteristics, scsc_area->general_char, |
1064 | result = -EIO; | 1028 | sizeof(css_general_characteristics)); |
1065 | goto exit; | 1029 | memcpy(&css_chsc_characteristics, scsc_area->chsc_char, |
1066 | } | 1030 | sizeof(css_chsc_characteristics)); |
1067 | memcpy(&css_general_characteristics, scsc_area->general_char, | 1031 | } else |
1068 | sizeof(css_general_characteristics)); | 1032 | CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", |
1069 | memcpy(&css_chsc_characteristics, scsc_area->chsc_char, | 1033 | scsc_area->response.code); |
1070 | sizeof(css_chsc_characteristics)); | ||
1071 | exit: | 1034 | exit: |
1072 | free_page ((unsigned long) scsc_area); | 1035 | free_page ((unsigned long) scsc_area); |
1073 | return result; | 1036 | return result; |
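Illustrative only: how a caller inside chsc.c collapses its response-code switch into chsc_error_from_response(). The request/response block below is a stand-in for whatever area the real caller uses, and the command code is hypothetical; chsc_header, chsc() and CIO_CRW_EVENT come from the cio-internal headers.

#include <linux/errno.h>

/* Stand-in request/response block; real callers use their own layouts. */
struct example_chsc_area {
        struct chsc_header request;
        u32 reserved;
        struct chsc_header response;
} __attribute__ ((packed));

static int example_issue_chsc(struct example_chsc_area *area)
{
        int ret;

        area->request.length = sizeof(*area);
        area->request.code = 0x0000;    /* hypothetical command code */

        ret = chsc(area);               /* condition code from the call */
        if (ret)
                return (ret == 3) ? -ENODEV : -EBUSY;

        ret = chsc_error_from_response(area->response.code);
        if (ret)
                CIO_CRW_EVENT(2, "chsc: example request failed (rc=%04x)\n",
                              area->response.code);
        return ret;
}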
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index 918b8b89cf9a..dc4d87f77f6c 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c | |||
@@ -26,17 +26,18 @@ | |||
26 | #include "ioasm.h" | 26 | #include "ioasm.h" |
27 | #include "io_sch.h" | 27 | #include "io_sch.h" |
28 | 28 | ||
29 | /* | 29 | /** |
30 | * Input : | 30 | * vm_vdev_to_cu_type - Convert vm virtual device into control unit type |
31 | * devno - device number | 31 | * for certain devices. |
32 | * ps - pointer to sense ID data area | 32 | * @class: virtual device class |
33 | * Output : none | 33 | * @type: virtual device type |
34 | * | ||
35 | * Returns control unit type if a match was made or %0xffff otherwise. | ||
34 | */ | 36 | */ |
35 | static void | 37 | static int vm_vdev_to_cu_type(int class, int type) |
36 | VM_virtual_device_info (__u16 devno, struct senseid *ps) | ||
37 | { | 38 | { |
38 | static struct { | 39 | static struct { |
39 | int vrdcvcla, vrdcvtyp, cu_type; | 40 | int class, type, cu_type; |
40 | } vm_devices[] = { | 41 | } vm_devices[] = { |
41 | { 0x08, 0x01, 0x3480 }, | 42 | { 0x08, 0x01, 0x3480 }, |
42 | { 0x08, 0x02, 0x3430 }, | 43 | { 0x08, 0x02, 0x3430 }, |
@@ -68,8 +69,26 @@ VM_virtual_device_info (__u16 devno, struct senseid *ps) | |||
68 | { 0x40, 0xc0, 0x5080 }, | 69 | { 0x40, 0xc0, 0x5080 }, |
69 | { 0x80, 0x00, 0x3215 }, | 70 | { 0x80, 0x00, 0x3215 }, |
70 | }; | 71 | }; |
72 | int i; | ||
73 | |||
74 | for (i = 0; i < ARRAY_SIZE(vm_devices); i++) | ||
75 | if (class == vm_devices[i].class && type == vm_devices[i].type) | ||
76 | return vm_devices[i].cu_type; | ||
77 | |||
78 | return 0xffff; | ||
79 | } | ||
80 | |||
81 | /** | ||
82 | * diag_get_dev_info - retrieve device information via DIAG X'210' | ||
83 | * @devno: device number | ||
84 | * @ps: pointer to sense ID data area | ||
85 | * | ||
86 | * Returns zero on success, non-zero otherwise. | ||
87 | */ | ||
88 | static int diag_get_dev_info(u16 devno, struct senseid *ps) | ||
89 | { | ||
71 | struct diag210 diag_data; | 90 | struct diag210 diag_data; |
72 | int ccode, i; | 91 | int ccode; |
73 | 92 | ||
74 | CIO_TRACE_EVENT (4, "VMvdinf"); | 93 | CIO_TRACE_EVENT (4, "VMvdinf"); |
75 | 94 | ||
@@ -79,21 +98,21 @@ VM_virtual_device_info (__u16 devno, struct senseid *ps) | |||
79 | }; | 98 | }; |
80 | 99 | ||
81 | ccode = diag210 (&diag_data); | 100 | ccode = diag210 (&diag_data); |
82 | ps->reserved = 0xff; | 101 | if ((ccode == 0) || (ccode == 2)) { |
102 | ps->reserved = 0xff; | ||
83 | 103 | ||
84 | /* Special case for bloody osa devices. */ | 104 | /* Special case for osa devices. */ |
85 | if (diag_data.vrdcvcla == 0x02 && | 105 | if (diag_data.vrdcvcla == 0x02 && diag_data.vrdcvtyp == 0x20) { |
86 | diag_data.vrdcvtyp == 0x20) { | 106 | ps->cu_type = 0x3088; |
87 | ps->cu_type = 0x3088; | 107 | ps->cu_model = 0x60; |
88 | ps->cu_model = 0x60; | 108 | return 0; |
89 | return; | ||
90 | } | ||
91 | for (i = 0; i < ARRAY_SIZE(vm_devices); i++) | ||
92 | if (diag_data.vrdcvcla == vm_devices[i].vrdcvcla && | ||
93 | diag_data.vrdcvtyp == vm_devices[i].vrdcvtyp) { | ||
94 | ps->cu_type = vm_devices[i].cu_type; | ||
95 | return; | ||
96 | } | 109 | } |
110 | ps->cu_type = vm_vdev_to_cu_type(diag_data.vrdcvcla, | ||
111 | diag_data.vrdcvtyp); | ||
112 | if (ps->cu_type != 0xffff) | ||
113 | return 0; | ||
114 | } | ||
115 | |||
97 | CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):" | 116 | CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):" |
98 | "vdev class : %02X, vdev type : %04X \n ... " | 117 | "vdev class : %02X, vdev type : %04X \n ... " |
99 | "rdev class : %02X, rdev type : %04X, " | 118 | "rdev class : %02X, rdev type : %04X, " |
@@ -102,6 +121,8 @@ VM_virtual_device_info (__u16 devno, struct senseid *ps) | |||
102 | diag_data.vrdcvcla, diag_data.vrdcvtyp, | 121 | diag_data.vrdcvcla, diag_data.vrdcvtyp, |
103 | diag_data.vrdcrccl, diag_data.vrdccrty, | 122 | diag_data.vrdcrccl, diag_data.vrdccrty, |
104 | diag_data.vrdccrmd); | 123 | diag_data.vrdccrmd); |
124 | |||
125 | return -ENODEV; | ||
105 | } | 126 | } |
106 | 127 | ||
107 | /* | 128 | /* |
@@ -130,6 +151,7 @@ __ccw_device_sense_id_start(struct ccw_device *cdev) | |||
130 | /* Try on every path. */ | 151 | /* Try on every path. */ |
131 | ret = -ENODEV; | 152 | ret = -ENODEV; |
132 | while (cdev->private->imask != 0) { | 153 | while (cdev->private->imask != 0) { |
154 | cdev->private->senseid.cu_type = 0xFFFF; | ||
133 | if ((sch->opm & cdev->private->imask) != 0 && | 155 | if ((sch->opm & cdev->private->imask) != 0 && |
134 | cdev->private->iretry > 0) { | 156 | cdev->private->iretry > 0) { |
135 | cdev->private->iretry--; | 157 | cdev->private->iretry--; |
@@ -153,7 +175,6 @@ ccw_device_sense_id_start(struct ccw_device *cdev) | |||
153 | int ret; | 175 | int ret; |
154 | 176 | ||
155 | memset (&cdev->private->senseid, 0, sizeof (struct senseid)); | 177 | memset (&cdev->private->senseid, 0, sizeof (struct senseid)); |
156 | cdev->private->senseid.cu_type = 0xFFFF; | ||
157 | cdev->private->imask = 0x80; | 178 | cdev->private->imask = 0x80; |
158 | cdev->private->iretry = 5; | 179 | cdev->private->iretry = 5; |
159 | ret = __ccw_device_sense_id_start(cdev); | 180 | ret = __ccw_device_sense_id_start(cdev); |
@@ -173,13 +194,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev) | |||
173 | 194 | ||
174 | sch = to_subchannel(cdev->dev.parent); | 195 | sch = to_subchannel(cdev->dev.parent); |
175 | irb = &cdev->private->irb; | 196 | irb = &cdev->private->irb; |
176 | /* Did we get a proper answer ? */ | 197 | |
177 | if (cdev->private->senseid.cu_type != 0xFFFF && | ||
178 | cdev->private->senseid.reserved == 0xFF) { | ||
179 | if (irb->scsw.count < sizeof (struct senseid) - 8) | ||
180 | cdev->private->flags.esid = 1; | ||
181 | return 0; /* Success */ | ||
182 | } | ||
183 | /* Check the error cases. */ | 198 | /* Check the error cases. */ |
184 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { | 199 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { |
185 | /* Retry Sense ID if requested. */ | 200 | /* Retry Sense ID if requested. */ |
@@ -231,6 +246,15 @@ ccw_device_check_sense_id(struct ccw_device *cdev) | |||
231 | sch->schid.ssid, sch->schid.sch_no); | 246 | sch->schid.ssid, sch->schid.sch_no); |
232 | return -EACCES; | 247 | return -EACCES; |
233 | } | 248 | } |
249 | |||
250 | /* Did we get a proper answer ? */ | ||
251 | if (irb->scsw.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && | ||
252 | cdev->private->senseid.reserved == 0xFF) { | ||
253 | if (irb->scsw.count < sizeof(struct senseid) - 8) | ||
254 | cdev->private->flags.esid = 1; | ||
255 | return 0; /* Success */ | ||
256 | } | ||
257 | |||
234 | /* Hmm, whatever happened, try again. */ | 258 | /* Hmm, whatever happened, try again. */ |
235 | CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on " | 259 | CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on " |
236 | "subchannel 0.%x.%04x returns status %02X%02X\n", | 260 | "subchannel 0.%x.%04x returns status %02X%02X\n", |
@@ -283,20 +307,17 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
283 | break; | 307 | break; |
284 | /* fall through. */ | 308 | /* fall through. */ |
285 | default: /* Sense ID failed. Try asking VM. */ | 309 | default: /* Sense ID failed. Try asking VM. */ |
286 | if (MACHINE_IS_VM) { | 310 | if (MACHINE_IS_VM) |
287 | VM_virtual_device_info (cdev->private->dev_id.devno, | 311 | ret = diag_get_dev_info(cdev->private->dev_id.devno, |
288 | &cdev->private->senseid); | 312 | &cdev->private->senseid); |
289 | if (cdev->private->senseid.cu_type != 0xFFFF) { | 313 | else |
290 | /* Got the device information from VM. */ | 314 | /* |
291 | ccw_device_sense_id_done(cdev, 0); | 315 | * If we couldn't identify the device type we |
292 | return; | 316 | * consider the device "not operational". |
293 | } | 317 | */ |
294 | } | 318 | ret = -ENODEV; |
295 | /* | 319 | |
296 | * If we can't couldn't identify the device type we | 320 | ccw_device_sense_id_done(cdev, ret); |
297 | * consider the device "not operational". | ||
298 | */ | ||
299 | ccw_device_sense_id_done(cdev, -ENODEV); | ||
300 | break; | 321 | break; |
301 | } | 322 | } |
302 | } | 323 | } |
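The device_id.c rework above splits the old VM_virtual_device_info() into a table lookup, vm_vdev_to_cu_type(), and a DIAG X'210' wrapper that returns an error code instead of silently falling through. The standalone sketch below reproduces the lookup with only the two table entries visible in the hunk, just to show the 0xffff "no match" sentinel the callers test for; the real kernel table is much longer.

#include <stdio.h>

#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

/* Map a z/VM virtual device class/type pair to a control unit type;
 * 0xffff means "no match".  Only two entries from the hunk are kept. */
static int vm_vdev_to_cu_type(int class, int type)
{
	static const struct {
		int class, type, cu_type;
	} vm_devices[] = {
		{ 0x08, 0x01, 0x3480 },
		{ 0x08, 0x02, 0x3430 },
	};
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vm_devices); i++)
		if (class == vm_devices[i].class && type == vm_devices[i].type)
			return vm_devices[i].cu_type;

	return 0xffff;
}

int main(void)
{
	printf("0x%04x\n", vm_vdev_to_cu_type(0x08, 0x01));	/* 0x3480 */
	printf("0x%04x\n", vm_vdev_to_cu_type(0x40, 0xff));	/* 0xffff */
	return 0;
}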
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 0e8267c1e915..fb0886140dd7 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
@@ -449,9 +449,6 @@ static int aac_slave_configure(struct scsi_device *sdev) | |||
449 | else if (depth < 2) | 449 | else if (depth < 2) |
450 | depth = 2; | 450 | depth = 2; |
451 | scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth); | 451 | scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth); |
452 | if (!(((struct aac_dev *)host->hostdata)->adapter_info.options & | ||
453 | AAC_OPT_NEW_COMM)) | ||
454 | blk_queue_max_segment_size(sdev->request_queue, 65536); | ||
455 | } else | 452 | } else |
456 | scsi_adjust_queue_depth(sdev, 0, 1); | 453 | scsi_adjust_queue_depth(sdev, 0, 1); |
457 | 454 | ||
@@ -1133,6 +1130,12 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, | |||
1133 | if (error < 0) | 1130 | if (error < 0) |
1134 | goto out_deinit; | 1131 | goto out_deinit; |
1135 | 1132 | ||
1133 | if (!(aac->adapter_info.options & AAC_OPT_NEW_COMM)) { | ||
1134 | error = pci_set_dma_max_seg_size(pdev, 65536); | ||
1135 | if (error) | ||
1136 | goto out_deinit; | ||
1137 | } | ||
1138 | |||
1136 | /* | 1139 | /* |
1137 | * Let's override negotiations and drop the maximum SG limit to 34 | 1140 | * Let's override negotiations and drop the maximum SG limit to 34 |
1138 | */ | 1141 | */ |
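The aacraid change above moves the 64 KiB segment restriction out of slave_configure(), where it had to be applied to every request queue, and records it once at probe time with pci_set_dma_max_seg_size(); the SCSI midlayer then picks it up when it builds the queue (see the scsi_lib.c hunk below). A hedged sketch of that probe-time pattern follows; the function name is hypothetical and carries no aacraid specifics.

#include <linux/pci.h>

/* Hypothetical probe routine, for illustration only. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int error;

	error = pci_enable_device(pdev);
	if (error)
		return error;

	/*
	 * Hardware limited to 64 KiB per scatter/gather segment: record
	 * that once on the device instead of clamping each queue later.
	 */
	error = pci_set_dma_max_seg_size(pdev, 65536);
	if (error)
		goto out_disable;

	return 0;

out_disable:
	pci_disable_device(pdev);
	return error;
}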
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c index 4b82b2021981..d8b99351b053 100644 --- a/drivers/scsi/pcmcia/fdomain_stub.c +++ b/drivers/scsi/pcmcia/fdomain_stub.c | |||
@@ -130,7 +130,7 @@ static int fdomain_config(struct pcmcia_device *link) | |||
130 | cisparse_t parse; | 130 | cisparse_t parse; |
131 | int i, last_ret, last_fn; | 131 | int i, last_ret, last_fn; |
132 | u_char tuple_data[64]; | 132 | u_char tuple_data[64]; |
133 | char str[16]; | 133 | char str[22]; |
134 | struct Scsi_Host *host; | 134 | struct Scsi_Host *host; |
135 | 135 | ||
136 | DEBUG(0, "fdomain_config(0x%p)\n", link); | 136 | DEBUG(0, "fdomain_config(0x%p)\n", link); |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b12fb310e399..f243fc30c908 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1569,6 +1569,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, | |||
1569 | request_fn_proc *request_fn) | 1569 | request_fn_proc *request_fn) |
1570 | { | 1570 | { |
1571 | struct request_queue *q; | 1571 | struct request_queue *q; |
1572 | struct device *dev = shost->shost_gendev.parent; | ||
1572 | 1573 | ||
1573 | q = blk_init_queue(request_fn, NULL); | 1574 | q = blk_init_queue(request_fn, NULL); |
1574 | if (!q) | 1575 | if (!q) |
@@ -1583,6 +1584,9 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, | |||
1583 | blk_queue_max_sectors(q, shost->max_sectors); | 1584 | blk_queue_max_sectors(q, shost->max_sectors); |
1584 | blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); | 1585 | blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); |
1585 | blk_queue_segment_boundary(q, shost->dma_boundary); | 1586 | blk_queue_segment_boundary(q, shost->dma_boundary); |
1587 | dma_set_seg_boundary(dev, shost->dma_boundary); | ||
1588 | |||
1589 | blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); | ||
1586 | 1590 | ||
1587 | if (!shost->use_clustering) | 1591 | if (!shost->use_clustering) |
1588 | clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); | 1592 | clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); |
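The scsi_lib.c hunk is the consumer side of the same mechanism: when the midlayer allocates a request queue it now copies the host's DMA boundary onto the parent device and caps the queue's segment size at whatever dma_get_max_seg_size() reports. The userspace model below mirrors that helper's behaviour as I understand it (per-device limit if the driver set one, 64 KiB fallback otherwise); the struct layouts are simplified stand-ins, not the kernel definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct device_dma_parameters {
	unsigned int max_segment_size;
};

struct device {
	struct device_dma_parameters *dma_parms;
};

/* Per-device segment limit if one was set, 64 KiB otherwise. */
static unsigned int dma_get_max_seg_size(struct device *dev)
{
	return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
}

int main(void)
{
	struct device_dma_parameters parms = { .max_segment_size = 32768 };
	struct device with_limit = { .dma_parms = &parms };
	struct device no_limit = { .dma_parms = NULL };

	printf("%u\n", dma_get_max_seg_size(&with_limit));	/* 32768 */
	printf("%u\n", dma_get_max_seg_size(&no_limit));	/* 65536 */
	return 0;
}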
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index f94109cbb46e..b8a4bd94f51d 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c | |||
@@ -2047,7 +2047,7 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios, | |||
2047 | * Oxford Semi 952 rev B workaround | 2047 | * Oxford Semi 952 rev B workaround |
2048 | */ | 2048 | */ |
2049 | if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0) | 2049 | if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0) |
2050 | quot ++; | 2050 | quot++; |
2051 | 2051 | ||
2052 | if (up->capabilities & UART_CAP_FIFO && up->port.fifosize > 1) { | 2052 | if (up->capabilities & UART_CAP_FIFO && up->port.fifosize > 1) { |
2053 | if (baud < 2400) | 2053 | if (baud < 2400) |
@@ -2662,16 +2662,17 @@ static int __devinit serial8250_probe(struct platform_device *dev) | |||
2662 | memset(&port, 0, sizeof(struct uart_port)); | 2662 | memset(&port, 0, sizeof(struct uart_port)); |
2663 | 2663 | ||
2664 | for (i = 0; p && p->flags != 0; p++, i++) { | 2664 | for (i = 0; p && p->flags != 0; p++, i++) { |
2665 | port.iobase = p->iobase; | 2665 | port.iobase = p->iobase; |
2666 | port.membase = p->membase; | 2666 | port.membase = p->membase; |
2667 | port.irq = p->irq; | 2667 | port.irq = p->irq; |
2668 | port.uartclk = p->uartclk; | 2668 | port.uartclk = p->uartclk; |
2669 | port.regshift = p->regshift; | 2669 | port.regshift = p->regshift; |
2670 | port.iotype = p->iotype; | 2670 | port.iotype = p->iotype; |
2671 | port.flags = p->flags; | 2671 | port.flags = p->flags; |
2672 | port.mapbase = p->mapbase; | 2672 | port.mapbase = p->mapbase; |
2673 | port.hub6 = p->hub6; | 2673 | port.hub6 = p->hub6; |
2674 | port.dev = &dev->dev; | 2674 | port.private_data = p->private_data; |
2675 | port.dev = &dev->dev; | ||
2675 | if (share_irqs) | 2676 | if (share_irqs) |
2676 | port.flags |= UPF_SHARE_IRQ; | 2677 | port.flags |= UPF_SHARE_IRQ; |
2677 | ret = serial8250_register_port(&port); | 2678 | ret = serial8250_register_port(&port); |
@@ -2812,15 +2813,16 @@ int serial8250_register_port(struct uart_port *port) | |||
2812 | if (uart) { | 2813 | if (uart) { |
2813 | uart_remove_one_port(&serial8250_reg, &uart->port); | 2814 | uart_remove_one_port(&serial8250_reg, &uart->port); |
2814 | 2815 | ||
2815 | uart->port.iobase = port->iobase; | 2816 | uart->port.iobase = port->iobase; |
2816 | uart->port.membase = port->membase; | 2817 | uart->port.membase = port->membase; |
2817 | uart->port.irq = port->irq; | 2818 | uart->port.irq = port->irq; |
2818 | uart->port.uartclk = port->uartclk; | 2819 | uart->port.uartclk = port->uartclk; |
2819 | uart->port.fifosize = port->fifosize; | 2820 | uart->port.fifosize = port->fifosize; |
2820 | uart->port.regshift = port->regshift; | 2821 | uart->port.regshift = port->regshift; |
2821 | uart->port.iotype = port->iotype; | 2822 | uart->port.iotype = port->iotype; |
2822 | uart->port.flags = port->flags | UPF_BOOT_AUTOCONF; | 2823 | uart->port.flags = port->flags | UPF_BOOT_AUTOCONF; |
2823 | uart->port.mapbase = port->mapbase; | 2824 | uart->port.mapbase = port->mapbase; |
2825 | uart->port.private_data = port->private_data; | ||
2824 | if (port->dev) | 2826 | if (port->dev) |
2825 | uart->port.dev = port->dev; | 2827 | uart->port.dev = port->dev; |
2826 | 2828 | ||
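The two 8250.c hunks above thread a new private_data pointer from struct plat_serial8250_port through serial8250_probe() and serial8250_register_port() into uart_port.private_data. Below is a hedged board-file sketch of how that pass-through would be used; every name, address, and value is hypothetical.

#include <linux/platform_device.h>
#include <linux/serial_8250.h>

/* Hypothetical per-board state reachable later via uart_port.private_data. */
static struct my_board_uart_ctx {
	int quirk;
} my_ctx = { .quirk = 1 };

static struct plat_serial8250_port my_board_uart_data[] = {
	{
		.mapbase	= 0x10000000,	/* made-up register base */
		.irq		= 42,
		.uartclk	= 1843200,
		.regshift	= 2,
		.iotype		= UPIO_MEM,
		.flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP,
		.private_data	= &my_ctx,	/* now copied into uart_port */
	},
	{ },	/* zeroed flags terminate serial8250_probe()'s loop */
};

static struct platform_device my_board_uart_device = {
	.name	= "serial8250",
	.id	= PLAT8250_DEV_PLATFORM,
	.dev	= {
		.platform_data = my_board_uart_data,
	},
};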
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index ceb03c9e749f..0a4ac2b6eb5a 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c | |||
@@ -106,6 +106,32 @@ setup_port(struct serial_private *priv, struct uart_port *port, | |||
106 | } | 106 | } |
107 | 107 | ||
108 | /* | 108 | /* |
109 | * ADDI-DATA GmbH communication cards <info@addi-data.com> | ||
110 | */ | ||
111 | static int addidata_apci7800_setup(struct serial_private *priv, | ||
112 | struct pciserial_board *board, | ||
113 | struct uart_port *port, int idx) | ||
114 | { | ||
115 | unsigned int bar = 0, offset = board->first_offset; | ||
116 | bar = FL_GET_BASE(board->flags); | ||
117 | |||
118 | if (idx < 2) { | ||
119 | offset += idx * board->uart_offset; | ||
120 | } else if ((idx >= 2) && (idx < 4)) { | ||
121 | bar += 1; | ||
122 | offset += ((idx - 2) * board->uart_offset); | ||
123 | } else if ((idx >= 4) && (idx < 6)) { | ||
124 | bar += 2; | ||
125 | offset += ((idx - 4) * board->uart_offset); | ||
126 | } else if (idx >= 6) { | ||
127 | bar += 3; | ||
128 | offset += ((idx - 6) * board->uart_offset); | ||
129 | } | ||
130 | |||
131 | return setup_port(priv, port, bar, offset, board->reg_shift); | ||
132 | } | ||
133 | |||
134 | /* | ||
109 | * AFAVLAB uses a different mixture of BARs and offsets | 135 | * AFAVLAB uses a different mixture of BARs and offsets |
110 | * Not that ugly ;) -- HW | 136 | * Not that ugly ;) -- HW |
111 | */ | 137 | */ |
@@ -752,6 +778,16 @@ pci_default_setup(struct serial_private *priv, struct pciserial_board *board, | |||
752 | */ | 778 | */ |
753 | static struct pci_serial_quirk pci_serial_quirks[] = { | 779 | static struct pci_serial_quirk pci_serial_quirks[] = { |
754 | /* | 780 | /* |
781 | * ADDI-DATA GmbH communication cards <info@addi-data.com> | ||
782 | */ | ||
783 | { | ||
784 | .vendor = PCI_VENDOR_ID_ADDIDATA_OLD, | ||
785 | .device = PCI_DEVICE_ID_ADDIDATA_APCI7800, | ||
786 | .subvendor = PCI_ANY_ID, | ||
787 | .subdevice = PCI_ANY_ID, | ||
788 | .setup = addidata_apci7800_setup, | ||
789 | }, | ||
790 | /* | ||
755 | * AFAVLAB cards - these may be called via parport_serial | 791 | * AFAVLAB cards - these may be called via parport_serial |
756 | * It is not clear whether this applies to all products. | 792 | * It is not clear whether this applies to all products. |
757 | */ | 793 | */ |
@@ -1179,6 +1215,12 @@ static struct pciserial_board pci_boards[] __devinitdata = { | |||
1179 | .base_baud = 115200, | 1215 | .base_baud = 115200, |
1180 | .uart_offset = 8, | 1216 | .uart_offset = 8, |
1181 | }, | 1217 | }, |
1218 | [pbn_b0_8_115200] = { | ||
1219 | .flags = FL_BASE0, | ||
1220 | .num_ports = 8, | ||
1221 | .base_baud = 115200, | ||
1222 | .uart_offset = 8, | ||
1223 | }, | ||
1182 | 1224 | ||
1183 | [pbn_b0_1_921600] = { | 1225 | [pbn_b0_1_921600] = { |
1184 | .flags = FL_BASE0, | 1226 | .flags = FL_BASE0, |
@@ -2697,6 +2739,97 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2697 | pbn_pasemi_1682M }, | 2739 | pbn_pasemi_1682M }, |
2698 | 2740 | ||
2699 | /* | 2741 | /* |
2742 | * ADDI-DATA GmbH communication cards <info@addi-data.com> | ||
2743 | */ | ||
2744 | { PCI_VENDOR_ID_ADDIDATA, | ||
2745 | PCI_DEVICE_ID_ADDIDATA_APCI7500, | ||
2746 | PCI_ANY_ID, | ||
2747 | PCI_ANY_ID, | ||
2748 | 0, | ||
2749 | 0, | ||
2750 | pbn_b0_4_115200 }, | ||
2751 | |||
2752 | { PCI_VENDOR_ID_ADDIDATA, | ||
2753 | PCI_DEVICE_ID_ADDIDATA_APCI7420, | ||
2754 | PCI_ANY_ID, | ||
2755 | PCI_ANY_ID, | ||
2756 | 0, | ||
2757 | 0, | ||
2758 | pbn_b0_2_115200 }, | ||
2759 | |||
2760 | { PCI_VENDOR_ID_ADDIDATA, | ||
2761 | PCI_DEVICE_ID_ADDIDATA_APCI7300, | ||
2762 | PCI_ANY_ID, | ||
2763 | PCI_ANY_ID, | ||
2764 | 0, | ||
2765 | 0, | ||
2766 | pbn_b0_1_115200 }, | ||
2767 | |||
2768 | { PCI_VENDOR_ID_ADDIDATA_OLD, | ||
2769 | PCI_DEVICE_ID_ADDIDATA_APCI7800, | ||
2770 | PCI_ANY_ID, | ||
2771 | PCI_ANY_ID, | ||
2772 | 0, | ||
2773 | 0, | ||
2774 | pbn_b1_8_115200 }, | ||
2775 | |||
2776 | { PCI_VENDOR_ID_ADDIDATA, | ||
2777 | PCI_DEVICE_ID_ADDIDATA_APCI7500_2, | ||
2778 | PCI_ANY_ID, | ||
2779 | PCI_ANY_ID, | ||
2780 | 0, | ||
2781 | 0, | ||
2782 | pbn_b0_4_115200 }, | ||
2783 | |||
2784 | { PCI_VENDOR_ID_ADDIDATA, | ||
2785 | PCI_DEVICE_ID_ADDIDATA_APCI7420_2, | ||
2786 | PCI_ANY_ID, | ||
2787 | PCI_ANY_ID, | ||
2788 | 0, | ||
2789 | 0, | ||
2790 | pbn_b0_2_115200 }, | ||
2791 | |||
2792 | { PCI_VENDOR_ID_ADDIDATA, | ||
2793 | PCI_DEVICE_ID_ADDIDATA_APCI7300_2, | ||
2794 | PCI_ANY_ID, | ||
2795 | PCI_ANY_ID, | ||
2796 | 0, | ||
2797 | 0, | ||
2798 | pbn_b0_1_115200 }, | ||
2799 | |||
2800 | { PCI_VENDOR_ID_ADDIDATA, | ||
2801 | PCI_DEVICE_ID_ADDIDATA_APCI7500_3, | ||
2802 | PCI_ANY_ID, | ||
2803 | PCI_ANY_ID, | ||
2804 | 0, | ||
2805 | 0, | ||
2806 | pbn_b0_4_115200 }, | ||
2807 | |||
2808 | { PCI_VENDOR_ID_ADDIDATA, | ||
2809 | PCI_DEVICE_ID_ADDIDATA_APCI7420_3, | ||
2810 | PCI_ANY_ID, | ||
2811 | PCI_ANY_ID, | ||
2812 | 0, | ||
2813 | 0, | ||
2814 | pbn_b0_2_115200 }, | ||
2815 | |||
2816 | { PCI_VENDOR_ID_ADDIDATA, | ||
2817 | PCI_DEVICE_ID_ADDIDATA_APCI7300_3, | ||
2818 | PCI_ANY_ID, | ||
2819 | PCI_ANY_ID, | ||
2820 | 0, | ||
2821 | 0, | ||
2822 | pbn_b0_1_115200 }, | ||
2823 | |||
2824 | { PCI_VENDOR_ID_ADDIDATA, | ||
2825 | PCI_DEVICE_ID_ADDIDATA_APCI7800_3, | ||
2826 | PCI_ANY_ID, | ||
2827 | PCI_ANY_ID, | ||
2828 | 0, | ||
2829 | 0, | ||
2830 | pbn_b0_8_115200 }, | ||
2831 | |||
2832 | /* | ||
2700 | * These entries match devices with class COMMUNICATION_SERIAL, | 2833 | * These entries match devices with class COMMUNICATION_SERIAL, |
2701 | * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL | 2834 | * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL |
2702 | */ | 2835 | */ |
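addidata_apci7800_setup() above spreads the card's eight UARTs across four BARs, two ports per BAR, each board->uart_offset bytes apart within its BAR. The small userspace check below walks the same arithmetic for all eight indices, assuming uart_offset = 8 (matching the new pbn_b0_8_115200 entry) and first_offset = 0; the first_offset value is an assumption for illustration only.

#include <stdio.h>

int main(void)
{
	const unsigned int uart_offset = 8;	/* per pbn_b0_8_115200 above */
	const unsigned int first_offset = 0;	/* assumed for illustration */
	int idx;

	for (idx = 0; idx < 8; idx++) {
		unsigned int bar = 0, offset = first_offset;

		if (idx < 2) {
			offset += idx * uart_offset;
		} else if (idx < 4) {
			bar += 1;
			offset += (idx - 2) * uart_offset;
		} else if (idx < 6) {
			bar += 2;
			offset += (idx - 4) * uart_offset;
		} else {
			bar += 3;
			offset += (idx - 6) * uart_offset;
		}
		printf("port %d -> BAR %u, offset 0x%02x\n", idx, bar, offset);
	}
	return 0;
}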
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c index 1de098e75497..6f09cbd7fc48 100644 --- a/drivers/serial/8250_pnp.c +++ b/drivers/serial/8250_pnp.c | |||
@@ -414,8 +414,9 @@ static int __devinit check_resources(struct pnp_option *option) | |||
414 | */ | 414 | */ |
415 | static int __devinit serial_pnp_guess_board(struct pnp_dev *dev, int *flags) | 415 | static int __devinit serial_pnp_guess_board(struct pnp_dev *dev, int *flags) |
416 | { | 416 | { |
417 | if (!(check_name(pnp_dev_name(dev)) || (dev->card && check_name(dev->card->name)))) | 417 | if (!(check_name(pnp_dev_name(dev)) || |
418 | return -ENODEV; | 418 | (dev->card && check_name(dev->card->name)))) |
419 | return -ENODEV; | ||
419 | 420 | ||
420 | if (check_resources(dev->independent)) | 421 | if (check_resources(dev->independent)) |
421 | return 0; | 422 | return 0; |
@@ -452,8 +453,9 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) | |||
452 | return -ENODEV; | 453 | return -ENODEV; |
453 | 454 | ||
454 | #ifdef SERIAL_DEBUG_PNP | 455 | #ifdef SERIAL_DEBUG_PNP |
455 | printk("Setup PNP port: port %x, mem 0x%lx, irq %d, type %d\n", | 456 | printk(KERN_DEBUG |
456 | port.iobase, port.mapbase, port.irq, port.iotype); | 457 | "Setup PNP port: port %x, mem 0x%lx, irq %d, type %d\n", |
458 | port.iobase, port.mapbase, port.irq, port.iotype); | ||
457 | #endif | 459 | #endif |
458 | 460 | ||
459 | port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; | 461 | port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; |
diff --git a/drivers/serial/mcf.c b/drivers/serial/mcf.c index 051fcc2f5ba8..e76fc72c9b36 100644 --- a/drivers/serial/mcf.c +++ b/drivers/serial/mcf.c | |||
@@ -434,7 +434,7 @@ static struct uart_ops mcf_uart_ops = { | |||
434 | 434 | ||
435 | static struct mcf_uart mcf_ports[3]; | 435 | static struct mcf_uart mcf_ports[3]; |
436 | 436 | ||
437 | #define MCF_MAXPORTS (sizeof(mcf_ports) / sizeof(struct mcf_uart)) | 437 | #define MCF_MAXPORTS ARRAY_SIZE(mcf_ports) |
438 | 438 | ||
439 | /****************************************************************************/ | 439 | /****************************************************************************/ |
440 | #if defined(CONFIG_SERIAL_MCF_CONSOLE) | 440 | #if defined(CONFIG_SERIAL_MCF_CONSOLE) |
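The mcf.c change swaps an open-coded sizeof division for the kernel's ARRAY_SIZE() macro from linux/kernel.h. For an array the two are equivalent (the kernel version adds a compile-time check that the argument really is an array); a standalone illustration:

#include <stdio.h>

#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

struct example_uart {
	int line;
};

static struct example_uart ports[3];

int main(void)
{
	/* Same value the open-coded sizeof division produced: 3. */
	printf("%zu ports\n", ARRAY_SIZE(ports));
	return 0;
}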
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c index 4d643c926657..cb3a91967742 100644 --- a/drivers/serial/mpsc.c +++ b/drivers/serial/mpsc.c | |||
@@ -612,6 +612,7 @@ static void mpsc_hw_init(struct mpsc_port_info *pi) | |||
612 | 612 | ||
613 | /* No preamble, 16x divider, low-latency, */ | 613 | /* No preamble, 16x divider, low-latency, */ |
614 | writel(0x04400400, pi->mpsc_base + MPSC_MMCRH); | 614 | writel(0x04400400, pi->mpsc_base + MPSC_MMCRH); |
615 | mpsc_set_baudrate(pi, pi->default_baud); | ||
615 | 616 | ||
616 | if (pi->mirror_regs) { | 617 | if (pi->mirror_regs) { |
617 | pi->MPSC_CHR_1_m = 0; | 618 | pi->MPSC_CHR_1_m = 0; |
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c index e773c8e14962..45de19366030 100644 --- a/drivers/serial/s3c2410.c +++ b/drivers/serial/s3c2410.c | |||
@@ -1527,7 +1527,7 @@ static inline void s3c2440_serial_exit(void) | |||
1527 | #define s3c2440_uart_inf_at NULL | 1527 | #define s3c2440_uart_inf_at NULL |
1528 | #endif /* CONFIG_CPU_S3C2440 */ | 1528 | #endif /* CONFIG_CPU_S3C2440 */ |
1529 | 1529 | ||
1530 | #if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413) | 1530 | #if defined(CONFIG_CPU_S3C2412) |
1531 | 1531 | ||
1532 | static int s3c2412_serial_setsource(struct uart_port *port, | 1532 | static int s3c2412_serial_setsource(struct uart_port *port, |
1533 | struct s3c24xx_uart_clksrc *clk) | 1533 | struct s3c24xx_uart_clksrc *clk) |
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 3bb5d241dd40..276da148c57e 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c | |||
@@ -371,7 +371,8 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios, | |||
371 | */ | 371 | */ |
372 | termios->c_cflag &= ~CBAUD; | 372 | termios->c_cflag &= ~CBAUD; |
373 | if (old) { | 373 | if (old) { |
374 | termios->c_cflag |= old->c_cflag & CBAUD; | 374 | baud = tty_termios_baud_rate(old); |
375 | tty_termios_encode_baud_rate(termios, baud, baud); | ||
375 | old = NULL; | 376 | old = NULL; |
376 | continue; | 377 | continue; |
377 | } | 378 | } |
@@ -380,7 +381,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios, | |||
380 | * As a last resort, if the quotient is zero, | 381 | * As a last resort, if the quotient is zero, |
381 | * default to 9600 bps | 382 | * default to 9600 bps |
382 | */ | 383 | */ |
383 | termios->c_cflag |= B9600; | 384 | tty_termios_encode_baud_rate(termios, 9600, 9600); |
384 | } | 385 | } |
385 | 386 | ||
386 | return 0; | 387 | return 0; |
@@ -1977,6 +1978,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) | |||
1977 | 1978 | ||
1978 | if (state->info && state->info->flags & UIF_INITIALIZED) { | 1979 | if (state->info && state->info->flags & UIF_INITIALIZED) { |
1979 | const struct uart_ops *ops = port->ops; | 1980 | const struct uart_ops *ops = port->ops; |
1981 | int tries; | ||
1980 | 1982 | ||
1981 | state->info->flags = (state->info->flags & ~UIF_INITIALIZED) | 1983 | state->info->flags = (state->info->flags & ~UIF_INITIALIZED) |
1982 | | UIF_SUSPENDED; | 1984 | | UIF_SUSPENDED; |
@@ -1990,9 +1992,14 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) | |||
1990 | /* | 1992 | /* |
1991 | * Wait for the transmitter to empty. | 1993 | * Wait for the transmitter to empty. |
1992 | */ | 1994 | */ |
1993 | while (!ops->tx_empty(port)) { | 1995 | for (tries = 3; !ops->tx_empty(port) && tries; tries--) { |
1994 | msleep(10); | 1996 | msleep(10); |
1995 | } | 1997 | } |
1998 | if (!tries) | ||
1999 | printk(KERN_ERR "%s%s%s%d: Unable to drain transmitter\n", | ||
2000 | port->dev ? port->dev->bus_id : "", | ||
2001 | port->dev ? ": " : "", | ||
2002 | drv->dev_name, port->line); | ||
1996 | 2003 | ||
1997 | ops->shutdown(port); | 2004 | ops->shutdown(port); |
1998 | } | 2005 | } |
@@ -2029,8 +2036,6 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port) | |||
2029 | } | 2036 | } |
2030 | port->suspended = 0; | 2037 | port->suspended = 0; |
2031 | 2038 | ||
2032 | uart_change_pm(state, 0); | ||
2033 | |||
2034 | /* | 2039 | /* |
2035 | * Re-enable the console device after suspending. | 2040 | * Re-enable the console device after suspending. |
2036 | */ | 2041 | */ |
@@ -2049,6 +2054,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port) | |||
2049 | if (state->info && state->info->tty && termios.c_cflag == 0) | 2054 | if (state->info && state->info->tty && termios.c_cflag == 0) |
2050 | termios = *state->info->tty->termios; | 2055 | termios = *state->info->tty->termios; |
2051 | 2056 | ||
2057 | uart_change_pm(state, 0); | ||
2052 | port->ops->set_termios(port, &termios, NULL); | 2058 | port->ops->set_termios(port, &termios, NULL); |
2053 | console_start(port->cons); | 2059 | console_start(port->cons); |
2054 | } | 2060 | } |
@@ -2057,6 +2063,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port) | |||
2057 | const struct uart_ops *ops = port->ops; | 2063 | const struct uart_ops *ops = port->ops; |
2058 | int ret; | 2064 | int ret; |
2059 | 2065 | ||
2066 | uart_change_pm(state, 0); | ||
2060 | ops->set_mctrl(port, 0); | 2067 | ops->set_mctrl(port, 0); |
2061 | ret = ops->startup(port); | 2068 | ret = ops->startup(port); |
2062 | if (ret == 0) { | 2069 | if (ret == 0) { |
@@ -2150,10 +2157,11 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, | |||
2150 | 2157 | ||
2151 | /* | 2158 | /* |
2152 | * Ensure that the modem control lines are de-activated. | 2159 | * Ensure that the modem control lines are de-activated. |
2160 | * keep the DTR setting that is set in uart_set_options() | ||
2153 | * We probably don't need a spinlock around this, but | 2161 | * We probably don't need a spinlock around this, but |
2154 | */ | 2162 | */ |
2155 | spin_lock_irqsave(&port->lock, flags); | 2163 | spin_lock_irqsave(&port->lock, flags); |
2156 | port->ops->set_mctrl(port, 0); | 2164 | port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR); |
2157 | spin_unlock_irqrestore(&port->lock, flags); | 2165 | spin_unlock_irqrestore(&port->lock, flags); |
2158 | 2166 | ||
2159 | /* | 2167 | /* |
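uart_suspend_port() above replaces an unbounded wait for the transmitter with a bounded one: at most three polls, 10 ms apart, followed by a loud error message instead of a hung suspend. A plain C sketch of the same pattern with a stand-in predicate (the kernel uses ops->tx_empty() and msleep(10)):

#include <stdio.h>

/* Stand-in for ops->tx_empty(port): reports an empty FIFO on the third poll. */
static int tx_empty(void)
{
	static int busy_polls = 2;

	return busy_polls-- <= 0;
}

int main(void)
{
	int tries;

	for (tries = 3; !tx_empty() && tries; tries--)
		;	/* the kernel sleeps with msleep(10) here */

	if (!tries)
		fprintf(stderr, "unable to drain transmitter\n");

	return 0;
}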
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index d8b660061c13..164d2a42eb59 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c | |||
@@ -389,7 +389,7 @@ static void serial_detach(struct pcmcia_device *link) | |||
389 | /*====================================================================*/ | 389 | /*====================================================================*/ |
390 | 390 | ||
391 | static int setup_serial(struct pcmcia_device *handle, struct serial_info * info, | 391 | static int setup_serial(struct pcmcia_device *handle, struct serial_info * info, |
392 | kio_addr_t iobase, int irq) | 392 | unsigned int iobase, int irq) |
393 | { | 393 | { |
394 | struct uart_port port; | 394 | struct uart_port port; |
395 | int line; | 395 | int line; |
@@ -456,7 +456,7 @@ next_tuple(struct pcmcia_device *handle, tuple_t * tuple, cisparse_t * parse) | |||
456 | 456 | ||
457 | static int simple_config(struct pcmcia_device *link) | 457 | static int simple_config(struct pcmcia_device *link) |
458 | { | 458 | { |
459 | static const kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; | 459 | static const unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; |
460 | static const int size_table[2] = { 8, 16 }; | 460 | static const int size_table[2] = { 8, 16 }; |
461 | struct serial_info *info = link->priv; | 461 | struct serial_info *info = link->priv; |
462 | struct serial_cfg_mem *cfg_mem; | 462 | struct serial_cfg_mem *cfg_mem; |
@@ -480,7 +480,7 @@ static int simple_config(struct pcmcia_device *link) | |||
480 | /* If the card is already configured, look up the port and irq */ | 480 | /* If the card is already configured, look up the port and irq */ |
481 | i = pcmcia_get_configuration_info(link, &config); | 481 | i = pcmcia_get_configuration_info(link, &config); |
482 | if ((i == CS_SUCCESS) && (config.Attributes & CONF_VALID_CLIENT)) { | 482 | if ((i == CS_SUCCESS) && (config.Attributes & CONF_VALID_CLIENT)) { |
483 | kio_addr_t port = 0; | 483 | unsigned int port = 0; |
484 | if ((config.BasePort2 != 0) && (config.NumPorts2 == 8)) { | 484 | if ((config.BasePort2 != 0) && (config.NumPorts2 == 8)) { |
485 | port = config.BasePort2; | 485 | port = config.BasePort2; |
486 | info->slave = 1; | 486 | info->slave = 1; |