Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/pci_irq.c | 8
-rw-r--r-- drivers/acpi/processor_idle.c | 2
-rw-r--r-- drivers/acpi/sleep.c | 90
-rw-r--r-- drivers/base/iommu.c | 43
-rw-r--r-- drivers/base/platform.c | 30
-rw-r--r-- drivers/base/power/runtime.c | 10
-rw-r--r-- drivers/base/power/sysfs.c | 65
-rw-r--r-- drivers/block/amiflop.c | 47
-rw-r--r-- drivers/block/hd.c | 4
-rw-r--r-- drivers/char/i8k.c | 21
-rw-r--r-- drivers/char/serial167.c | 2
-rw-r--r-- drivers/char/sysrq.c | 2
-rw-r--r-- drivers/char/tpm/Kconfig | 6
-rw-r--r-- drivers/char/tpm/tpm.c | 47
-rw-r--r-- drivers/char/tpm/tpm_tis.c | 40
-rw-r--r-- drivers/char/tty_io.c | 1
-rw-r--r-- drivers/clocksource/cs5535-clockevt.c | 8
-rw-r--r-- drivers/clocksource/sh_cmt.c | 45
-rw-r--r-- drivers/clocksource/sh_mtu2.c | 37
-rw-r--r-- drivers/clocksource/sh_tmu.c | 41
-rw-r--r-- drivers/cpufreq/cpufreq.c | 40
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 48
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 115
-rw-r--r-- drivers/cpuidle/governors/ladder.c | 2
-rw-r--r-- drivers/cpuidle/governors/menu.c | 2
-rw-r--r-- drivers/dma/shdma.c | 25
-rw-r--r-- drivers/dma/shdma.h | 4
-rw-r--r-- drivers/gpio/it8761e_gpio.c | 18
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_state.c | 19
-rw-r--r-- drivers/hid/hid-cherry.c | 1
-rw-r--r-- drivers/hid/hid-core.c | 10
-rw-r--r-- drivers/hid/hid-ids.h | 1
-rw-r--r-- drivers/hid/hid-ntrig.c | 72
-rw-r--r-- drivers/hid/hid-sony.c | 2
-rw-r--r-- drivers/hid/hid-wacom.c | 1
-rw-r--r-- drivers/hid/usbhid/hid-core.c | 13
-rw-r--r-- drivers/hwmon/applesmc.c | 61
-rw-r--r-- drivers/hwmon/asc7621.c | 63
-rw-r--r-- drivers/hwmon/hp_accel.c | 2
-rw-r--r-- drivers/i2c/busses/Kconfig | 2
-rw-r--r-- drivers/i2c/i2c-core.c | 166
-rw-r--r-- drivers/infiniband/Kconfig | 1
-rw-r--r-- drivers/infiniband/Makefile | 1
-rw-r--r-- drivers/infiniband/core/cma.c | 74
-rw-r--r-- drivers/infiniband/core/mad.c | 4
-rw-r--r-- drivers/infiniband/core/ucm.c | 3
-rw-r--r-- drivers/infiniband/core/ucma.c | 4
-rw-r--r-- drivers/infiniband/core/user_mad.c | 12
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 11
-rw-r--r-- drivers/infiniband/hw/amso1100/c2.h | 2
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_alloc.c | 4
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_cq.c | 4
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_mq.h | 2
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_provider.h | 2
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_rnic.c | 12
-rw-r--r-- drivers/infiniband/hw/cxgb3/cxio_hal.c | 12
-rw-r--r-- drivers/infiniband/hw/cxgb3/cxio_hal.h | 2
-rw-r--r-- drivers/infiniband/hw/cxgb3/cxio_wr.h | 4
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch.c | 2
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_cm.c | 133
-rw-r--r-- drivers/infiniband/hw/cxgb4/Kconfig | 18
-rw-r--r-- drivers/infiniband/hw/cxgb4/Makefile | 5
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 2374
-rw-r--r-- drivers/infiniband/hw/cxgb4/cq.c | 882
-rw-r--r-- drivers/infiniband/hw/cxgb4/device.c | 520
-rw-r--r-- drivers/infiniband/hw/cxgb4/ev.c | 193
-rw-r--r-- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 745
-rw-r--r-- drivers/infiniband/hw/cxgb4/mem.c | 811
-rw-r--r-- drivers/infiniband/hw/cxgb4/provider.c | 518
-rw-r--r-- drivers/infiniband/hw/cxgb4/qp.c | 1577
-rw-r--r-- drivers/infiniband/hw/cxgb4/resource.c | 417
-rw-r--r-- drivers/infiniband/hw/cxgb4/t4.h | 550
-rw-r--r-- drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | 829
-rw-r--r-- drivers/infiniband/hw/cxgb4/user.h | 66
-rw-r--r-- drivers/infiniband/hw/mlx4/cq.c | 8
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 1
-rw-r--r-- drivers/infiniband/hw/mlx4/qp.c | 50
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_allocator.c | 8
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_eq.c | 6
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_provider.h | 2
-rw-r--r-- drivers/infiniband/hw/nes/nes_hw.c | 12
-rw-r--r-- drivers/infiniband/hw/nes/nes_nic.c | 16
-rw-r--r-- drivers/infiniband/hw/nes/nes_utils.c | 10
-rw-r--r-- drivers/infiniband/hw/nes/nes_verbs.c | 2
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 20
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.c | 9
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.h | 4
-rw-r--r-- drivers/infiniband/ulp/iser/iser_verbs.c | 115
-rw-r--r-- drivers/input/gameport/gameport.c | 4
-rw-r--r-- drivers/input/joystick/analog.c | 4
-rw-r--r-- drivers/input/joystick/iforce/iforce-main.c | 6
-rw-r--r-- drivers/input/joystick/iforce/iforce-usb.c | 1
-rw-r--r-- drivers/input/keyboard/Kconfig | 48
-rw-r--r-- drivers/input/keyboard/Makefile | 3
-rw-r--r-- drivers/input/keyboard/corgikbd.c | 414
-rw-r--r-- drivers/input/keyboard/spitzkbd.c | 496
-rw-r--r-- drivers/input/keyboard/tosakbd.c | 431
-rw-r--r-- drivers/input/misc/pcspkr.c | 6
-rw-r--r-- drivers/input/mouse/elantech.c | 24
-rw-r--r-- drivers/input/mouse/elantech.h | 5
-rw-r--r-- drivers/input/mouse/psmouse-base.c | 14
-rw-r--r-- drivers/input/touchscreen/Kconfig | 20
-rw-r--r-- drivers/input/touchscreen/Makefile | 1
-rw-r--r-- drivers/input/touchscreen/ad7877.c | 15
-rw-r--r-- drivers/input/touchscreen/corgi_ts.c | 385
-rw-r--r-- drivers/input/touchscreen/s3c2410_ts.c | 40
-rw-r--r-- drivers/isdn/divert/divert_procfs.c | 18
-rw-r--r-- drivers/mfd/wm831x-core.c | 3
-rw-r--r-- drivers/mfd/wm8350-core.c | 4
-rw-r--r-- drivers/misc/vmware_balloon.c | 4
-rw-r--r-- drivers/mmc/host/at91_mci.c | 2
-rw-r--r-- drivers/mmc/host/atmel-mci.c | 18
-rw-r--r-- drivers/mmc/host/mmci.c | 19
-rw-r--r-- drivers/mmc/host/mmci.h | 6
-rw-r--r-- drivers/mmc/host/pxamci.c | 2
-rw-r--r-- drivers/net/a2065.c | 1
-rw-r--r-- drivers/net/ariadne.c | 1
-rw-r--r-- drivers/net/e1000e/netdev.c | 22
-rw-r--r-- drivers/net/gianfar.c | 2
-rw-r--r-- drivers/net/hydra.c | 1
-rw-r--r-- drivers/net/igbvf/netdev.c | 6
-rw-r--r-- drivers/net/phy/micrel.c | 1
-rw-r--r-- drivers/net/veth.c | 1
-rw-r--r-- drivers/net/wireless/ath/ar9170/usb.c | 11
-rw-r--r-- drivers/net/wireless/ath/ar9170/usb.h | 1
-rw-r--r-- drivers/net/wireless/ipw2x00/ipw2100.c | 11
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-commands.h | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-scan.c | 23
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl3945-base.c | 3
-rw-r--r-- drivers/net/zorro8390.c | 1
-rw-r--r-- drivers/oprofile/cpu_buffer.c | 75
-rw-r--r-- drivers/oprofile/oprof.c | 12
-rw-r--r-- drivers/oprofile/oprof.h | 3
-rw-r--r-- drivers/oprofile/timer_int.c | 78
-rw-r--r-- drivers/pci/intel-iommu.c | 22
-rw-r--r-- drivers/pci/setup-bus.c | 114
-rw-r--r-- drivers/pcmcia/Kconfig | 3
-rw-r--r-- drivers/pcmcia/Makefile | 1
-rw-r--r-- drivers/pcmcia/pxa2xx_vpac270.c | 229
-rw-r--r-- drivers/rtc/Kconfig | 10
-rw-r--r-- drivers/rtc/Makefile | 1
-rw-r--r-- drivers/rtc/rtc-davinci.c | 673
-rw-r--r-- drivers/s390/block/dasd.c | 39
-rw-r--r-- drivers/s390/block/dasd_3990_erp.c | 20
-rw-r--r-- drivers/s390/block/dasd_alias.c | 125
-rw-r--r-- drivers/s390/block/dasd_devmap.c | 174
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 116
-rw-r--r-- drivers/s390/block/dasd_eckd.h | 2
-rw-r--r-- drivers/s390/block/dasd_int.h | 49
-rw-r--r-- drivers/s390/char/Kconfig | 3
-rw-r--r-- drivers/s390/char/fs3270.c | 1
-rw-r--r-- drivers/s390/char/keyboard.c | 21
-rw-r--r-- drivers/s390/char/vmcp.c | 38
-rw-r--r-- drivers/s390/char/zcore.c | 4
-rw-r--r-- drivers/s390/cio/chsc_sch.c | 1
-rw-r--r-- drivers/s390/cio/cio.c | 3
-rw-r--r-- drivers/s390/cio/css.c | 8
-rw-r--r-- drivers/s390/cio/qdio.h | 15
-rw-r--r-- drivers/s390/cio/qdio_main.c | 67
-rw-r--r-- drivers/s390/cio/qdio_setup.c | 8
-rw-r--r-- drivers/s390/cio/qdio_thinint.c | 4
-rw-r--r-- drivers/s390/crypto/zcrypt_api.c | 2
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 17
-rw-r--r-- drivers/s390/scsi/zfcp_cfdc.c | 1
-rw-r--r-- drivers/sbus/char/flash.c | 6
-rw-r--r-- drivers/scsi/zorro7xx.c | 1
-rw-r--r-- drivers/serial/Kconfig | 23
-rw-r--r-- drivers/serial/Makefile | 1
-rw-r--r-- drivers/serial/atmel_serial.c | 207
-rw-r--r-- drivers/serial/imx.c | 10
-rw-r--r-- drivers/serial/mpc52xx_uart.c | 2
-rw-r--r-- drivers/serial/sh-sci.c | 189
-rw-r--r-- drivers/sh/Kconfig | 24
-rw-r--r-- drivers/sh/Makefile | 2
-rw-r--r-- drivers/sh/clk-cpg.c | 298
-rw-r--r-- drivers/sh/clk.c | 545
-rw-r--r-- drivers/sh/intc.c | 333
-rw-r--r-- drivers/spi/pxa2xx_spi.c | 25
-rw-r--r-- drivers/usb/core/inode.c | 1
-rw-r--r-- drivers/usb/gadget/at91_udc.c | 7
-rw-r--r-- drivers/vhost/vhost.c | 7
-rw-r--r-- drivers/video/amifb.c | 49
-rw-r--r-- drivers/video/bfin-t350mcqb-fb.c | 15
-rw-r--r-- drivers/video/cirrusfb.c | 1
-rw-r--r-- drivers/video/fm2fb.c | 1
-rw-r--r-- drivers/watchdog/Kconfig | 2
-rw-r--r-- drivers/watchdog/mpcore_wdt.c | 21
-rw-r--r-- drivers/xen/manage.c | 14
-rw-r--r-- drivers/zorro/proc.c | 6
-rw-r--r-- drivers/zorro/zorro-driver.c | 24
-rw-r--r-- drivers/zorro/zorro-sysfs.c | 11
-rw-r--r-- drivers/zorro/zorro.c | 243
193 files changed, 13831 insertions(+), 3539 deletions(-)
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index b0a71ecee682..e4804fb05e23 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -401,11 +401,13 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
	 * driver reported one, then use it. Exit in any case.
	 */
	if (gsi < 0) {
+		u32 dev_gsi;
		dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin));
		/* Interrupt Line values above 0xF are forbidden */
-		if (dev->irq > 0 && (dev->irq <= 0xF)) {
-			printk(" - using IRQ %d\n", dev->irq);
-			acpi_register_gsi(&dev->dev, dev->irq,
+		if (dev->irq > 0 && (dev->irq <= 0xF) &&
+		    (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
+			printk(" - using ISA IRQ %d\n", dev->irq);
+			acpi_register_gsi(&dev->dev, dev_gsi,
					  ACPI_LEVEL_SENSITIVE,
					  ACPI_ACTIVE_LOW);
			return 0;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5939e7f7d8e9..c3817e1f32c7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -698,7 +698,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
698 "max_cstate: C%d\n" 698 "max_cstate: C%d\n"
699 "maximum allowed latency: %d usec\n", 699 "maximum allowed latency: %d usec\n",
700 pr->power.state ? pr->power.state - pr->power.states : 0, 700 pr->power.state ? pr->power.state - pr->power.states : 0,
701 max_cstate, pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)); 701 max_cstate, pm_qos_request(PM_QOS_CPU_DMA_LATENCY));
702 702
703 seq_puts(seq, "states:\n"); 703 seq_puts(seq, "states:\n");
704 704
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index e2e992599e68..baa76bbf244a 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -475,101 +475,13 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
	},
	{
	.callback = init_set_sci_en_on_resume,
-	.ident = "Lenovo ThinkPad X201",
+	.ident = "Lenovo ThinkPad X201[s]",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
		},
	},
	{
-	.callback = init_set_sci_en_on_resume,
-	.ident = "Lenovo ThinkPad X201",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
-		},
-	},
-	{
-	.callback = init_set_sci_en_on_resume,
-	.ident = "Lenovo ThinkPad T410",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
-		},
-	},
-	{
-	.callback = init_set_sci_en_on_resume,
-	.ident = "Lenovo ThinkPad T510",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
-		},
-	},
-	{
-	.callback = init_set_sci_en_on_resume,
-	.ident = "Lenovo ThinkPad W510",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
-		},
-	},
-	{
-	.callback = init_set_sci_en_on_resume,
-	.ident = "Lenovo ThinkPad X201",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
-		},
-	},
-	{
-	.callback = init_set_sci_en_on_resume,
-	.ident = "Lenovo ThinkPad X201",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
-		},
-	},
-	{
-	.callback = init_set_sci_en_on_resume,
-	.ident = "Lenovo ThinkPad T410",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
-		},
-	},
-	{
-	.callback = init_set_sci_en_on_resume,
-	.ident = "Lenovo ThinkPad T510",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
-		},
-	},
-	{
-	.callback = init_set_sci_en_on_resume,
-	.ident = "Lenovo ThinkPad W510",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
-		},
-	},
-	{
-	.callback = init_set_sci_en_on_resume,
-	.ident = "Lenovo ThinkPad X201",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
-		},
-	},
-	{
-	.callback = init_set_sci_en_on_resume,
-	.ident = "Lenovo ThinkPad X201",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
-		},
-	},
-	{
	.callback = init_old_suspend_ordering,
	.ident = "Panasonic CF51-2L",
	.matches = {
diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c
index 8ad4ffea6920..6e6b6a11b3ce 100644
--- a/drivers/base/iommu.c
+++ b/drivers/base/iommu.c
@@ -80,20 +80,6 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
-int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
-		    phys_addr_t paddr, size_t size, int prot)
-{
-	return iommu_ops->map(domain, iova, paddr, size, prot);
-}
-EXPORT_SYMBOL_GPL(iommu_map_range);
-
-void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
-		       size_t size)
-{
-	iommu_ops->unmap(domain, iova, size);
-}
-EXPORT_SYMBOL_GPL(iommu_unmap_range);
-
 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
			       unsigned long iova)
 {
@@ -107,3 +93,32 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
	return iommu_ops->domain_has_cap(domain, cap);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
+
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+	      phys_addr_t paddr, int gfp_order, int prot)
+{
+	unsigned long invalid_mask;
+	size_t size;
+
+	size         = 0x1000UL << gfp_order;
+	invalid_mask = size - 1;
+
+	BUG_ON((iova | paddr) & invalid_mask);
+
+	return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
+}
+EXPORT_SYMBOL_GPL(iommu_map);
+
+int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+{
+	unsigned long invalid_mask;
+	size_t size;
+
+	size         = 0x1000UL << gfp_order;
+	invalid_mask = size - 1;
+
+	BUG_ON(iova & invalid_mask);
+
+	return iommu_ops->unmap(domain, iova, gfp_order);
+}
+EXPORT_SYMBOL_GPL(iommu_unmap);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4b4b565c835f..ada6397c23a5 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(platform_device_alloc);
  *	released.
  */
 int platform_device_add_resources(struct platform_device *pdev,
-				  struct resource *res, unsigned int num)
+				  const struct resource *res, unsigned int num)
 {
	struct resource *r;
 
@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
  */
 struct platform_device *platform_device_register_simple(const char *name,
							int id,
-							struct resource *res,
+							const struct resource *res,
							unsigned int num)
 {
	struct platform_device *pdev;
@@ -967,17 +967,17 @@ static int platform_pm_restore_noirq(struct device *dev)
 
 int __weak platform_pm_runtime_suspend(struct device *dev)
 {
-	return -ENOSYS;
+	return pm_generic_runtime_suspend(dev);
 };
 
 int __weak platform_pm_runtime_resume(struct device *dev)
 {
-	return -ENOSYS;
+	return pm_generic_runtime_resume(dev);
 };
 
 int __weak platform_pm_runtime_idle(struct device *dev)
 {
-	return -ENOSYS;
+	return pm_generic_runtime_idle(dev);
 };
 
 #else /* !CONFIG_PM_RUNTIME */
@@ -1254,6 +1254,26 @@ static int __init early_platform_driver_probe_id(char *class_str,
	}
 
	if (match) {
+		/*
+		 * Set up a sensible init_name to enable
+		 * dev_name() and others to be used before the
+		 * rest of the driver core is initialized.
+		 */
+		if (!match->dev.init_name && slab_is_available()) {
+			if (match->id != -1)
+				match->dev.init_name =
+					kasprintf(GFP_KERNEL, "%s.%d",
+						  match->name,
+						  match->id);
+			else
+				match->dev.init_name =
+					kasprintf(GFP_KERNEL, "%s",
+						  match->name);
+
+			if (!match->dev.init_name)
+				return -ENOMEM;
+		}
+
		if (epdrv->pdrv->probe(match))
			pr_warning("%s: unable to probe %s early.\n",
				   class_str, match->name);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 626dd147b75f..b0ec0e9f27e9 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -229,14 +229,16 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 
	if (retval) {
		dev->power.runtime_status = RPM_ACTIVE;
-		pm_runtime_cancel_pending(dev);
-
		if (retval == -EAGAIN || retval == -EBUSY) {
-			notify = true;
+			if (dev->power.timer_expires == 0)
+				notify = true;
			dev->power.runtime_error = 0;
+		} else {
+			pm_runtime_cancel_pending(dev);
		}
	} else {
		dev->power.runtime_status = RPM_SUSPENDED;
+		pm_runtime_deactivate_timer(dev);
 
		if (dev->parent) {
			parent = dev->parent;
@@ -659,8 +661,6 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
 
	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
-	else if (dev->power.runtime_status == RPM_SUSPENDING)
-		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 86fd9373447e..a4c33bc51257 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -5,6 +5,7 @@
 #include <linux/device.h>
 #include <linux/string.h>
 #include <linux/pm_runtime.h>
+#include <asm/atomic.h>
 #include "power.h"
 
 /*
@@ -143,7 +144,59 @@ wake_store(struct device * dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
 
-#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+#ifdef CONFIG_PM_RUNTIME
+
+static ssize_t rtpm_usagecount_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
+}
+
+static ssize_t rtpm_children_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", dev->power.ignore_children ?
+		0 : atomic_read(&dev->power.child_count));
+}
+
+static ssize_t rtpm_enabled_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
+		return sprintf(buf, "disabled & forbidden\n");
+	else if (dev->power.disable_depth)
+		return sprintf(buf, "disabled\n");
+	else if (dev->power.runtime_auto == false)
+		return sprintf(buf, "forbidden\n");
+	return sprintf(buf, "enabled\n");
+}
+
+static ssize_t rtpm_status_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	if (dev->power.runtime_error)
+		return sprintf(buf, "error\n");
+	switch (dev->power.runtime_status) {
+	case RPM_SUSPENDED:
+		return sprintf(buf, "suspended\n");
+	case RPM_SUSPENDING:
+		return sprintf(buf, "suspending\n");
+	case RPM_RESUMING:
+		return sprintf(buf, "resuming\n");
+	case RPM_ACTIVE:
+		return sprintf(buf, "active\n");
+	}
+	return -EIO;
+}
+
+static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
+static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
+static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+
+#endif
+
 static ssize_t async_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
 {
@@ -170,15 +223,21 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
 }
 
 static DEVICE_ATTR(async, 0644, async_show, async_store);
-#endif /* CONFIG_PM_SLEEP_ADVANCED_DEBUG */
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
 
 static struct attribute * power_attrs[] = {
 #ifdef CONFIG_PM_RUNTIME
	&dev_attr_control.attr,
 #endif
	&dev_attr_wakeup.attr,
-#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+#ifdef CONFIG_PM_ADVANCED_DEBUG
	&dev_attr_async.attr,
+#ifdef CONFIG_PM_RUNTIME
+	&dev_attr_runtime_usage.attr,
+	&dev_attr_runtime_active_kids.attr,
+	&dev_attr_runtime_status.attr,
+	&dev_attr_runtime_enabled.attr,
+#endif
 #endif
	NULL,
 };
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 0182a22c423a..832798aa14f6 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -66,6 +66,7 @@
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
 #include <linux/interrupt.h>
+#include <linux/platform_device.h>
 
 #include <asm/setup.h>
 #include <asm/uaccess.h>
@@ -1696,34 +1697,18 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
	return get_disk(unit[drive].gendisk);
 }
 
-static int __init amiga_floppy_init(void)
+static int __init amiga_floppy_probe(struct platform_device *pdev)
 {
	int i, ret;
 
-	if (!MACH_IS_AMIGA)
-		return -ENODEV;
-
-	if (!AMIGAHW_PRESENT(AMI_FLOPPY))
-		return -ENODEV;
-
	if (register_blkdev(FLOPPY_MAJOR,"fd"))
		return -EBUSY;
 
-	/*
-	 *  We request DSKPTR, DSKLEN and DSKDATA only, because the other
-	 *  floppy registers are too spreaded over the custom register space
-	 */
-	ret = -EBUSY;
-	if (!request_mem_region(CUSTOM_PHYSADDR+0x20, 8, "amiflop [Paula]")) {
-		printk("fd: cannot get floppy registers\n");
-		goto out_blkdev;
-	}
-
	ret = -ENOMEM;
	if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE, "Floppy")) ==
	    NULL) {
		printk("fd: cannot get chip mem buffer\n");
-		goto out_memregion;
+		goto out_blkdev;
	}
 
	ret = -EBUSY;
@@ -1792,18 +1777,13 @@ out_irq2:
	free_irq(IRQ_AMIGA_DSKBLK, NULL);
 out_irq:
	amiga_chip_free(raw_buf);
-out_memregion:
-	release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
 out_blkdev:
	unregister_blkdev(FLOPPY_MAJOR,"fd");
	return ret;
 }
 
-module_init(amiga_floppy_init);
-#ifdef MODULE
-
 #if 0 /* not safe to unload */
-void cleanup_module(void)
+static int __exit amiga_floppy_remove(struct platform_device *pdev)
 {
	int i;
 
@@ -1820,12 +1800,25 @@ void cleanup_module(void)
	custom.dmacon = DMAF_DISK; /* disable DMA */
	amiga_chip_free(raw_buf);
	blk_cleanup_queue(floppy_queue);
-	release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
	unregister_blkdev(FLOPPY_MAJOR, "fd");
 }
 #endif
 
-#else
+static struct platform_driver amiga_floppy_driver = {
+	.driver   = {
+		.name	= "amiga-floppy",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init amiga_floppy_init(void)
+{
+	return platform_driver_probe(&amiga_floppy_driver, amiga_floppy_probe);
+}
+
+module_init(amiga_floppy_init);
+
+#ifndef MODULE
 static int __init amiga_floppy_setup (char *str)
 {
	int n;
@@ -1840,3 +1833,5 @@ static int __init amiga_floppy_setup (char *str)
 
 __setup("floppy=", amiga_floppy_setup);
 #endif
+
+MODULE_ALIAS("platform:amiga-floppy");
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index 034e6dfc878c..81c78b3ce2df 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -164,12 +164,12 @@ unsigned long read_timer(void)
	unsigned long t, flags;
	int i;
 
-	spin_lock_irqsave(&i8253_lock, flags);
+	raw_spin_lock_irqsave(&i8253_lock, flags);
	t = jiffies * 11932;
	outb_p(0, 0x43);
	i = inb_p(0x40);
	i |= inb(0x40) << 8;
-	spin_unlock_irqrestore(&i8253_lock, flags);
+	raw_spin_unlock_irqrestore(&i8253_lock, flags);
	return(t - i);
 }
 #endif
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index fc8cf7ac7f2b..4cd8b227c11f 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -23,6 +23,7 @@
 #include <linux/seq_file.h>
 #include <linux/dmi.h>
 #include <linux/capability.h>
+#include <linux/smp_lock.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
@@ -82,8 +83,7 @@ module_param(fan_mult, int, 0);
 MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with");
 
 static int i8k_open_fs(struct inode *inode, struct file *file);
-static int i8k_ioctl(struct inode *, struct file *, unsigned int,
-		     unsigned long);
+static long i8k_ioctl(struct file *, unsigned int, unsigned long);
 
 static const struct file_operations i8k_fops = {
	.owner		= THIS_MODULE,
@@ -91,7 +91,7 @@ static const struct file_operations i8k_fops = {
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
-	.ioctl		= i8k_ioctl,
+	.unlocked_ioctl	= i8k_ioctl,
 };
 
 struct smm_regs {
@@ -307,8 +307,8 @@ static int i8k_get_dell_signature(int req_fn)
	return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1;
 }
 
-static int i8k_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
-		     unsigned long arg)
+static int
+i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
 {
	int val = 0;
	int speed;
@@ -395,6 +395,17 @@ static int i8k_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
	return 0;
 }
 
+static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	lock_kernel();
+	ret = i8k_ioctl_unlocked(fp, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
 /*
  * Print the information for /proc/i8k.
  */
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 8dfd24721a82..78a62ebe75c7 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -627,7 +627,6 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
	char data;
	int char_count;
	int save_cnt;
-	int len;
 
	/* determine the channel and change to that context */
	channel = (u_short) (base_addr[CyLICR] >> 2);
@@ -1528,7 +1527,6 @@ static int
 cy_ioctl(struct tty_struct *tty, struct file *file,
	 unsigned int cmd, unsigned long arg)
 {
-	unsigned long val;
	struct cyclades_port *info = tty->driver_data;
	int ret_val = 0;
	void __user *argp = (void __user *)arg;
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 59de2525d303..d4e8b213a462 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -289,7 +289,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
 
 static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
 {
-	ftrace_dump();
+	ftrace_dump(DUMP_ALL);
 }
 static struct sysrq_key_op sysrq_ftrace_dump_op = {
	.handler	= sysrq_ftrace_dump,
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index f5fc64f89c5c..4dc338f3d1aa 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -17,14 +17,16 @@ menuconfig TCG_TPM
	  obtained at: <http://sourceforge.net/projects/trousers>.  To
	  compile this driver as a module, choose M here; the module
	  will be called tpm. If unsure, say N.
-	  Note: For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI
+	  Notes:
+	  1) For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI
	  and CONFIG_PNPACPI.
+	  2) Without ACPI enabled, the BIOS event log won't be accessible,
+	  which is required to validate the PCR 0-7 values.
 
 if TCG_TPM
 
 config TCG_TIS
	tristate "TPM Interface Specification 1.2 Interface"
-	depends on PNP
	---help---
	  If you have a TPM security chip that is compliant with the
	  TCG TIS 1.2 TPM specification say Yes and it will be accessible
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 068c816e6942..05ad4a17a28f 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1068,6 +1068,27 @@ void tpm_remove_hardware(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(tpm_remove_hardware);
 
+#define TPM_ORD_SAVESTATE cpu_to_be32(152)
+#define SAVESTATE_RESULT_SIZE 10
+
+static struct tpm_input_header savestate_header = {
+	.tag = TPM_TAG_RQU_COMMAND,
+	.length = cpu_to_be32(10),
+	.ordinal = TPM_ORD_SAVESTATE
+};
+
+/* Bug workaround - some TPM's don't flush the most
+ * recently changed pcr on suspend, so force the flush
+ * with an extend to the selected _unused_ non-volatile pcr.
+ */
+static int tpm_suspend_pcr;
+static int __init tpm_suspend_setup(char *str)
+{
+	get_option(&str, &tpm_suspend_pcr);
+	return 1;
+}
+__setup("tpm_suspend_pcr=", tpm_suspend_setup);
+
 /*
  * We are about to suspend. Save the TPM state
  * so that it can be restored.
@@ -1075,17 +1096,29 @@ EXPORT_SYMBOL_GPL(tpm_remove_hardware);
 int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
 {
	struct tpm_chip *chip = dev_get_drvdata(dev);
-	u8 savestate[] = {
-		0, 193,		/* TPM_TAG_RQU_COMMAND */
-		0, 0, 0, 10,	/* blob length (in bytes) */
-		0, 0, 0, 152	/* TPM_ORD_SaveState */
-	};
+	struct tpm_cmd_t cmd;
+	int rc;
+
+	u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
 
	if (chip == NULL)
		return -ENODEV;
 
-	tpm_transmit(chip, savestate, sizeof(savestate));
-	return 0;
+	/* for buggy tpm, flush pcrs with extend to selected dummy */
+	if (tpm_suspend_pcr) {
+		cmd.header.in = pcrextend_header;
+		cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
+		memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
+		       TPM_DIGEST_SIZE);
+		rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+				  "extending dummy pcr before suspend");
+	}
+
+	/* now do the actual savestate */
+	cmd.header.in = savestate_header;
+	rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
+			  "sending savestate before suspend");
+	return rc;
 }
 EXPORT_SYMBOL_GPL(tpm_pm_suspend);
 
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 94345994f8a6..24314a9cffe8 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -598,7 +598,7 @@ out_err:
	tpm_remove_hardware(chip->dev);
	return rc;
 }
-
+#ifdef CONFIG_PNP
 static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
				      const struct pnp_device_id *pnp_id)
 {
@@ -663,7 +663,7 @@ static struct pnp_driver tis_pnp_driver = {
 module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
 MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
-
+#endif
 static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
 {
	return tpm_pm_suspend(&dev->dev, msg);
@@ -690,21 +690,21 @@ MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
 static int __init init_tis(void)
 {
	int rc;
+#ifdef CONFIG_PNP
+	if (!force)
+		return pnp_register_driver(&tis_pnp_driver);
+#endif
 
-	if (force) {
-		rc = platform_driver_register(&tis_drv);
-		if (rc < 0)
-			return rc;
-		if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
-			return PTR_ERR(pdev);
-		if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
-			platform_device_unregister(pdev);
-			platform_driver_unregister(&tis_drv);
-		}
+	rc = platform_driver_register(&tis_drv);
+	if (rc < 0)
		return rc;
+	if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
+		return PTR_ERR(pdev);
+	if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
+		platform_device_unregister(pdev);
+		platform_driver_unregister(&tis_drv);
	}
-
-	return pnp_register_driver(&tis_pnp_driver);
+	return rc;
 }
 
 static void __exit cleanup_tis(void)
@@ -728,12 +728,14 @@ static void __exit cleanup_tis(void)
		list_del(&i->list);
	}
	spin_unlock(&tis_lock);
-
-	if (force) {
-		platform_device_unregister(pdev);
-		platform_driver_unregister(&tis_drv);
-	} else
+#ifdef CONFIG_PNP
+	if (!force) {
		pnp_unregister_driver(&tis_pnp_driver);
+		return;
+	}
+#endif
+	platform_device_unregister(pdev);
+	platform_driver_unregister(&tis_drv);
 }
 
 module_init(init_tis);
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 6da962c9b21c..d71f0fc34b46 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1875,6 +1875,7 @@ got_driver:
		 */
		if (filp->f_op == &hung_up_tty_fops)
			filp->f_op = &tty_fops;
+		unlock_kernel();
		goto retry_open;
	}
	unlock_kernel();
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index b314a999aabe..d7be69f13154 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -154,14 +154,14 @@ static int __init cs5535_mfgpt_init(void)
	if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) {
		printk(KERN_ERR DRV_NAME ": Could not set up IRQ %d\n",
				timer_irq);
-		return -EIO;
+		goto err_timer;
	}
 
	/* And register it with the kernel */
	ret = setup_irq(timer_irq, &mfgptirq);
	if (ret) {
		printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n");
-		goto err;
+		goto err_irq;
	}
 
	/* Set the clock scale and enable the event mode for CMP2 */
@@ -184,8 +184,10 @@ static int __init cs5535_mfgpt_init(void)
 
	return 0;
 
-err:
+err_irq:
	cs5535_mfgpt_release_irq(cs5535_event_clock, MFGPT_CMP2, &timer_irq);
+err_timer:
+	cs5535_mfgpt_free_timer(cs5535_event_clock);
	printk(KERN_ERR DRV_NAME ": Unable to set up the MFGPT clock source\n");
	return -EIO;
 }
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 744f748cc84b..f6677cb19789 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -150,13 +150,12 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
 
 static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	int ret;
 
	/* enable clock */
	ret = clk_enable(p->clk);
	if (ret) {
-		pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
+		dev_err(&p->pdev->dev, "cannot enable clock\n");
		return ret;
	}
 
@@ -279,7 +278,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
			delay = 1;
 
		if (!delay)
-			pr_warning("sh_cmt: too long delay\n");
+			dev_warn(&p->pdev->dev, "too long delay\n");
 
	} while (delay);
 }
@@ -289,7 +288,7 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
	unsigned long flags;
 
	if (delta > p->max_match_value)
-		pr_warning("sh_cmt: delta out of range\n");
+		dev_warn(&p->pdev->dev, "delta out of range\n");
 
	spin_lock_irqsave(&p->lock, flags);
	p->next_match_value = delta;
@@ -451,7 +450,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
	cs->resume = sh_cmt_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
-	pr_info("sh_cmt: %s used as clock source\n", cs->name);
+	dev_info(&p->pdev->dev, "used as clock source\n");
	clocksource_register(cs);
	return 0;
 }
@@ -497,13 +496,11 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
 
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
-		pr_info("sh_cmt: %s used for periodic clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for periodic clock events\n");
		sh_cmt_clock_event_start(p, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
-		pr_info("sh_cmt: %s used for oneshot clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
		sh_cmt_clock_event_start(p, 0);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
@@ -544,7 +541,7 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
	ced->set_next_event = sh_cmt_clock_event_next;
	ced->set_mode = sh_cmt_clock_event_mode;
 
-	pr_info("sh_cmt: %s used for clock events\n", ced->name);
+	dev_info(&p->pdev->dev, "used for clock events\n");
	clockevents_register_device(ced);
 }
 
@@ -601,22 +598,27 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
-		pr_err("sh_cmt: failed to remap I/O memory\n");
+		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}
 
	/* request irq using setup_irq() (too early for request_irq()) */
-	p->irqaction.name = cfg->name;
+	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_cmt_interrupt;
	p->irqaction.dev_id = p;
-	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+			     IRQF_IRQPOLL | IRQF_NOBALANCING;
 
	/* get hold of clock */
-	p->clk = clk_get(&p->pdev->dev, cfg->clk);
+	p->clk = clk_get(&p->pdev->dev, "cmt_fck");
	if (IS_ERR(p->clk)) {
-		pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
-		ret = PTR_ERR(p->clk);
-		goto err1;
+		dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+		p->clk = clk_get(&p->pdev->dev, cfg->clk);
+		if (IS_ERR(p->clk)) {
+			dev_err(&p->pdev->dev, "cannot get clock\n");
+			ret = PTR_ERR(p->clk);
+			goto err1;
+		}
	}
 
	if (resource_size(res) == 6) {
@@ -629,17 +631,17 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
		p->clear_bits = ~0xc000;
	}
 
-	ret = sh_cmt_register(p, cfg->name,
+	ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
			      cfg->clockevent_rating,
			      cfg->clocksource_rating);
	if (ret) {
-		pr_err("sh_cmt: registration failed\n");
+		dev_err(&p->pdev->dev, "registration failed\n");
		goto err1;
	}
 
	ret = setup_irq(irq, &p->irqaction);
	if (ret) {
-		pr_err("sh_cmt: failed to request irq %d\n", irq);
+		dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
		goto err1;
	}
 
@@ -654,11 +656,10 @@ err0:
 static int __devinit sh_cmt_probe(struct platform_device *pdev)
 {
	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;
 
	if (p) {
-		pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name);
+		dev_info(&pdev->dev, "kept as earlytimer\n");
		return 0;
	}
 
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 5fb78bfd73bb..ef7a5be8a09f 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -119,13 +119,12 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
 
 static int sh_mtu2_enable(struct sh_mtu2_priv *p)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	int ret;
 
	/* enable clock */
	ret = clk_enable(p->clk);
	if (ret) {
-		pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk);
+		dev_err(&p->pdev->dev, "cannot enable clock\n");
		return ret;
	}
 
@@ -194,8 +193,7 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
 
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
-		pr_info("sh_mtu2: %s used for periodic clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for periodic clock events\n");
		sh_mtu2_enable(p);
		break;
	case CLOCK_EVT_MODE_UNUSED:
@@ -222,13 +220,13 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
	ced->cpumask = cpumask_of(0);
	ced->set_mode = sh_mtu2_clock_event_mode;
 
-	pr_info("sh_mtu2: %s used for clock events\n", ced->name);
+	dev_info(&p->pdev->dev, "used for clock events\n");
	clockevents_register_device(ced);
 
	ret = setup_irq(p->irqaction.irq, &p->irqaction);
	if (ret) {
-		pr_err("sh_mtu2: failed to request irq %d\n",
-		       p->irqaction.irq);
+		dev_err(&p->pdev->dev, "failed to request irq %d\n",
			p->irqaction.irq);
		return;
	}
 }
@@ -274,26 +272,32 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
-		pr_err("sh_mtu2: failed to remap I/O memory\n");
+		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}
 
	/* setup data for setup_irq() (too early for request_irq()) */
-	p->irqaction.name = cfg->name;
+	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_mtu2_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.irq = irq;
-	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+			     IRQF_IRQPOLL | IRQF_NOBALANCING;
 
	/* get hold of clock */
-	p->clk = clk_get(&p->pdev->dev, cfg->clk);
+	p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
	if (IS_ERR(p->clk)) {
-		pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk);
-		ret = PTR_ERR(p->clk);
-		goto err1;
+		dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+		p->clk = clk_get(&p->pdev->dev, cfg->clk);
+		if (IS_ERR(p->clk)) {
+			dev_err(&p->pdev->dev, "cannot get clock\n");
+			ret = PTR_ERR(p->clk);
+			goto err1;
+		}
	}
 
-	return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating);
+	return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
+				cfg->clockevent_rating);
 err1:
	iounmap(p->mapbase);
 err0:
@@ -303,11 +307,10 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 static int __devinit sh_mtu2_probe(struct platform_device *pdev)
 {
	struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;
 
	if (p) {
-		pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name);
+		dev_info(&pdev->dev, "kept as earlytimer\n");
		return 0;
	}
 
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index fc9ff1e5b770..8e44e14ec4c2 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -107,13 +107,12 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
 
 static int sh_tmu_enable(struct sh_tmu_priv *p)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	int ret;
 
	/* enable clock */
	ret = clk_enable(p->clk);
	if (ret) {
-		pr_err("sh_tmu: cannot enable clock \"%s\"\n", cfg->clk);
+		dev_err(&p->pdev->dev, "cannot enable clock\n");
		return ret;
	}
 
@@ -229,7 +228,7 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
	cs->disable = sh_tmu_clocksource_disable;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
-	pr_info("sh_tmu: %s used as clock source\n", cs->name);
+	dev_info(&p->pdev->dev, "used as clock source\n");
	clocksource_register(cs);
	return 0;
 }
@@ -277,13 +276,11 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
 
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
-		pr_info("sh_tmu: %s used for periodic clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for periodic clock events\n");
		sh_tmu_clock_event_start(p, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
-		pr_info("sh_tmu: %s used for oneshot clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
		sh_tmu_clock_event_start(p, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
@@ -324,13 +321,13 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
 
-	pr_info("sh_tmu: %s used for clock events\n", ced->name);
+	dev_info(&p->pdev->dev, "used for clock events\n");
	clockevents_register_device(ced);
 
	ret = setup_irq(p->irqaction.irq, &p->irqaction);
	if (ret) {
-		pr_err("sh_tmu: failed to request irq %d\n",
-		       p->irqaction.irq);
+		dev_err(&p->pdev->dev, "failed to request irq %d\n",
			p->irqaction.irq);
		return;
	}
 }
@@ -379,26 +376,31 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
379 /* map memory, let mapbase point to our channel */ 376 /* map memory, let mapbase point to our channel */
380 p->mapbase = ioremap_nocache(res->start, resource_size(res)); 377 p->mapbase = ioremap_nocache(res->start, resource_size(res));
381 if (p->mapbase == NULL) { 378 if (p->mapbase == NULL) {
382 pr_err("sh_tmu: failed to remap I/O memory\n"); 379 dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
383 goto err0; 380 goto err0;
384 } 381 }
385 382
386 /* setup data for setup_irq() (too early for request_irq()) */ 383 /* setup data for setup_irq() (too early for request_irq()) */
387 p->irqaction.name = cfg->name; 384 p->irqaction.name = dev_name(&p->pdev->dev);
388 p->irqaction.handler = sh_tmu_interrupt; 385 p->irqaction.handler = sh_tmu_interrupt;
389 p->irqaction.dev_id = p; 386 p->irqaction.dev_id = p;
390 p->irqaction.irq = irq; 387 p->irqaction.irq = irq;
391 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL; 388 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
389 IRQF_IRQPOLL | IRQF_NOBALANCING;
392 390
393 /* get hold of clock */ 391 /* get hold of clock */
394 p->clk = clk_get(&p->pdev->dev, cfg->clk); 392 p->clk = clk_get(&p->pdev->dev, "tmu_fck");
395 if (IS_ERR(p->clk)) { 393 if (IS_ERR(p->clk)) {
396 pr_err("sh_tmu: cannot get clock \"%s\"\n", cfg->clk); 394 dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
397 ret = PTR_ERR(p->clk); 395 p->clk = clk_get(&p->pdev->dev, cfg->clk);
398 goto err1; 396 if (IS_ERR(p->clk)) {
397 dev_err(&p->pdev->dev, "cannot get clock\n");
398 ret = PTR_ERR(p->clk);
399 goto err1;
400 }
399 } 401 }
400 402
401 return sh_tmu_register(p, cfg->name, 403 return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
402 cfg->clockevent_rating, 404 cfg->clockevent_rating,
403 cfg->clocksource_rating); 405 cfg->clocksource_rating);
404 err1: 406 err1:
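Context for the pr_info() -> dev_info() conversions throughout this file: dev_printk() prefixes each message with the driver and device name, so the hand-rolled "sh_tmu: %s ..." strings become redundant. A sketch of the output change, assuming a platform device registered as "sh_tmu" with id 0:

	pr_info("sh_tmu: %s used as clock source\n", cs->name);
		/* old: "sh_tmu: TMU0 used as clock source" (name from pdata) */
	dev_info(&p->pdev->dev, "used as clock source\n");
		/* new: "sh_tmu sh_tmu.0: used as clock source" */

The same dev_name() string now also feeds the irqaction and the clocksource/clockevent registration, which is why the cfg->name users disappear in this series.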
@@ -410,11 +412,10 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
410static int __devinit sh_tmu_probe(struct platform_device *pdev) 412static int __devinit sh_tmu_probe(struct platform_device *pdev)
411{ 413{
412 struct sh_tmu_priv *p = platform_get_drvdata(pdev); 414 struct sh_tmu_priv *p = platform_get_drvdata(pdev);
413 struct sh_timer_config *cfg = pdev->dev.platform_data;
414 int ret; 415 int ret;
415 416
416 if (p) { 417 if (p) {
417 pr_info("sh_tmu: %s kept as earlytimer\n", cfg->name); 418 dev_info(&pdev->dev, "kept as earlytimer\n");
418 return 0; 419 return 0;
419 } 420 }
420 421
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 75d293eeb3ee..063b2184caf5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -662,32 +662,20 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
662 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); 662 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
663} 663}
664 664
665#define define_one_ro(_name) \ 665cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
666static struct freq_attr _name = \ 666cpufreq_freq_attr_ro(cpuinfo_min_freq);
667__ATTR(_name, 0444, show_##_name, NULL) 667cpufreq_freq_attr_ro(cpuinfo_max_freq);
668 668cpufreq_freq_attr_ro(cpuinfo_transition_latency);
669#define define_one_ro0400(_name) \ 669cpufreq_freq_attr_ro(scaling_available_governors);
670static struct freq_attr _name = \ 670cpufreq_freq_attr_ro(scaling_driver);
671__ATTR(_name, 0400, show_##_name, NULL) 671cpufreq_freq_attr_ro(scaling_cur_freq);
672 672cpufreq_freq_attr_ro(bios_limit);
673#define define_one_rw(_name) \ 673cpufreq_freq_attr_ro(related_cpus);
674static struct freq_attr _name = \ 674cpufreq_freq_attr_ro(affected_cpus);
675__ATTR(_name, 0644, show_##_name, store_##_name) 675cpufreq_freq_attr_rw(scaling_min_freq);
676 676cpufreq_freq_attr_rw(scaling_max_freq);
677define_one_ro0400(cpuinfo_cur_freq); 677cpufreq_freq_attr_rw(scaling_governor);
678define_one_ro(cpuinfo_min_freq); 678cpufreq_freq_attr_rw(scaling_setspeed);
679define_one_ro(cpuinfo_max_freq);
680define_one_ro(cpuinfo_transition_latency);
681define_one_ro(scaling_available_governors);
682define_one_ro(scaling_driver);
683define_one_ro(scaling_cur_freq);
684define_one_ro(bios_limit);
685define_one_ro(related_cpus);
686define_one_ro(affected_cpus);
687define_one_rw(scaling_min_freq);
688define_one_rw(scaling_max_freq);
689define_one_rw(scaling_governor);
690define_one_rw(scaling_setspeed);
691 679
692static struct attribute *default_attrs[] = { 680static struct attribute *default_attrs[] = {
693 &cpuinfo_min_freq.attr, 681 &cpuinfo_min_freq.attr,
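The ad-hoc define_one_ro()/define_one_rw() macros above were replaced by shared helpers. For reference, the replacements expand to essentially what was deleted; a sketch of the cpufreq.h definitions assumed here (check the tree for the authoritative versions):

#define cpufreq_freq_attr_ro(_name)			\
static struct freq_attr _name =				\
__ATTR(_name, 0444, show_##_name, NULL)

#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
static struct freq_attr _name =				\
__ATTR(_name, _perm, show_##_name, NULL)

#define cpufreq_freq_attr_rw(_name)			\
static struct freq_attr _name =				\
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_global_ro()/define_one_global_rw() are the analogous helpers for struct global_attr used by the governors below.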
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 3a147874a465..526bfbf69611 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -178,12 +178,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
178 return sprintf(buf, "%u\n", min_sampling_rate); 178 return sprintf(buf, "%u\n", min_sampling_rate);
179} 179}
180 180
181#define define_one_ro(_name) \ 181define_one_global_ro(sampling_rate_max);
182static struct global_attr _name = \ 182define_one_global_ro(sampling_rate_min);
183__ATTR(_name, 0444, show_##_name, NULL)
184
185define_one_ro(sampling_rate_max);
186define_one_ro(sampling_rate_min);
187 183
188/* cpufreq_conservative Governor Tunables */ 184/* cpufreq_conservative Governor Tunables */
189#define show_one(file_name, object) \ 185#define show_one(file_name, object) \
@@ -221,12 +217,8 @@ show_one_old(freq_step);
221show_one_old(sampling_rate_min); 217show_one_old(sampling_rate_min);
222show_one_old(sampling_rate_max); 218show_one_old(sampling_rate_max);
223 219
224#define define_one_ro_old(object, _name) \ 220cpufreq_freq_attr_ro_old(sampling_rate_min);
225static struct freq_attr object = \ 221cpufreq_freq_attr_ro_old(sampling_rate_max);
226__ATTR(_name, 0444, show_##_name##_old, NULL)
227
228define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
229define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
230 222
231/*** delete after deprecation time ***/ 223/*** delete after deprecation time ***/
232 224
@@ -364,16 +356,12 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
364 return count; 356 return count;
365} 357}
366 358
367#define define_one_rw(_name) \ 359define_one_global_rw(sampling_rate);
368static struct global_attr _name = \ 360define_one_global_rw(sampling_down_factor);
369__ATTR(_name, 0644, show_##_name, store_##_name) 361define_one_global_rw(up_threshold);
370 362define_one_global_rw(down_threshold);
371define_one_rw(sampling_rate); 363define_one_global_rw(ignore_nice_load);
372define_one_rw(sampling_down_factor); 364define_one_global_rw(freq_step);
373define_one_rw(up_threshold);
374define_one_rw(down_threshold);
375define_one_rw(ignore_nice_load);
376define_one_rw(freq_step);
377 365
378static struct attribute *dbs_attributes[] = { 366static struct attribute *dbs_attributes[] = {
379 &sampling_rate_max.attr, 367 &sampling_rate_max.attr,
@@ -409,16 +397,12 @@ write_one_old(down_threshold);
409write_one_old(ignore_nice_load); 397write_one_old(ignore_nice_load);
410write_one_old(freq_step); 398write_one_old(freq_step);
411 399
412#define define_one_rw_old(object, _name) \ 400cpufreq_freq_attr_rw_old(sampling_rate);
413static struct freq_attr object = \ 401cpufreq_freq_attr_rw_old(sampling_down_factor);
414__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old) 402cpufreq_freq_attr_rw_old(up_threshold);
415 403cpufreq_freq_attr_rw_old(down_threshold);
416define_one_rw_old(sampling_rate_old, sampling_rate); 404cpufreq_freq_attr_rw_old(ignore_nice_load);
417define_one_rw_old(sampling_down_factor_old, sampling_down_factor); 405cpufreq_freq_attr_rw_old(freq_step);
418define_one_rw_old(up_threshold_old, up_threshold);
419define_one_rw_old(down_threshold_old, down_threshold);
420define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
421define_one_rw_old(freq_step_old, freq_step);
422 406
423static struct attribute *dbs_attributes_old[] = { 407static struct attribute *dbs_attributes_old[] = {
424 &sampling_rate_max_old.attr, 408 &sampling_rate_max_old.attr,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index bd444dc93cf2..e1314212d8d4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -73,6 +73,7 @@ enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
73 73
74struct cpu_dbs_info_s { 74struct cpu_dbs_info_s {
75 cputime64_t prev_cpu_idle; 75 cputime64_t prev_cpu_idle;
76 cputime64_t prev_cpu_iowait;
76 cputime64_t prev_cpu_wall; 77 cputime64_t prev_cpu_wall;
77 cputime64_t prev_cpu_nice; 78 cputime64_t prev_cpu_nice;
78 struct cpufreq_policy *cur_policy; 79 struct cpufreq_policy *cur_policy;
@@ -108,6 +109,7 @@ static struct dbs_tuners {
108 unsigned int down_differential; 109 unsigned int down_differential;
109 unsigned int ignore_nice; 110 unsigned int ignore_nice;
110 unsigned int powersave_bias; 111 unsigned int powersave_bias;
112 unsigned int io_is_busy;
111} dbs_tuners_ins = { 113} dbs_tuners_ins = {
112 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, 114 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
113 .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, 115 .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
@@ -148,6 +150,16 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
148 return idle_time; 150 return idle_time;
149} 151}
150 152
153static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
154{
155 u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
156
157 if (iowait_time == -1ULL)
158 return 0;
159
160 return iowait_time;
161}
162
151/* 163/*
152 * Find right freq to be set now with powersave_bias on. 164 * Find right freq to be set now with powersave_bias on.
153 * Returns the freq_hi to be used right now and will set freq_hi_jiffies, 165 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
@@ -234,12 +246,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
234 return sprintf(buf, "%u\n", min_sampling_rate); 246 return sprintf(buf, "%u\n", min_sampling_rate);
235} 247}
236 248
237#define define_one_ro(_name) \ 249define_one_global_ro(sampling_rate_max);
238static struct global_attr _name = \ 250define_one_global_ro(sampling_rate_min);
239__ATTR(_name, 0444, show_##_name, NULL)
240
241define_one_ro(sampling_rate_max);
242define_one_ro(sampling_rate_min);
243 251
244/* cpufreq_ondemand Governor Tunables */ 252/* cpufreq_ondemand Governor Tunables */
245#define show_one(file_name, object) \ 253#define show_one(file_name, object) \
@@ -249,6 +257,7 @@ static ssize_t show_##file_name \
249 return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ 257 return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
250} 258}
251show_one(sampling_rate, sampling_rate); 259show_one(sampling_rate, sampling_rate);
260show_one(io_is_busy, io_is_busy);
252show_one(up_threshold, up_threshold); 261show_one(up_threshold, up_threshold);
253show_one(ignore_nice_load, ignore_nice); 262show_one(ignore_nice_load, ignore_nice);
254show_one(powersave_bias, powersave_bias); 263show_one(powersave_bias, powersave_bias);
@@ -274,12 +283,8 @@ show_one_old(powersave_bias);
274show_one_old(sampling_rate_min); 283show_one_old(sampling_rate_min);
275show_one_old(sampling_rate_max); 284show_one_old(sampling_rate_max);
276 285
277#define define_one_ro_old(object, _name) \ 286cpufreq_freq_attr_ro_old(sampling_rate_min);
278static struct freq_attr object = \ 287cpufreq_freq_attr_ro_old(sampling_rate_max);
279__ATTR(_name, 0444, show_##_name##_old, NULL)
280
281define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
282define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
283 288
284/*** delete after deprecation time ***/ 289/*** delete after deprecation time ***/
285 290
@@ -299,6 +304,23 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
299 return count; 304 return count;
300} 305}
301 306
307static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
308 const char *buf, size_t count)
309{
310 unsigned int input;
311 int ret;
312
313 ret = sscanf(buf, "%u", &input);
314 if (ret != 1)
315 return -EINVAL;
316
317 mutex_lock(&dbs_mutex);
318 dbs_tuners_ins.io_is_busy = !!input;
319 mutex_unlock(&dbs_mutex);
320
321 return count;
322}
323
302static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, 324static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
303 const char *buf, size_t count) 325 const char *buf, size_t count)
304{ 326{
@@ -376,14 +398,11 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
376 return count; 398 return count;
377} 399}
378 400
379#define define_one_rw(_name) \ 401define_one_global_rw(sampling_rate);
380static struct global_attr _name = \ 402define_one_global_rw(io_is_busy);
381__ATTR(_name, 0644, show_##_name, store_##_name) 403define_one_global_rw(up_threshold);
382 404define_one_global_rw(ignore_nice_load);
383define_one_rw(sampling_rate); 405define_one_global_rw(powersave_bias);
384define_one_rw(up_threshold);
385define_one_rw(ignore_nice_load);
386define_one_rw(powersave_bias);
387 406
388static struct attribute *dbs_attributes[] = { 407static struct attribute *dbs_attributes[] = {
389 &sampling_rate_max.attr, 408 &sampling_rate_max.attr,
@@ -392,6 +411,7 @@ static struct attribute *dbs_attributes[] = {
392 &up_threshold.attr, 411 &up_threshold.attr,
393 &ignore_nice_load.attr, 412 &ignore_nice_load.attr,
394 &powersave_bias.attr, 413 &powersave_bias.attr,
414 &io_is_busy.attr,
395 NULL 415 NULL
396}; 416};
397 417
@@ -415,14 +435,10 @@ write_one_old(up_threshold);
415write_one_old(ignore_nice_load); 435write_one_old(ignore_nice_load);
416write_one_old(powersave_bias); 436write_one_old(powersave_bias);
417 437
418#define define_one_rw_old(object, _name) \ 438cpufreq_freq_attr_rw_old(sampling_rate);
419static struct freq_attr object = \ 439cpufreq_freq_attr_rw_old(up_threshold);
420__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old) 440cpufreq_freq_attr_rw_old(ignore_nice_load);
421 441cpufreq_freq_attr_rw_old(powersave_bias);
422define_one_rw_old(sampling_rate_old, sampling_rate);
423define_one_rw_old(up_threshold_old, up_threshold);
424define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
425define_one_rw_old(powersave_bias_old, powersave_bias);
426 442
427static struct attribute *dbs_attributes_old[] = { 443static struct attribute *dbs_attributes_old[] = {
428 &sampling_rate_max_old.attr, 444 &sampling_rate_max_old.attr,
@@ -470,14 +486,15 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
470 486
471 for_each_cpu(j, policy->cpus) { 487 for_each_cpu(j, policy->cpus) {
472 struct cpu_dbs_info_s *j_dbs_info; 488 struct cpu_dbs_info_s *j_dbs_info;
473 cputime64_t cur_wall_time, cur_idle_time; 489 cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
474 unsigned int idle_time, wall_time; 490 unsigned int idle_time, wall_time, iowait_time;
475 unsigned int load, load_freq; 491 unsigned int load, load_freq;
476 int freq_avg; 492 int freq_avg;
477 493
478 j_dbs_info = &per_cpu(od_cpu_dbs_info, j); 494 j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
479 495
480 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); 496 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
497 cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
481 498
482 wall_time = (unsigned int) cputime64_sub(cur_wall_time, 499 wall_time = (unsigned int) cputime64_sub(cur_wall_time,
483 j_dbs_info->prev_cpu_wall); 500 j_dbs_info->prev_cpu_wall);
@@ -487,6 +504,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
487 j_dbs_info->prev_cpu_idle); 504 j_dbs_info->prev_cpu_idle);
488 j_dbs_info->prev_cpu_idle = cur_idle_time; 505 j_dbs_info->prev_cpu_idle = cur_idle_time;
489 506
507 iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
508 j_dbs_info->prev_cpu_iowait);
509 j_dbs_info->prev_cpu_iowait = cur_iowait_time;
510
490 if (dbs_tuners_ins.ignore_nice) { 511 if (dbs_tuners_ins.ignore_nice) {
491 cputime64_t cur_nice; 512 cputime64_t cur_nice;
492 unsigned long cur_nice_jiffies; 513 unsigned long cur_nice_jiffies;
@@ -504,6 +525,16 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
504 idle_time += jiffies_to_usecs(cur_nice_jiffies); 525 idle_time += jiffies_to_usecs(cur_nice_jiffies);
505 } 526 }
506 527
528 /*
529 * For the purpose of ondemand, waiting for disk IO is an
 530 * indication that the workload is performance-critical, and not that
531 * the system is actually idle. So subtract the iowait time
532 * from the cpu idle time.
533 */
534
535 if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
536 idle_time -= iowait_time;
537
507 if (unlikely(!wall_time || wall_time < idle_time)) 538 if (unlikely(!wall_time || wall_time < idle_time))
508 continue; 539 continue;
509 540
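With io_is_busy enabled, iowait is charged as busy time before the load is derived. A minimal sketch of the resulting calculation (load formula as used further down in dbs_check_cpu()):

	if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
		idle_time -= iowait_time;	/* iowait counts as busy */

	load = 100 * (wall_time - idle_time) / wall_time;

A CPU that spends most of a sample window waiting on disk I/O is thus treated as loaded and kept at a higher frequency instead of being ramped down.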
@@ -617,6 +648,29 @@ static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
617 cancel_delayed_work_sync(&dbs_info->work); 648 cancel_delayed_work_sync(&dbs_info->work);
618} 649}
619 650
651/*
 652 * Not all CPUs want IO time to be accounted as busy; this depends on how
653 * efficient idling at a higher frequency/voltage is.
654 * Pavel Machek says this is not so for various generations of AMD and old
655 * Intel systems.
 656 * Mike Chan (android.com) claims this is also not true for ARM.
 657 * Because of this, whitelist specific known series of CPUs by default, and
658 * leave all others up to the user.
659 */
660static int should_io_be_busy(void)
661{
662#if defined(CONFIG_X86)
663 /*
 664 * For Intel, Core 2 (model 15) and later have an efficient idle.
665 */
666 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
667 boot_cpu_data.x86 == 6 &&
668 boot_cpu_data.x86_model >= 15)
669 return 1;
670#endif
671 return 0;
672}
673
620static int cpufreq_governor_dbs(struct cpufreq_policy *policy, 674static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
621 unsigned int event) 675 unsigned int event)
622{ 676{
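should_io_be_busy() only seeds the default when the governor starts (see the next hunk); on CPUs outside the whitelist the behaviour is still reachable at run time through the io_is_busy attribute added above, conventionally exposed as /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy (path assumed from the usual location of the global ondemand tunables), e.g. echo 1 > io_is_busy.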
@@ -679,6 +733,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
679 dbs_tuners_ins.sampling_rate = 733 dbs_tuners_ins.sampling_rate =
680 max(min_sampling_rate, 734 max(min_sampling_rate,
681 latency * LATENCY_MULTIPLIER); 735 latency * LATENCY_MULTIPLIER);
736 dbs_tuners_ins.io_is_busy = should_io_be_busy();
682 } 737 }
683 mutex_unlock(&dbs_mutex); 738 mutex_unlock(&dbs_mutex);
684 739
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 1c1ceb4f218f..12c98900dcf8 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -67,7 +67,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
67 struct ladder_device *ldev = &__get_cpu_var(ladder_devices); 67 struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
68 struct ladder_device_state *last_state; 68 struct ladder_device_state *last_state;
69 int last_residency, last_idx = ldev->last_state_idx; 69 int last_residency, last_idx = ldev->last_state_idx;
70 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY); 70 int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
71 71
72 /* Special case when user has set very strict latency requirement */ 72 /* Special case when user has set very strict latency requirement */
73 if (unlikely(latency_req == 0)) { 73 if (unlikely(latency_req == 0)) {
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f8e57c6303f2..b81ad9c731ae 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -182,7 +182,7 @@ static u64 div_round64(u64 dividend, u32 divisor)
182static int menu_select(struct cpuidle_device *dev) 182static int menu_select(struct cpuidle_device *dev)
183{ 183{
184 struct menu_device *data = &__get_cpu_var(menu_devices); 184 struct menu_device *data = &__get_cpu_var(menu_devices);
185 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY); 185 int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
186 int i; 186 int i;
187 int multiplier; 187 int multiplier;
188 188
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 6f25a20de99f..323afef77802 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -26,8 +26,7 @@
26#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
29 29#include <linux/sh_dma.h>
30#include <asm/dmaengine.h>
31 30
32#include "shdma.h" 31#include "shdma.h"
33 32
@@ -45,7 +44,7 @@ enum sh_dmae_desc_status {
45#define LOG2_DEFAULT_XFER_SIZE 2 44#define LOG2_DEFAULT_XFER_SIZE 2
46 45
47/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ 46/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
48static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; 47static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
49 48
50static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); 49static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
51 50
@@ -190,7 +189,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
190 struct sh_dmae_device *shdev = container_of(sh_chan->common.device, 189 struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
191 struct sh_dmae_device, common); 190 struct sh_dmae_device, common);
192 struct sh_dmae_pdata *pdata = shdev->pdata; 191 struct sh_dmae_pdata *pdata = shdev->pdata;
193 struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; 192 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
194 u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16); 193 u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
195 int shift = chan_pdata->dmars_bit; 194 int shift = chan_pdata->dmars_bit;
196 195
@@ -266,8 +265,8 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
266 return NULL; 265 return NULL;
267} 266}
268 267
269static struct sh_dmae_slave_config *sh_dmae_find_slave( 268static const struct sh_dmae_slave_config *sh_dmae_find_slave(
270 struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id) 269 struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
271{ 270{
272 struct dma_device *dma_dev = sh_chan->common.device; 271 struct dma_device *dma_dev = sh_chan->common.device;
273 struct sh_dmae_device *shdev = container_of(dma_dev, 272 struct sh_dmae_device *shdev = container_of(dma_dev,
@@ -275,11 +274,11 @@ static struct sh_dmae_slave_config *sh_dmae_find_slave(
275 struct sh_dmae_pdata *pdata = shdev->pdata; 274 struct sh_dmae_pdata *pdata = shdev->pdata;
276 int i; 275 int i;
277 276
278 if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) 277 if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
279 return NULL; 278 return NULL;
280 279
281 for (i = 0; i < pdata->slave_num; i++) 280 for (i = 0; i < pdata->slave_num; i++)
282 if (pdata->slave[i].slave_id == slave_id) 281 if (pdata->slave[i].slave_id == param->slave_id)
283 return pdata->slave + i; 282 return pdata->slave + i;
284 283
285 return NULL; 284 return NULL;
@@ -299,9 +298,9 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
299 * never runs concurrently with itself or free_chan_resources. 298 * never runs concurrently with itself or free_chan_resources.
300 */ 299 */
301 if (param) { 300 if (param) {
302 struct sh_dmae_slave_config *cfg; 301 const struct sh_dmae_slave_config *cfg;
303 302
304 cfg = sh_dmae_find_slave(sh_chan, param->slave_id); 303 cfg = sh_dmae_find_slave(sh_chan, param);
305 if (!cfg) { 304 if (!cfg) {
306 ret = -EINVAL; 305 ret = -EINVAL;
307 goto efindslave; 306 goto efindslave;
@@ -574,12 +573,14 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
574{ 573{
575 struct sh_dmae_slave *param; 574 struct sh_dmae_slave *param;
576 struct sh_dmae_chan *sh_chan; 575 struct sh_dmae_chan *sh_chan;
576 dma_addr_t slave_addr;
577 577
578 if (!chan) 578 if (!chan)
579 return NULL; 579 return NULL;
580 580
581 sh_chan = to_sh_chan(chan); 581 sh_chan = to_sh_chan(chan);
582 param = chan->private; 582 param = chan->private;
583 slave_addr = param->config->addr;
583 584
584 /* Someone calling slave DMA on a public channel? */ 585 /* Someone calling slave DMA on a public channel? */
585 if (!param || !sg_len) { 586 if (!param || !sg_len) {
@@ -592,7 +593,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
592 * if (param != NULL), this is a successfully requested slave channel, 593 * if (param != NULL), this is a successfully requested slave channel,
593 * therefore param->config != NULL too. 594 * therefore param->config != NULL too.
594 */ 595 */
595 return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr, 596 return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
596 direction, flags); 597 direction, flags);
597} 598}
598 599
@@ -873,7 +874,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
873 int irq, unsigned long flags) 874 int irq, unsigned long flags)
874{ 875{
875 int err; 876 int err;
876 struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; 877 const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
877 struct platform_device *pdev = to_platform_device(shdev->common.dev); 878 struct platform_device *pdev = to_platform_device(shdev->common.dev);
878 struct sh_dmae_chan *new_sh_chan; 879 struct sh_dmae_chan *new_sh_chan;
879 880
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 153609a1e96c..4021275a0a43 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -17,8 +17,8 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/list.h> 18#include <linux/list.h>
19 19
20#include <asm/dmaengine.h> 20#define SH_DMAC_MAX_CHANNELS 6
21 21#define SH_DMA_SLAVE_NUMBER 256
22#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ 22#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
23 23
24struct device; 24struct device;
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c
index 753219cf993a..41a9388f2fde 100644
--- a/drivers/gpio/it8761e_gpio.c
+++ b/drivers/gpio/it8761e_gpio.c
@@ -80,8 +80,8 @@ static int it8761e_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
80 u16 reg; 80 u16 reg;
81 u8 bit; 81 u8 bit;
82 82
83 bit = gpio_num % 7; 83 bit = gpio_num % 8;
84 reg = (gpio_num >= 7) ? gpio_ba + 1 : gpio_ba; 84 reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba;
85 85
86 return !!(inb(reg) & (1 << bit)); 86 return !!(inb(reg) & (1 << bit));
87} 87}
@@ -91,8 +91,8 @@ static int it8761e_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
91 u8 curr_dirs; 91 u8 curr_dirs;
92 u8 io_reg, bit; 92 u8 io_reg, bit;
93 93
94 bit = gpio_num % 7; 94 bit = gpio_num % 8;
95 io_reg = (gpio_num >= 7) ? GPIO2X_IO : GPIO1X_IO; 95 io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO;
96 96
97 spin_lock(&sio_lock); 97 spin_lock(&sio_lock);
98 98
@@ -116,8 +116,8 @@ static void it8761e_gpio_set(struct gpio_chip *gc,
116 u8 curr_vals, bit; 116 u8 curr_vals, bit;
117 u16 reg; 117 u16 reg;
118 118
119 bit = gpio_num % 7; 119 bit = gpio_num % 8;
120 reg = (gpio_num >= 7) ? gpio_ba + 1 : gpio_ba; 120 reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba;
121 121
122 spin_lock(&sio_lock); 122 spin_lock(&sio_lock);
123 123
@@ -135,8 +135,8 @@ static int it8761e_gpio_direction_out(struct gpio_chip *gc,
135{ 135{
136 u8 curr_dirs, io_reg, bit; 136 u8 curr_dirs, io_reg, bit;
137 137
138 bit = gpio_num % 7; 138 bit = gpio_num % 8;
139 io_reg = (gpio_num >= 7) ? GPIO2X_IO : GPIO1X_IO; 139 io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO;
140 140
141 it8761e_gpio_set(gc, gpio_num, val); 141 it8761e_gpio_set(gc, gpio_num, val);
142 142
@@ -200,7 +200,7 @@ static int __init it8761e_gpio_init(void)
200 return -EBUSY; 200 return -EBUSY;
201 201
202 it8761e_gpio_chip.base = -1; 202 it8761e_gpio_chip.base = -1;
203 it8761e_gpio_chip.ngpio = 14; 203 it8761e_gpio_chip.ngpio = 16;
204 204
205 err = gpiochip_add(&it8761e_gpio_chip); 205 err = gpiochip_add(&it8761e_gpio_chip);
206 if (err < 0) 206 if (err < 0)
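The old modulo-7 arithmetic folded GPIO 7 into bit 0 of the second bank and left bit 7 of both banks unreachable; with 16 lines the mapping is now a clean byte per bank. A standalone illustration (plain C, not driver code) of the old and new mappings:

#include <stdio.h>

int main(void)
{
	for (unsigned int g = 0; g < 16; g++)
		printf("gpio %2u: new reg %u bit %u | old reg %u bit %u\n",
		       g, g >= 8 ? 1 : 0, g % 8, g >= 7 ? 1 : 0, g % 7);
	return 0;
}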
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2b8b969d0c15..df6a9cd82c4d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -456,11 +456,15 @@ i915_error_object_create(struct drm_device *dev,
456 456
457 for (page = 0; page < page_count; page++) { 457 for (page = 0; page < page_count; page++) {
458 void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC); 458 void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
459 unsigned long flags;
460
459 if (d == NULL) 461 if (d == NULL)
460 goto unwind; 462 goto unwind;
461 s = kmap_atomic(src_priv->pages[page], KM_USER0); 463 local_irq_save(flags);
464 s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
462 memcpy(d, s, PAGE_SIZE); 465 memcpy(d, s, PAGE_SIZE);
463 kunmap_atomic(s, KM_USER0); 466 kunmap_atomic(s, KM_IRQ0);
467 local_irq_restore(flags);
464 dst->pages[page] = d; 468 dst->pages[page] = d;
465 } 469 }
466 dst->page_count = page_count; 470 dst->page_count = page_count;
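The rationale, inferred from the atomic-kmap slot conventions of this era: KM_USER0 is the per-CPU slot reserved for user-context mappings, and this error-capture path can run with interrupts enabled or from interrupt context, where a concurrent user of the same slot would corrupt the mapping. Disabling interrupts around the copy and using the KM_IRQ0 slot keeps the kmap safe regardless of the caller's context.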
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 40ab6d9c3736..cc5316dcf580 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -424,7 +424,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
424 if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && 424 if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
425 (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 425 (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
426 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3); 426 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
427 offset = *cmd << 10; 427 offset = *cmd3 << 10;
428 if (radeon_check_and_fixup_offset 428 if (radeon_check_and_fixup_offset
429 (dev_priv, file_priv, &offset)) { 429 (dev_priv, file_priv, &offset)) {
430 DRM_ERROR("Invalid second packet offset\n"); 430 DRM_ERROR("Invalid second packet offset\n");
@@ -2895,9 +2895,12 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
2895 return rv; 2895 return rv;
2896 rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer, 2896 rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
2897 cmdbuf->bufsz); 2897 cmdbuf->bufsz);
2898 if (rv) 2898 if (rv) {
2899 drm_buffer_free(cmdbuf->buffer);
2899 return rv; 2900 return rv;
2900 } 2901 }
2902 } else
2903 goto done;
2901 2904
2902 orig_nbox = cmdbuf->nbox; 2905 orig_nbox = cmdbuf->nbox;
2903 2906
@@ -2905,8 +2908,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
2905 int temp; 2908 int temp;
2906 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); 2909 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
2907 2910
2908 if (cmdbuf->bufsz != 0) 2911 drm_buffer_free(cmdbuf->buffer);
2909 drm_buffer_free(cmdbuf->buffer);
2910 2912
2911 return temp; 2913 return temp;
2912 } 2914 }
@@ -3012,16 +3014,15 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
3012 } 3014 }
3013 } 3015 }
3014 3016
3015 if (cmdbuf->bufsz != 0) 3017 drm_buffer_free(cmdbuf->buffer);
3016 drm_buffer_free(cmdbuf->buffer);
3017 3018
3019 done:
3018 DRM_DEBUG("DONE\n"); 3020 DRM_DEBUG("DONE\n");
3019 COMMIT_RING(); 3021 COMMIT_RING();
3020 return 0; 3022 return 0;
3021 3023
3022 err: 3024 err:
3023 if (cmdbuf->bufsz != 0) 3025 drm_buffer_free(cmdbuf->buffer);
3024 drm_buffer_free(cmdbuf->buffer);
3025 return -EINVAL; 3026 return -EINVAL;
3026} 3027}
3027 3028
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index 7e597d7f770f..24663a8717b1 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -59,6 +59,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
59 59
60static const struct hid_device_id ch_devices[] = { 60static const struct hid_device_id ch_devices[] = {
61 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, 61 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
62 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
62 { } 63 { }
63}; 64};
64MODULE_DEVICE_TABLE(hid, ch_devices); 65MODULE_DEVICE_TABLE(hid, ch_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2e2aa759d230..143e788b729b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1043,13 +1043,8 @@ void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
1043 1043
1044 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event) 1044 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
1045 hid->hiddev_report_event(hid, report); 1045 hid->hiddev_report_event(hid, report);
1046 if (hid->claimed & HID_CLAIMED_HIDRAW) { 1046 if (hid->claimed & HID_CLAIMED_HIDRAW)
1047 /* numbered reports need to be passed with the report num */ 1047 hidraw_report_event(hid, data, size);
1048 if (report_enum->numbered)
1049 hidraw_report_event(hid, data - 1, size + 1);
1050 else
1051 hidraw_report_event(hid, data, size);
1052 }
1053 1048
1054 for (a = 0; a < report->maxfield; a++) 1049 for (a = 0; a < report->maxfield; a++)
1055 hid_input_field(hid, report->field[a], cdata, interrupt); 1050 hid_input_field(hid, report->field[a], cdata, interrupt);
@@ -1296,6 +1291,7 @@ static const struct hid_device_id hid_blacklist[] = {
1296 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1291 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1297 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, 1292 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
1298 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, 1293 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
1294 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
1299 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, 1295 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
1300 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, 1296 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
1301 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) }, 1297 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 797e06470356..09d27649a0f7 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -131,6 +131,7 @@
131 131
132#define USB_VENDOR_ID_CHERRY 0x046a 132#define USB_VENDOR_ID_CHERRY 0x046a
133#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 133#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
134#define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027
134 135
135#define USB_VENDOR_ID_CHIC 0x05fe 136#define USB_VENDOR_ID_CHIC 0x05fe
136#define USB_DEVICE_ID_CHIC_GAMEPAD 0x0014 137#define USB_DEVICE_ID_CHIC_GAMEPAD 0x0014
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 9b24fc510712..4777bbfa1cc2 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * HID driver for N-Trig touchscreens 2 * HID driver for N-Trig touchscreens
3 * 3 *
4 * Copyright (c) 2008 Rafi Rubin 4 * Copyright (c) 2008-2010 Rafi Rubin
5 * Copyright (c) 2009 Stephane Chatty 5 * Copyright (c) 2009-2010 Stephane Chatty
6 * 6 *
7 */ 7 */
8 8
@@ -15,6 +15,8 @@
15 15
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/hid.h> 17#include <linux/hid.h>
18#include <linux/usb.h>
19#include "usbhid/usbhid.h"
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/slab.h> 21#include <linux/slab.h>
20 22
@@ -22,17 +24,16 @@
22 24
23#define NTRIG_DUPLICATE_USAGES 0x001 25#define NTRIG_DUPLICATE_USAGES 0x001
24 26
25#define nt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
26 EV_KEY, (c))
27
28struct ntrig_data { 27struct ntrig_data {
29 /* Incoming raw values for a single contact */ 28 /* Incoming raw values for a single contact */
30 __u16 x, y, w, h; 29 __u16 x, y, w, h;
31 __u16 id; 30 __u16 id;
32 __u8 confidence; 31
32 bool tipswitch;
33 bool confidence;
34 bool first_contact_touch;
33 35
34 bool reading_mt; 36 bool reading_mt;
35 __u8 first_contact_confidence;
36 37
37 __u8 mt_footer[4]; 38 __u8 mt_footer[4];
38 __u8 mt_foot_count; 39 __u8 mt_foot_count;
@@ -139,9 +140,10 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
139 case 0xff000001: 140 case 0xff000001:
140 /* Tag indicating the start of a multitouch group */ 141 /* Tag indicating the start of a multitouch group */
141 nd->reading_mt = 1; 142 nd->reading_mt = 1;
142 nd->first_contact_confidence = 0; 143 nd->first_contact_touch = 0;
143 break; 144 break;
144 case HID_DG_TIPSWITCH: 145 case HID_DG_TIPSWITCH:
146 nd->tipswitch = value;
145 /* Prevent emission of touch until validated */ 147 /* Prevent emission of touch until validated */
146 return 1; 148 return 1;
147 case HID_DG_CONFIDENCE: 149 case HID_DG_CONFIDENCE:
@@ -169,8 +171,14 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
169 * to emit a normal (X, Y) position 171 * to emit a normal (X, Y) position
170 */ 172 */
171 if (!nd->reading_mt) { 173 if (!nd->reading_mt) {
174 /*
175 * TipSwitch indicates the presence of a
176 * finger in single touch mode.
177 */
178 input_report_key(input, BTN_TOUCH,
179 nd->tipswitch);
172 input_report_key(input, BTN_TOOL_DOUBLETAP, 180 input_report_key(input, BTN_TOOL_DOUBLETAP,
173 (nd->confidence != 0)); 181 nd->tipswitch);
174 input_event(input, EV_ABS, ABS_X, nd->x); 182 input_event(input, EV_ABS, ABS_X, nd->x);
175 input_event(input, EV_ABS, ABS_Y, nd->y); 183 input_event(input, EV_ABS, ABS_Y, nd->y);
176 } 184 }
@@ -209,7 +217,13 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
209 217
210 /* emit a normal (X, Y) for the first point only */ 218 /* emit a normal (X, Y) for the first point only */
211 if (nd->id == 0) { 219 if (nd->id == 0) {
212 nd->first_contact_confidence = nd->confidence; 220 /*
221 * TipSwitch is superfluous in multitouch
222 * mode. The footer events tell us
223 * if there is a finger on the screen or
224 * not.
225 */
226 nd->first_contact_touch = nd->confidence;
213 input_event(input, EV_ABS, ABS_X, nd->x); 227 input_event(input, EV_ABS, ABS_X, nd->x);
214 input_event(input, EV_ABS, ABS_Y, nd->y); 228 input_event(input, EV_ABS, ABS_Y, nd->y);
215 } 229 }
@@ -239,30 +253,11 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
239 253
240 nd->reading_mt = 0; 254 nd->reading_mt = 0;
241 255
242 if (nd->first_contact_confidence) { 256 if (nd->first_contact_touch) {
243 switch (value) { 257 input_report_key(input, BTN_TOOL_DOUBLETAP, 1);
244 case 0: /* for single touch devices */
245 case 1:
246 input_report_key(input,
247 BTN_TOOL_DOUBLETAP, 1);
248 break;
249 case 2:
250 input_report_key(input,
251 BTN_TOOL_TRIPLETAP, 1);
252 break;
253 case 3:
254 default:
255 input_report_key(input,
256 BTN_TOOL_QUADTAP, 1);
257 }
258 input_report_key(input, BTN_TOUCH, 1); 258 input_report_key(input, BTN_TOUCH, 1);
259 } else { 259 } else {
260 input_report_key(input, 260 input_report_key(input, BTN_TOOL_DOUBLETAP, 0);
261 BTN_TOOL_DOUBLETAP, 0);
262 input_report_key(input,
263 BTN_TOOL_TRIPLETAP, 0);
264 input_report_key(input,
265 BTN_TOOL_QUADTAP, 0);
266 input_report_key(input, BTN_TOUCH, 0); 261 input_report_key(input, BTN_TOUCH, 0);
267 } 262 }
268 break; 263 break;
@@ -286,6 +281,7 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
286 struct ntrig_data *nd; 281 struct ntrig_data *nd;
287 struct hid_input *hidinput; 282 struct hid_input *hidinput;
288 struct input_dev *input; 283 struct input_dev *input;
284 struct hid_report *report;
289 285
290 if (id->driver_data) 286 if (id->driver_data)
291 hdev->quirks |= HID_QUIRK_MULTI_INPUT; 287 hdev->quirks |= HID_QUIRK_MULTI_INPUT;
@@ -327,13 +323,7 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
327 __clear_bit(BTN_TOOL_PEN, input->keybit); 323 __clear_bit(BTN_TOOL_PEN, input->keybit);
328 __clear_bit(BTN_TOOL_FINGER, input->keybit); 324 __clear_bit(BTN_TOOL_FINGER, input->keybit);
329 __clear_bit(BTN_0, input->keybit); 325 __clear_bit(BTN_0, input->keybit);
330 /*
331 * A little something special to enable
332 * two and three finger taps.
333 */
334 __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); 326 __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
335 __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
336 __set_bit(BTN_TOOL_QUADTAP, input->keybit);
337 /* 327 /*
338 * The physical touchscreen (single touch) 328 * The physical touchscreen (single touch)
339 * input has a value for physical, whereas 329 * input has a value for physical, whereas
@@ -349,6 +339,12 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
349 } 339 }
350 } 340 }
351 341
342 /* This is needed for devices with more recent firmware versions */
343 report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0x0a];
344 if (report)
345 usbhid_submit_report(hdev, report, USB_DIR_OUT);
346
347
352 return 0; 348 return 0;
353err_free: 349err_free:
354 kfree(nd); 350 kfree(nd);
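Net effect of the single-touch changes: BTN_TOUCH and BTN_TOOL_DOUBLETAP now follow TipSwitch instead of Confidence. A sketch of the event stream for one tap, assuming the firmware reports TipSwitch = 1 on contact:

	/* contact: EV_KEY BTN_TOUCH 1, EV_KEY BTN_TOOL_DOUBLETAP 1,
	 *          EV_ABS ABS_X x, EV_ABS ABS_Y y
	 * release: EV_KEY BTN_TOUCH 0, EV_KEY BTN_TOOL_DOUBLETAP 0
	 */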
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 7502a4b2fa86..402d5574b574 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -76,7 +76,7 @@ static int sony_set_operational_usb(struct hid_device *hdev)
76 76
77static int sony_set_operational_bt(struct hid_device *hdev) 77static int sony_set_operational_bt(struct hid_device *hdev)
78{ 78{
79 unsigned char buf[] = { 0x53, 0xf4, 0x42, 0x03, 0x00, 0x00 }; 79 unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
80 return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT); 80 return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
81} 81}
82 82
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index f7700cf49721..f947d8337e21 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -277,7 +277,6 @@ static int __init wacom_init(void)
277 ret = hid_register_driver(&wacom_driver); 277 ret = hid_register_driver(&wacom_driver);
278 if (ret) 278 if (ret)
279 printk(KERN_ERR "can't register wacom driver\n"); 279 printk(KERN_ERR "can't register wacom driver\n");
280 printk(KERN_ERR "wacom driver registered\n");
281 return ret; 280 return ret;
282} 281}
283 282
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 56d06cd8075b..7b85b696fdab 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -999,13 +999,6 @@ static int usbhid_start(struct hid_device *hid)
999 } 999 }
1000 } 1000 }
1001 1001
1002 init_waitqueue_head(&usbhid->wait);
1003 INIT_WORK(&usbhid->reset_work, hid_reset);
1004 INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
1005 setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
1006
1007 spin_lock_init(&usbhid->lock);
1008
1009 usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL); 1002 usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL);
1010 if (!usbhid->urbctrl) { 1003 if (!usbhid->urbctrl) {
1011 ret = -ENOMEM; 1004 ret = -ENOMEM;
@@ -1179,6 +1172,12 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
1179 usbhid->intf = intf; 1172 usbhid->intf = intf;
1180 usbhid->ifnum = interface->desc.bInterfaceNumber; 1173 usbhid->ifnum = interface->desc.bInterfaceNumber;
1181 1174
1175 init_waitqueue_head(&usbhid->wait);
1176 INIT_WORK(&usbhid->reset_work, hid_reset);
1177 INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
1178 setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
1179 spin_lock_init(&usbhid->lock);
1180
1182 ret = hid_add_device(hid); 1181 ret = hid_add_device(hid);
1183 if (ret) { 1182 if (ret) {
1184 if (ret != -ENODEV) 1183 if (ret != -ENODEV)
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 0f28d91f29d8..f085c18d2905 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -195,6 +195,9 @@ static unsigned int applesmc_accelerometer;
195/* Indicates whether this computer has light sensors and keyboard backlight. */ 195/* Indicates whether this computer has light sensors and keyboard backlight. */
196static unsigned int applesmc_light; 196static unsigned int applesmc_light;
197 197
198/* The number of fans handled by the driver */
199static unsigned int fans_handled;
200
198/* Indicates which temperature sensors set to use. */ 201/* Indicates which temperature sensors set to use. */
199static unsigned int applesmc_temperature_set; 202static unsigned int applesmc_temperature_set;
200 203
@@ -1492,39 +1495,24 @@ static int __init applesmc_init(void)
1492 1495
1493 /* create fan files */ 1496 /* create fan files */
1494 count = applesmc_get_fan_count(); 1497 count = applesmc_get_fan_count();
1495 if (count < 0) { 1498 if (count < 0)
1496 printk(KERN_ERR "applesmc: Cannot get the number of fans.\n"); 1499 printk(KERN_ERR "applesmc: Cannot get the number of fans.\n");
1497 } else { 1500 else
1498 printk(KERN_INFO "applesmc: %d fans found.\n", count); 1501 printk(KERN_INFO "applesmc: %d fans found.\n", count);
1499 1502
1500 switch (count) { 1503 if (count > 4) {
1501 default: 1504 count = 4;
1502 printk(KERN_WARNING "applesmc: More than 4 fans found," 1505 printk(KERN_WARNING "applesmc: More than 4 fans found,"
1503 " but at most 4 fans are supported" 1506 " but at most 4 fans are supported"
1504 " by the driver.\n"); 1507 " by the driver.\n");
1505 case 4: 1508 }
1506 ret = sysfs_create_group(&pdev->dev.kobj, 1509
1507 &fan_attribute_groups[3]); 1510 while (fans_handled < count) {
1508 if (ret) 1511 ret = sysfs_create_group(&pdev->dev.kobj,
1509 goto out_key_enumeration; 1512 &fan_attribute_groups[fans_handled]);
1510 case 3: 1513 if (ret)
1511 ret = sysfs_create_group(&pdev->dev.kobj, 1514 goto out_fans;
1512 &fan_attribute_groups[2]); 1515 fans_handled++;
1513 if (ret)
1514 goto out_key_enumeration;
1515 case 2:
1516 ret = sysfs_create_group(&pdev->dev.kobj,
1517 &fan_attribute_groups[1]);
1518 if (ret)
1519 goto out_key_enumeration;
1520 case 1:
1521 ret = sysfs_create_group(&pdev->dev.kobj,
1522 &fan_attribute_groups[0]);
1523 if (ret)
1524 goto out_fan_1;
1525 case 0:
1526 ;
1527 }
1528 } 1516 }
1529 1517
1530 for (i = 0; 1518 for (i = 0;
@@ -1593,10 +1581,10 @@ out_accelerometer:
1593 applesmc_release_accelerometer(); 1581 applesmc_release_accelerometer();
1594out_temperature: 1582out_temperature:
1595 sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); 1583 sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
1596 sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[0]); 1584out_fans:
1597out_fan_1: 1585 while (fans_handled)
1598 sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[1]); 1586 sysfs_remove_group(&pdev->dev.kobj,
1599out_key_enumeration: 1587 &fan_attribute_groups[--fans_handled]);
1600 sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group); 1588 sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
1601out_name: 1589out_name:
1602 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr); 1590 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
@@ -1622,8 +1610,9 @@ static void __exit applesmc_exit(void)
1622 if (applesmc_accelerometer) 1610 if (applesmc_accelerometer)
1623 applesmc_release_accelerometer(); 1611 applesmc_release_accelerometer();
1624 sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); 1612 sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
1625 sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[0]); 1613 while (fans_handled)
1626 sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[1]); 1614 sysfs_remove_group(&pdev->dev.kobj,
1615 &fan_attribute_groups[--fans_handled]);
1627 sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group); 1616 sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
1628 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr); 1617 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
1629 platform_device_unregister(pdev); 1618 platform_device_unregister(pdev);
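The fall-through switch is replaced by the standard counted create/unwind idiom, which lets probe failure and module exit share one piece of bookkeeping (fans_handled). Distilled, with create()/destroy() as placeholders rather than applesmc functions:

	while (done < count) {
		if (create(done))		/* sysfs_create_group(...) here */
			goto unwind;
		done++;
	}
	return 0;
unwind:
	while (done)
		destroy(--done);		/* tear down in reverse order */
	return ret;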
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 7f948105d8ad..0f388adc6187 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -268,8 +268,11 @@ static ssize_t store_fan16(struct device *dev,
268 if (strict_strtol(buf, 10, &reqval)) 268 if (strict_strtol(buf, 10, &reqval))
269 return -EINVAL; 269 return -EINVAL;
270 270
271 /* If a minimum RPM of zero is requested, then we set the register to
272 0xffff. This value allows the fan to be stopped completely without
273 generating an alarm. */
271 reqval = 274 reqval =
272 (SENSORS_LIMIT((reqval) <= 0 ? 0 : 5400000 / (reqval), 0, 65534)); 275 (reqval <= 0 ? 0xffff : SENSORS_LIMIT(5400000 / reqval, 0, 0xfffe));
273 276
274 mutex_lock(&data->update_lock); 277 mutex_lock(&data->update_lock);
275 data->reg[param->msb[0]] = (reqval >> 8) & 0xff; 278 data->reg[param->msb[0]] = (reqval >> 8) & 0xff;
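The tach registers hold 5400000 / RPM, so limits must be computed in register units rather than clamped in RPM. Worked numbers under that formula:

	/* 1200 RPM -> 5400000 / 1200 = 4500 (ordinary reading)
	 *   80 RPM -> 67500, clamped to 0xfffe = 65534 (slowest alarmable)
	 *    0 RPM -> 0xffff: fan may stop without raising the tach alarm
	 */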
@@ -285,8 +288,9 @@ static ssize_t store_fan16(struct device *dev,
285 * Voltages are scaled in the device so that the nominal voltage 288 * Voltages are scaled in the device so that the nominal voltage
286 * is 3/4ths of the 0-255 range (i.e. 192). 289 * is 3/4ths of the 0-255 range (i.e. 192).
287 * If all voltages are 'normal' then all voltage registers will 290 * If all voltages are 'normal' then all voltage registers will
288 * read 0xC0. This doesn't help us if we don't have a point of refernce. 291 * read 0xC0.
289 * The data sheet however provides us with the full scale value for each 292 *
293 * The data sheet provides us with the 3/4 scale value for each voltage
290 * which is stored in in_scaling. The sda->index parameter value provides 294 * which is stored in in_scaling. The sda->index parameter value provides
291 * the index into in_scaling. 295 * the index into in_scaling.
292 * 296 *
@@ -295,7 +299,7 @@ static ssize_t store_fan16(struct device *dev,
295 */ 299 */
296 300
297static int asc7621_in_scaling[] = { 301static int asc7621_in_scaling[] = {
298 3320, 3000, 4380, 6640, 16000 302 2500, 2250, 3300, 5000, 12000
299}; 303};
300 304
301static ssize_t show_in10(struct device *dev, struct device_attribute *attr, 305static ssize_t show_in10(struct device *dev, struct device_attribute *attr,
@@ -306,19 +310,12 @@ static ssize_t show_in10(struct device *dev, struct device_attribute *attr,
306 u8 nr = sda->index; 310 u8 nr = sda->index;
307 311
308 mutex_lock(&data->update_lock); 312 mutex_lock(&data->update_lock);
309 regval = (data->reg[param->msb[0]] * asc7621_in_scaling[nr]) / 256; 313 regval = (data->reg[param->msb[0]] << 8) | (data->reg[param->lsb[0]]);
310
311 /* The LSB value is a 2-bit scaling of the MSB's LSbit value.
312 * I.E. If the maximim voltage for this input is 6640 millivolts then
313 * a MSB register value of 0 = 0mv and 255 = 6640mv.
314 * A 1 step change therefore represents 25.9mv (6640 / 256).
315 * The extra 2-bits therefore represent increments of 6.48mv.
316 */
317 regval += ((asc7621_in_scaling[nr] / 256) / 4) *
318 (data->reg[param->lsb[0]] >> 6);
319
320 mutex_unlock(&data->update_lock); 314 mutex_unlock(&data->update_lock);
321 315
316 /* The LSB value is a 2-bit scaling of the MSB's LSbit value. */
317 regval = (regval >> 6) * asc7621_in_scaling[nr] / (0xc0 << 2);
318
322 return sprintf(buf, "%u\n", regval); 319 return sprintf(buf, "%u\n", regval);
323} 320}
324 321
@@ -331,7 +328,7 @@ static ssize_t show_in8(struct device *dev, struct device_attribute *attr,
331 328
332 return sprintf(buf, "%u\n", 329 return sprintf(buf, "%u\n",
333 ((data->reg[param->msb[0]] * 330 ((data->reg[param->msb[0]] *
334 asc7621_in_scaling[nr]) / 256)); 331 asc7621_in_scaling[nr]) / 0xc0));
335} 332}
336 333
337static ssize_t store_in8(struct device *dev, struct device_attribute *attr, 334static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
@@ -344,9 +341,11 @@ static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
344 if (strict_strtol(buf, 10, &reqval)) 341 if (strict_strtol(buf, 10, &reqval))
345 return -EINVAL; 342 return -EINVAL;
346 343
347 reqval = SENSORS_LIMIT(reqval, 0, asc7621_in_scaling[nr]); 344 reqval = SENSORS_LIMIT(reqval, 0, 0xffff);
345
346 reqval = reqval * 0xc0 / asc7621_in_scaling[nr];
348 347
349 reqval = (reqval * 255 + 128) / asc7621_in_scaling[nr]; 348 reqval = SENSORS_LIMIT(reqval, 0, 0xff);
350 349
351 mutex_lock(&data->update_lock); 350 mutex_lock(&data->update_lock);
352 data->reg[param->msb[0]] = reqval; 351 data->reg[param->msb[0]] = reqval;
@@ -846,11 +845,11 @@ static struct asc7621_param asc7621_params[] = {
846 PWRITE(in3_max, 3, PRI_LOW, 0x4b, 0, 0, 0, in8), 845 PWRITE(in3_max, 3, PRI_LOW, 0x4b, 0, 0, 0, in8),
847 PWRITE(in4_max, 4, PRI_LOW, 0x4d, 0, 0, 0, in8), 846 PWRITE(in4_max, 4, PRI_LOW, 0x4d, 0, 0, 0, in8),
848 847
849 PREAD(in0_alarm, 0, PRI_LOW, 0x41, 0, 0x01, 0, bitmask), 848 PREAD(in0_alarm, 0, PRI_HIGH, 0x41, 0, 0x01, 0, bitmask),
850 PREAD(in1_alarm, 1, PRI_LOW, 0x41, 0, 0x01, 1, bitmask), 849 PREAD(in1_alarm, 1, PRI_HIGH, 0x41, 0, 0x01, 1, bitmask),
851 PREAD(in2_alarm, 2, PRI_LOW, 0x41, 0, 0x01, 2, bitmask), 850 PREAD(in2_alarm, 2, PRI_HIGH, 0x41, 0, 0x01, 2, bitmask),
852 PREAD(in3_alarm, 3, PRI_LOW, 0x41, 0, 0x01, 3, bitmask), 851 PREAD(in3_alarm, 3, PRI_HIGH, 0x41, 0, 0x01, 3, bitmask),
853 PREAD(in4_alarm, 4, PRI_LOW, 0x42, 0, 0x01, 0, bitmask), 852 PREAD(in4_alarm, 4, PRI_HIGH, 0x42, 0, 0x01, 0, bitmask),
854 853
855 PREAD(fan1_input, 0, PRI_HIGH, 0x29, 0x28, 0, 0, fan16), 854 PREAD(fan1_input, 0, PRI_HIGH, 0x29, 0x28, 0, 0, fan16),
856 PREAD(fan2_input, 1, PRI_HIGH, 0x2b, 0x2a, 0, 0, fan16), 855 PREAD(fan2_input, 1, PRI_HIGH, 0x2b, 0x2a, 0, 0, fan16),
@@ -862,10 +861,10 @@ static struct asc7621_param asc7621_params[] = {
862 PWRITE(fan3_min, 2, PRI_LOW, 0x59, 0x58, 0, 0, fan16), 861 PWRITE(fan3_min, 2, PRI_LOW, 0x59, 0x58, 0, 0, fan16),
863 PWRITE(fan4_min, 3, PRI_LOW, 0x5b, 0x5a, 0, 0, fan16), 862 PWRITE(fan4_min, 3, PRI_LOW, 0x5b, 0x5a, 0, 0, fan16),
864 863
865 PREAD(fan1_alarm, 0, PRI_LOW, 0x42, 0, 0x01, 0, bitmask), 864 PREAD(fan1_alarm, 0, PRI_HIGH, 0x42, 0, 0x01, 2, bitmask),
866 PREAD(fan2_alarm, 1, PRI_LOW, 0x42, 0, 0x01, 1, bitmask), 865 PREAD(fan2_alarm, 1, PRI_HIGH, 0x42, 0, 0x01, 3, bitmask),
867 PREAD(fan3_alarm, 2, PRI_LOW, 0x42, 0, 0x01, 2, bitmask), 866 PREAD(fan3_alarm, 2, PRI_HIGH, 0x42, 0, 0x01, 4, bitmask),
868 PREAD(fan4_alarm, 3, PRI_LOW, 0x42, 0, 0x01, 3, bitmask), 867 PREAD(fan4_alarm, 3, PRI_HIGH, 0x42, 0, 0x01, 5, bitmask),
869 868
870 PREAD(temp1_input, 0, PRI_HIGH, 0x25, 0x10, 0, 0, temp10), 869 PREAD(temp1_input, 0, PRI_HIGH, 0x25, 0x10, 0, 0, temp10),
871 PREAD(temp2_input, 1, PRI_HIGH, 0x26, 0x15, 0, 0, temp10), 870 PREAD(temp2_input, 1, PRI_HIGH, 0x26, 0x15, 0, 0, temp10),
@@ -886,10 +885,10 @@ static struct asc7621_param asc7621_params[] = {
886 PWRITE(temp3_max, 2, PRI_LOW, 0x53, 0, 0, 0, temp8), 885 PWRITE(temp3_max, 2, PRI_LOW, 0x53, 0, 0, 0, temp8),
887 PWRITE(temp4_max, 3, PRI_LOW, 0x35, 0, 0, 0, temp8), 886 PWRITE(temp4_max, 3, PRI_LOW, 0x35, 0, 0, 0, temp8),
888 887
889 PREAD(temp1_alarm, 0, PRI_LOW, 0x41, 0, 0x01, 4, bitmask), 888 PREAD(temp1_alarm, 0, PRI_HIGH, 0x41, 0, 0x01, 4, bitmask),
890 PREAD(temp2_alarm, 1, PRI_LOW, 0x41, 0, 0x01, 5, bitmask), 889 PREAD(temp2_alarm, 1, PRI_HIGH, 0x41, 0, 0x01, 5, bitmask),
891 PREAD(temp3_alarm, 2, PRI_LOW, 0x41, 0, 0x01, 6, bitmask), 890 PREAD(temp3_alarm, 2, PRI_HIGH, 0x41, 0, 0x01, 6, bitmask),
892 PREAD(temp4_alarm, 3, PRI_LOW, 0x43, 0, 0x01, 0, bitmask), 891 PREAD(temp4_alarm, 3, PRI_HIGH, 0x43, 0, 0x01, 0, bitmask),
893 892
894 PWRITE(temp1_source, 0, PRI_LOW, 0x02, 0, 0x07, 4, bitmask), 893 PWRITE(temp1_source, 0, PRI_LOW, 0x02, 0, 0x07, 4, bitmask),
895 PWRITE(temp2_source, 1, PRI_LOW, 0x02, 0, 0x07, 0, bitmask), 894 PWRITE(temp2_source, 1, PRI_LOW, 0x02, 0, 0x07, 0, bitmask),
@@ -898,7 +897,7 @@ static struct asc7621_param asc7621_params[] = {
898 897
899 PWRITE(temp1_smoothing_enable, 0, PRI_LOW, 0x62, 0, 0x01, 3, bitmask), 898 PWRITE(temp1_smoothing_enable, 0, PRI_LOW, 0x62, 0, 0x01, 3, bitmask),
900 PWRITE(temp2_smoothing_enable, 1, PRI_LOW, 0x63, 0, 0x01, 7, bitmask), 899 PWRITE(temp2_smoothing_enable, 1, PRI_LOW, 0x63, 0, 0x01, 7, bitmask),
901 PWRITE(temp3_smoothing_enable, 2, PRI_LOW, 0x64, 0, 0x01, 3, bitmask), 900 PWRITE(temp3_smoothing_enable, 2, PRI_LOW, 0x63, 0, 0x01, 3, bitmask),
902 PWRITE(temp4_smoothing_enable, 3, PRI_LOW, 0x3c, 0, 0x01, 3, bitmask), 901 PWRITE(temp4_smoothing_enable, 3, PRI_LOW, 0x3c, 0, 0x01, 3, bitmask),
903 902
904 PWRITE(temp1_smoothing_time, 0, PRI_LOW, 0x62, 0, 0x07, 0, temp_st), 903 PWRITE(temp1_smoothing_time, 0, PRI_LOW, 0x62, 0, 0x07, 0, temp_st),
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index c8ab50516672..7580f55e67e3 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -328,8 +328,8 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
328 lis3lv02d_joystick_disable(); 328 lis3lv02d_joystick_disable();
329 lis3lv02d_poweroff(&lis3_dev); 329 lis3lv02d_poweroff(&lis3_dev);
330 330
331 flush_work(&hpled_led.work);
332 led_classdev_unregister(&hpled_led.led_classdev); 331 led_classdev_unregister(&hpled_led.led_classdev);
332 flush_work(&hpled_led.work);
333 333
334 return lis3lv02d_remove_fs(&lis3_dev); 334 return lis3lv02d_remove_fs(&lis3_dev);
335} 335}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 9c6170cd9aac..87ab0568bb0e 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -564,7 +564,7 @@ config I2C_STU300
564 564
565config I2C_VERSATILE 565config I2C_VERSATILE
566 tristate "ARM Versatile/Realview I2C bus support" 566 tristate "ARM Versatile/Realview I2C bus support"
567 depends on ARCH_VERSATILE || ARCH_REALVIEW 567 depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
568 select I2C_ALGOBIT 568 select I2C_ALGOBIT
569 help 569 help
570 Say yes if you want to support the I2C serial bus on ARMs Versatile 570 Say yes if you want to support the I2C serial bus on ARMs Versatile
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index c2258a51fe0c..7c469a62c3c1 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -159,107 +159,131 @@ static void i2c_device_shutdown(struct device *dev)
159 driver->shutdown(client); 159 driver->shutdown(client);
160} 160}
161 161
162#ifdef CONFIG_SUSPEND 162#ifdef CONFIG_PM_SLEEP
163static int i2c_device_pm_suspend(struct device *dev) 163static int i2c_legacy_suspend(struct device *dev, pm_message_t mesg)
164{ 164{
165 const struct dev_pm_ops *pm; 165 struct i2c_client *client = i2c_verify_client(dev);
166 struct i2c_driver *driver;
166 167
167 if (!dev->driver) 168 if (!client || !dev->driver)
168 return 0; 169 return 0;
169 pm = dev->driver->pm; 170 driver = to_i2c_driver(dev->driver);
170 if (!pm || !pm->suspend) 171 if (!driver->suspend)
171 return 0; 172 return 0;
172 return pm->suspend(dev); 173 return driver->suspend(client, mesg);
173} 174}
174 175
175static int i2c_device_pm_resume(struct device *dev) 176static int i2c_legacy_resume(struct device *dev)
176{ 177{
177 const struct dev_pm_ops *pm; 178 struct i2c_client *client = i2c_verify_client(dev);
179 struct i2c_driver *driver;
178 180
179 if (!dev->driver) 181 if (!client || !dev->driver)
180 return 0; 182 return 0;
181 pm = dev->driver->pm; 183 driver = to_i2c_driver(dev->driver);
182 if (!pm || !pm->resume) 184 if (!driver->resume)
183 return 0; 185 return 0;
184 return pm->resume(dev); 186 return driver->resume(client);
185} 187}
186#else
187#define i2c_device_pm_suspend NULL
188#define i2c_device_pm_resume NULL
189#endif
190 188
191#ifdef CONFIG_PM_RUNTIME 189static int i2c_device_pm_suspend(struct device *dev)
192static int i2c_device_runtime_suspend(struct device *dev)
193{ 190{
194 const struct dev_pm_ops *pm; 191 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
195 192
196 if (!dev->driver) 193 if (pm_runtime_suspended(dev))
197 return 0;
198 pm = dev->driver->pm;
199 if (!pm || !pm->runtime_suspend)
200 return 0; 194 return 0;
201 return pm->runtime_suspend(dev);
202}
203 195
204static int i2c_device_runtime_resume(struct device *dev) 196 if (pm)
205{ 197 return pm->suspend ? pm->suspend(dev) : 0;
206 const struct dev_pm_ops *pm;
207 198
208 if (!dev->driver) 199 return i2c_legacy_suspend(dev, PMSG_SUSPEND);
209 return 0;
210 pm = dev->driver->pm;
211 if (!pm || !pm->runtime_resume)
212 return 0;
213 return pm->runtime_resume(dev);
214} 200}
215 201
216static int i2c_device_runtime_idle(struct device *dev) 202static int i2c_device_pm_resume(struct device *dev)
217{ 203{
218 const struct dev_pm_ops *pm = NULL; 204 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
219 int ret; 205 int ret;
220 206
221 if (dev->driver) 207 if (pm)
222 pm = dev->driver->pm; 208 ret = pm->resume ? pm->resume(dev) : 0;
223 if (pm && pm->runtime_idle) { 209 else
224 ret = pm->runtime_idle(dev); 210 ret = i2c_legacy_resume(dev);
225 if (ret) 211
226 return ret; 212 if (!ret) {
213 pm_runtime_disable(dev);
214 pm_runtime_set_active(dev);
215 pm_runtime_enable(dev);
227 } 216 }
228 217
229 return pm_runtime_suspend(dev); 218 return ret;
230} 219}
231#else
232#define i2c_device_runtime_suspend NULL
233#define i2c_device_runtime_resume NULL
234#define i2c_device_runtime_idle NULL
235#endif
236 220
237static int i2c_device_suspend(struct device *dev, pm_message_t mesg) 221static int i2c_device_pm_freeze(struct device *dev)
238{ 222{
239 struct i2c_client *client = i2c_verify_client(dev); 223 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
240 struct i2c_driver *driver;
241 224
242 if (!client || !dev->driver) 225 if (pm_runtime_suspended(dev))
243 return 0; 226 return 0;
244 driver = to_i2c_driver(dev->driver); 227
245 if (!driver->suspend) 228 if (pm)
246 return 0; 229 return pm->freeze ? pm->freeze(dev) : 0;
247 return driver->suspend(client, mesg); 230
231 return i2c_legacy_suspend(dev, PMSG_FREEZE);
248} 232}
249 233
250static int i2c_device_resume(struct device *dev) 234static int i2c_device_pm_thaw(struct device *dev)
251{ 235{
252 struct i2c_client *client = i2c_verify_client(dev); 236 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
253 struct i2c_driver *driver;
254 237
255 if (!client || !dev->driver) 238 if (pm_runtime_suspended(dev))
256 return 0; 239 return 0;
257 driver = to_i2c_driver(dev->driver); 240
258 if (!driver->resume) 241 if (pm)
242 return pm->thaw ? pm->thaw(dev) : 0;
243
244 return i2c_legacy_resume(dev);
245}
246
247static int i2c_device_pm_poweroff(struct device *dev)
248{
249 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
250
251 if (pm_runtime_suspended(dev))
259 return 0; 252 return 0;
260 return driver->resume(client); 253
254 if (pm)
255 return pm->poweroff ? pm->poweroff(dev) : 0;
256
257 return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
261} 258}
262 259
260static int i2c_device_pm_restore(struct device *dev)
261{
262 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
263 int ret;
264
265 if (pm)
266 ret = pm->restore ? pm->restore(dev) : 0;
267 else
268 ret = i2c_legacy_resume(dev);
269
270 if (!ret) {
271 pm_runtime_disable(dev);
272 pm_runtime_set_active(dev);
273 pm_runtime_enable(dev);
274 }
275
276 return ret;
277}
278#else /* !CONFIG_PM_SLEEP */
279#define i2c_device_pm_suspend NULL
280#define i2c_device_pm_resume NULL
281#define i2c_device_pm_freeze NULL
282#define i2c_device_pm_thaw NULL
283#define i2c_device_pm_poweroff NULL
284#define i2c_device_pm_restore NULL
285#endif /* !CONFIG_PM_SLEEP */
286
263static void i2c_client_dev_release(struct device *dev) 287static void i2c_client_dev_release(struct device *dev)
264{ 288{
265 kfree(to_i2c_client(dev)); 289 kfree(to_i2c_client(dev));
@@ -301,9 +325,15 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
301static const struct dev_pm_ops i2c_device_pm_ops = { 325static const struct dev_pm_ops i2c_device_pm_ops = {
302 .suspend = i2c_device_pm_suspend, 326 .suspend = i2c_device_pm_suspend,
303 .resume = i2c_device_pm_resume, 327 .resume = i2c_device_pm_resume,
304 .runtime_suspend = i2c_device_runtime_suspend, 328 .freeze = i2c_device_pm_freeze,
305 .runtime_resume = i2c_device_runtime_resume, 329 .thaw = i2c_device_pm_thaw,
306 .runtime_idle = i2c_device_runtime_idle, 330 .poweroff = i2c_device_pm_poweroff,
331 .restore = i2c_device_pm_restore,
332 SET_RUNTIME_PM_OPS(
333 pm_generic_runtime_suspend,
334 pm_generic_runtime_resume,
335 pm_generic_runtime_idle
336 )
307}; 337};
308 338
309struct bus_type i2c_bus_type = { 339struct bus_type i2c_bus_type = {
@@ -312,8 +342,6 @@ struct bus_type i2c_bus_type = {
312 .probe = i2c_device_probe, 342 .probe = i2c_device_probe,
313 .remove = i2c_device_remove, 343 .remove = i2c_device_remove,
314 .shutdown = i2c_device_shutdown, 344 .shutdown = i2c_device_shutdown,
315 .suspend = i2c_device_suspend,
316 .resume = i2c_device_resume,
317 .pm = &i2c_device_pm_ops, 345 .pm = &i2c_device_pm_ops,
318}; 346};
319EXPORT_SYMBOL_GPL(i2c_bus_type); 347EXPORT_SYMBOL_GPL(i2c_bus_type);
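The i2c core now routes system sleep through bus-level dev_pm_ops: each callback prefers the driver's own struct dev_pm_ops, falls back to the legacy i2c_driver .suspend/.resume hooks via i2c_legacy_suspend()/i2c_legacy_resume(), skips devices that are already runtime-suspended on the way down, and re-arms runtime PM (disable, set_active, enable) after resume and restore so the runtime status matches the woken hardware. A minimal sketch of a client driver opting into the preferred path; the 'foo' names are hypothetical:

    #include <linux/i2c.h>
    #include <linux/pm.h>

    static int foo_suspend(struct device *dev)
    {
            struct i2c_client *client = to_i2c_client(dev);

            dev_dbg(&client->dev, "suspending\n");
            /* quiesce the chip through 'client' here */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            struct i2c_client *client = to_i2c_client(dev);

            dev_dbg(&client->dev, "resuming\n");
            /* reprogram the chip through 'client' here */
            return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
            .suspend = foo_suspend,
            .resume  = foo_resume,
    };

    static struct i2c_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm   = &foo_pm_ops, /* picked up by i2c_device_pm_suspend() */
            },
            /* .probe, .remove, .id_table elided */
    };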
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 975adce5f40c..330d2a423362 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -46,6 +46,7 @@ source "drivers/infiniband/hw/ipath/Kconfig"
46source "drivers/infiniband/hw/ehca/Kconfig" 46source "drivers/infiniband/hw/ehca/Kconfig"
47source "drivers/infiniband/hw/amso1100/Kconfig" 47source "drivers/infiniband/hw/amso1100/Kconfig"
48source "drivers/infiniband/hw/cxgb3/Kconfig" 48source "drivers/infiniband/hw/cxgb3/Kconfig"
49source "drivers/infiniband/hw/cxgb4/Kconfig"
49source "drivers/infiniband/hw/mlx4/Kconfig" 50source "drivers/infiniband/hw/mlx4/Kconfig"
50source "drivers/infiniband/hw/nes/Kconfig" 51source "drivers/infiniband/hw/nes/Kconfig"
51 52
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index ed35e4496241..0c4e589d746e 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
4obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/ 4obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
5obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/ 5obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
6obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/ 6obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
7obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/
7obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/ 8obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
8obj-$(CONFIG_INFINIBAND_NES) += hw/nes/ 9obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
9obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ 10obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6d777069d86d..b930b8110a63 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -79,7 +79,6 @@ static DEFINE_IDR(sdp_ps);
79static DEFINE_IDR(tcp_ps); 79static DEFINE_IDR(tcp_ps);
80static DEFINE_IDR(udp_ps); 80static DEFINE_IDR(udp_ps);
81static DEFINE_IDR(ipoib_ps); 81static DEFINE_IDR(ipoib_ps);
82static int next_port;
83 82
84struct cma_device { 83struct cma_device {
85 struct list_head list; 84 struct list_head list;
@@ -1677,13 +1676,13 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
1677 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED)) 1676 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
1678 return -EINVAL; 1677 return -EINVAL;
1679 1678
1680 id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL); 1679 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
1680 GFP_KERNEL);
1681 if (!id->route.path_rec) { 1681 if (!id->route.path_rec) {
1682 ret = -ENOMEM; 1682 ret = -ENOMEM;
1683 goto err; 1683 goto err;
1684 } 1684 }
1685 1685
1686 memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
1687 id->route.num_paths = num_paths; 1686 id->route.num_paths = num_paths;
1688 return 0; 1687 return 0;
1689err: 1688err:
@@ -1970,47 +1969,33 @@ err1:
1970 1969
1971static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) 1970static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
1972{ 1971{
1973 struct rdma_bind_list *bind_list; 1972 static unsigned int last_used_port;
1974 int port, ret, low, high; 1973 int low, high, remaining;
1975 1974 unsigned int rover;
1976 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1977 if (!bind_list)
1978 return -ENOMEM;
1979
1980retry:
1981 /* FIXME: add proper port randomization per like inet_csk_get_port */
1982 do {
1983 ret = idr_get_new_above(ps, bind_list, next_port, &port);
1984 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
1985
1986 if (ret)
1987 goto err1;
1988 1975
1989 inet_get_local_port_range(&low, &high); 1976 inet_get_local_port_range(&low, &high);
1990 if (port > high) { 1977 remaining = (high - low) + 1;
1991 if (next_port != low) { 1978 rover = net_random() % remaining + low;
1992 idr_remove(ps, port); 1979retry:
1993 next_port = low; 1980 if (last_used_port != rover &&
1994 goto retry; 1981 !idr_find(ps, (unsigned short) rover)) {
1995 } 1982 int ret = cma_alloc_port(ps, id_priv, rover);
1996 ret = -EADDRNOTAVAIL; 1983 /*
1997 goto err2; 1984 * Remember previously used port number in order to avoid
 1985 * re-using the same port immediately after it is closed.
1986 */
1987 if (!ret)
1988 last_used_port = rover;
1989 if (ret != -EADDRNOTAVAIL)
1990 return ret;
1998 } 1991 }
1999 1992 if (--remaining) {
2000 if (port == high) 1993 rover++;
2001 next_port = low; 1994 if ((rover < low) || (rover > high))
2002 else 1995 rover = low;
2003 next_port = port + 1; 1996 goto retry;
2004 1997 }
2005 bind_list->ps = ps; 1998 return -EADDRNOTAVAIL;
2006 bind_list->port = (unsigned short) port;
2007 cma_bind_port(bind_list, id_priv);
2008 return 0;
2009err2:
2010 idr_remove(ps, port);
2011err1:
2012 kfree(bind_list);
2013 return ret;
2014} 1999}
2015 2000
2016static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) 2001static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
@@ -2995,12 +2980,7 @@ static void cma_remove_one(struct ib_device *device)
2995 2980
2996static int __init cma_init(void) 2981static int __init cma_init(void)
2997{ 2982{
2998 int ret, low, high, remaining; 2983 int ret;
2999
3000 get_random_bytes(&next_port, sizeof next_port);
3001 inet_get_local_port_range(&low, &high);
3002 remaining = (high - low) + 1;
3003 next_port = ((unsigned int) next_port % remaining) + low;
3004 2984
3005 cma_wq = create_singlethread_workqueue("rdma_cm"); 2985 cma_wq = create_singlethread_workqueue("rdma_cm");
3006 if (!cma_wq) 2986 if (!cma_wq)
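cma_alloc_any_port() now chooses ports the way the core networking stack does: start from a random rover inside the local port range, skip the most recently used port, and walk the whole range with wraparound before giving up with -EADDRNOTAVAIL. That removes the global next_port cursor and the seeding code deleted from cma_init(). A userspace-flavoured sketch of the rotor, where is_port_free() stands in for the idr_find()/cma_alloc_port() pair:

    #include <stdlib.h>

    /* Sketch of the rotor search adopted above; is_port_free() is a
     * stand-in for the idr lookup plus bind attempt. */
    static int pick_port(int low, int high, unsigned int last_used,
                         int (*is_port_free)(unsigned short port))
    {
            int remaining = (high - low) + 1;
            unsigned int rover = rand() % remaining + low;

            while (remaining--) {
                    if (rover != last_used && is_port_free(rover))
                            return rover; /* caller records it as last_used */
                    if (++rover > (unsigned int)high)
                            rover = low;
            }
            return -1; /* the kernel code returns -EADDRNOTAVAIL */
    }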
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1df1194aeba4..6dc7b77d5d29 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -291,13 +291,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
291 } 291 }
292 292
293 if (mad_reg_req) { 293 if (mad_reg_req) {
294 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL); 294 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
295 if (!reg_req) { 295 if (!reg_req) {
296 ret = ERR_PTR(-ENOMEM); 296 ret = ERR_PTR(-ENOMEM);
297 goto error3; 297 goto error3;
298 } 298 }
299 /* Make a copy of the MAD registration request */
300 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
301 } 299 }
302 300
303 /* Now, fill in the various structures */ 301 /* Now, fill in the various structures */
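kmemdup() allocates and copies in one call, so the allocation size and the copy size can no longer drift apart; the path-record copy in cma.c above gets the same conversion. The shape of the transformation, shown as a simplified fragment:

    /* before: two steps that must agree on the size */
    reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
    if (!reg_req)
            goto error3;
    memcpy(reg_req, mad_reg_req, sizeof *reg_req);

    /* after: one step, same failure handling */
    reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
    if (!reg_req)
            goto error3;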
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 512b1c43460c..46474842cfe9 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1181,7 +1181,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp)
1181 file->filp = filp; 1181 file->filp = filp;
1182 file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev); 1182 file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);
1183 1183
1184 return 0; 1184 return nonseekable_open(inode, filp);
1185} 1185}
1186 1186
1187static int ib_ucm_close(struct inode *inode, struct file *filp) 1187static int ib_ucm_close(struct inode *inode, struct file *filp)
@@ -1229,6 +1229,7 @@ static const struct file_operations ucm_fops = {
1229 .release = ib_ucm_close, 1229 .release = ib_ucm_close,
1230 .write = ib_ucm_write, 1230 .write = ib_ucm_write,
1231 .poll = ib_ucm_poll, 1231 .poll = ib_ucm_poll,
1232 .llseek = no_llseek,
1232}; 1233};
1233 1234
1234static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, 1235static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
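None of these user-space-facing character devices has a meaningful file position, so each open path now finishes with nonseekable_open(), which clears FMODE_LSEEK, FMODE_PREAD and FMODE_PWRITE, and each file_operations grows .llseek = no_llseek, which makes lseek(2) fail with -ESPIPE; ucma, user_mad and uverbs below receive the same treatment. A minimal sketch of the pattern for a hypothetical device:

    /* 'foo' is hypothetical; the helpers are the real kernel APIs. */
    static int foo_open(struct inode *inode, struct file *filp)
    {
            /* ...per-open setup... */
            return nonseekable_open(inode, filp);
    }

    static const struct file_operations foo_fops = {
            .owner  = THIS_MODULE,
            .open   = foo_open,
            .llseek = no_llseek, /* lseek(2) returns -ESPIPE */
    };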
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 46185084121e..ac7edc24165c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1220,7 +1220,8 @@ static int ucma_open(struct inode *inode, struct file *filp)
1220 1220
1221 filp->private_data = file; 1221 filp->private_data = file;
1222 file->filp = filp; 1222 file->filp = filp;
1223 return 0; 1223
1224 return nonseekable_open(inode, filp);
1224} 1225}
1225 1226
1226static int ucma_close(struct inode *inode, struct file *filp) 1227static int ucma_close(struct inode *inode, struct file *filp)
@@ -1250,6 +1251,7 @@ static const struct file_operations ucma_fops = {
1250 .release = ucma_close, 1251 .release = ucma_close,
1251 .write = ucma_write, 1252 .write = ucma_write,
1252 .poll = ucma_poll, 1253 .poll = ucma_poll,
1254 .llseek = no_llseek,
1253}; 1255};
1254 1256
1255static struct miscdevice ucma_misc = { 1257static struct miscdevice ucma_misc = {
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index e7db054fb1c8..6babb72b39fc 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -781,7 +781,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
781{ 781{
782 struct ib_umad_port *port; 782 struct ib_umad_port *port;
783 struct ib_umad_file *file; 783 struct ib_umad_file *file;
784 int ret = 0; 784 int ret;
785 785
786 port = container_of(inode->i_cdev, struct ib_umad_port, cdev); 786 port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
787 if (port) 787 if (port)
@@ -814,6 +814,8 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
814 814
815 list_add_tail(&file->port_list, &port->file_list); 815 list_add_tail(&file->port_list, &port->file_list);
816 816
817 ret = nonseekable_open(inode, filp);
818
817out: 819out:
818 mutex_unlock(&port->file_mutex); 820 mutex_unlock(&port->file_mutex);
819 return ret; 821 return ret;
@@ -866,7 +868,8 @@ static const struct file_operations umad_fops = {
866 .compat_ioctl = ib_umad_compat_ioctl, 868 .compat_ioctl = ib_umad_compat_ioctl,
867#endif 869#endif
868 .open = ib_umad_open, 870 .open = ib_umad_open,
869 .release = ib_umad_close 871 .release = ib_umad_close,
872 .llseek = no_llseek,
870}; 873};
871 874
872static int ib_umad_sm_open(struct inode *inode, struct file *filp) 875static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -903,7 +906,7 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
903 906
904 filp->private_data = port; 907 filp->private_data = port;
905 908
906 return 0; 909 return nonseekable_open(inode, filp);
907 910
908fail: 911fail:
909 kref_put(&port->umad_dev->ref, ib_umad_release_dev); 912 kref_put(&port->umad_dev->ref, ib_umad_release_dev);
@@ -933,7 +936,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
933static const struct file_operations umad_sm_fops = { 936static const struct file_operations umad_sm_fops = {
934 .owner = THIS_MODULE, 937 .owner = THIS_MODULE,
935 .open = ib_umad_sm_open, 938 .open = ib_umad_sm_open,
936 .release = ib_umad_sm_close 939 .release = ib_umad_sm_close,
940 .llseek = no_llseek,
937}; 941};
938 942
939static struct ib_client umad_client = { 943static struct ib_client umad_client = {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index fb3526254426..ec83e9fe387b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -369,7 +369,8 @@ static const struct file_operations uverbs_event_fops = {
369 .read = ib_uverbs_event_read, 369 .read = ib_uverbs_event_read,
370 .poll = ib_uverbs_event_poll, 370 .poll = ib_uverbs_event_poll,
371 .release = ib_uverbs_event_close, 371 .release = ib_uverbs_event_close,
372 .fasync = ib_uverbs_event_fasync 372 .fasync = ib_uverbs_event_fasync,
373 .llseek = no_llseek,
373}; 374};
374 375
375void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context) 376void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -623,7 +624,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
623 624
624 filp->private_data = file; 625 filp->private_data = file;
625 626
626 return 0; 627 return nonseekable_open(inode, filp);
627 628
628err_module: 629err_module:
629 module_put(dev->ib_dev->owner); 630 module_put(dev->ib_dev->owner);
@@ -651,7 +652,8 @@ static const struct file_operations uverbs_fops = {
651 .owner = THIS_MODULE, 652 .owner = THIS_MODULE,
652 .write = ib_uverbs_write, 653 .write = ib_uverbs_write,
653 .open = ib_uverbs_open, 654 .open = ib_uverbs_open,
654 .release = ib_uverbs_close 655 .release = ib_uverbs_close,
656 .llseek = no_llseek,
655}; 657};
656 658
657static const struct file_operations uverbs_mmap_fops = { 659static const struct file_operations uverbs_mmap_fops = {
@@ -659,7 +661,8 @@ static const struct file_operations uverbs_mmap_fops = {
659 .write = ib_uverbs_write, 661 .write = ib_uverbs_write,
660 .mmap = ib_uverbs_mmap, 662 .mmap = ib_uverbs_mmap,
661 .open = ib_uverbs_open, 663 .open = ib_uverbs_open,
662 .release = ib_uverbs_close 664 .release = ib_uverbs_close,
665 .llseek = no_llseek,
663}; 666};
664 667
665static struct ib_client uverbs_client = { 668static struct ib_client uverbs_client = {
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index f7ff66f98361..6ae698e68775 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -250,7 +250,7 @@ struct c2_array {
250struct sp_chunk { 250struct sp_chunk {
251 struct sp_chunk *next; 251 struct sp_chunk *next;
252 dma_addr_t dma_addr; 252 dma_addr_t dma_addr;
253 DECLARE_PCI_UNMAP_ADDR(mapping); 253 DEFINE_DMA_UNMAP_ADDR(mapping);
254 u16 head; 254 u16 head;
255 u16 shared_ptr[0]; 255 u16 shared_ptr[0];
256}; 256};
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index d4f5f5d42e90..78d247ec6961 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -49,7 +49,7 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
49 return -ENOMEM; 49 return -ENOMEM;
50 50
51 new_head->dma_addr = dma_addr; 51 new_head->dma_addr = dma_addr;
52 pci_unmap_addr_set(new_head, mapping, new_head->dma_addr); 52 dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
53 53
54 new_head->next = NULL; 54 new_head->next = NULL;
55 new_head->head = 0; 55 new_head->head = 0;
@@ -81,7 +81,7 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
81 while (root) { 81 while (root) {
82 next = root->next; 82 next = root->next;
83 dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root, 83 dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
84 pci_unmap_addr(root, mapping)); 84 dma_unmap_addr(root, mapping));
85 root = next; 85 root = next;
86 } 86 }
87} 87}
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index f7b0fc23f413..49e0e8533f74 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -257,7 +257,7 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
257static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) 257static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
258{ 258{
259 dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size, 259 dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
260 mq->msg_pool.host, pci_unmap_addr(mq, mapping)); 260 mq->msg_pool.host, dma_unmap_addr(mq, mapping));
261} 261}
262 262
263static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, 263static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
@@ -278,7 +278,7 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
278 NULL, /* peer (currently unknown) */ 278 NULL, /* peer (currently unknown) */
279 C2_MQ_HOST_TARGET); 279 C2_MQ_HOST_TARGET);
280 280
281 pci_unmap_addr_set(mq, mapping, mq->host_dma); 281 dma_unmap_addr_set(mq, mapping, mq->host_dma);
282 282
283 return 0; 283 return 0;
284} 284}
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
index acede007b94a..fc1b9a7cec4b 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.h
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -71,7 +71,7 @@ struct c2_mq {
71 u8 __iomem *adapter; 71 u8 __iomem *adapter;
72 } msg_pool; 72 } msg_pool;
73 dma_addr_t host_dma; 73 dma_addr_t host_dma;
74 DECLARE_PCI_UNMAP_ADDR(mapping); 74 DEFINE_DMA_UNMAP_ADDR(mapping);
75 u16 hint_count; 75 u16 hint_count;
76 u16 priv; 76 u16 priv;
77 struct c2_mq_shared __iomem *peer; 77 struct c2_mq_shared __iomem *peer;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h
index 1076df2ee96a..bf189987711f 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.h
+++ b/drivers/infiniband/hw/amso1100/c2_provider.h
@@ -50,7 +50,7 @@
50 50
51struct c2_buf_list { 51struct c2_buf_list {
52 void *buf; 52 void *buf;
53 DECLARE_PCI_UNMAP_ADDR(mapping) 53 DEFINE_DMA_UNMAP_ADDR(mapping);
54}; 54};
55 55
56 56
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 78c4bcc6ef60..85cfae4cad71 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -524,7 +524,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
524 err = -ENOMEM; 524 err = -ENOMEM;
525 goto bail1; 525 goto bail1;
526 } 526 }
527 pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma); 527 dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
528 pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages, 528 pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
529 (unsigned long long) c2dev->rep_vq.host_dma); 529 (unsigned long long) c2dev->rep_vq.host_dma);
530 c2_mq_rep_init(&c2dev->rep_vq, 530 c2_mq_rep_init(&c2dev->rep_vq,
@@ -545,7 +545,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
545 err = -ENOMEM; 545 err = -ENOMEM;
546 goto bail2; 546 goto bail2;
547 } 547 }
548 pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma); 548 dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
549 pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages, 549 pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
550 (unsigned long long) c2dev->aeq.host_dma); 550 (unsigned long long) c2dev->aeq.host_dma);
551 c2_mq_rep_init(&c2dev->aeq, 551 c2_mq_rep_init(&c2dev->aeq,
@@ -596,11 +596,11 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
596 bail3: 596 bail3:
597 dma_free_coherent(&c2dev->pcidev->dev, 597 dma_free_coherent(&c2dev->pcidev->dev,
598 c2dev->aeq.q_size * c2dev->aeq.msg_size, 598 c2dev->aeq.q_size * c2dev->aeq.msg_size,
599 q2_pages, pci_unmap_addr(&c2dev->aeq, mapping)); 599 q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
600 bail2: 600 bail2:
601 dma_free_coherent(&c2dev->pcidev->dev, 601 dma_free_coherent(&c2dev->pcidev->dev,
602 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, 602 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
603 q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping)); 603 q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
604 bail1: 604 bail1:
605 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); 605 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
606 bail0: 606 bail0:
@@ -637,13 +637,13 @@ void __devexit c2_rnic_term(struct c2_dev *c2dev)
637 dma_free_coherent(&c2dev->pcidev->dev, 637 dma_free_coherent(&c2dev->pcidev->dev,
638 c2dev->aeq.q_size * c2dev->aeq.msg_size, 638 c2dev->aeq.q_size * c2dev->aeq.msg_size,
639 c2dev->aeq.msg_pool.host, 639 c2dev->aeq.msg_pool.host,
640 pci_unmap_addr(&c2dev->aeq, mapping)); 640 dma_unmap_addr(&c2dev->aeq, mapping));
641 641
642 /* Free the verbs reply queue */ 642 /* Free the verbs reply queue */
643 dma_free_coherent(&c2dev->pcidev->dev, 643 dma_free_coherent(&c2dev->pcidev->dev,
644 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, 644 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
645 c2dev->rep_vq.msg_pool.host, 645 c2dev->rep_vq.msg_pool.host,
646 pci_unmap_addr(&c2dev->rep_vq, mapping)); 646 dma_unmap_addr(&c2dev->rep_vq, mapping));
647 647
648 /* Free the MQ shared pointer pool */ 648 /* Free the MQ shared pointer pool */
649 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); 649 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
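DECLARE_PCI_UNMAP_ADDR() and the pci_unmap_addr*() accessors are PCI-specific spellings of a generic idea: store the DMA address needed for unmapping only on configurations that require it, and compile the field away to zero bytes everywhere else. The patch moves amso1100 (and cxgb3 below) to the device-agnostic DEFINE_DMA_UNMAP_ADDR()/dma_unmap_addr*() family; the usage pattern, with hypothetical names:

    struct foo_buf {
            void *vaddr;
            dma_addr_t dma;
            DEFINE_DMA_UNMAP_ADDR(mapping); /* zero-size unless the arch needs it */
    };

    /* after mapping */
    dma_unmap_addr_set(buf, mapping, buf->dma);

    /* at teardown */
    dma_free_coherent(dev, size, buf->vaddr, dma_unmap_addr(buf, mapping));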
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 35f286f1ad1e..005b7b52bc1e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -174,7 +174,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
174 kfree(cq->sw_queue); 174 kfree(cq->sw_queue);
175 return -ENOMEM; 175 return -ENOMEM;
176 } 176 }
177 pci_unmap_addr_set(cq, mapping, cq->dma_addr); 177 dma_unmap_addr_set(cq, mapping, cq->dma_addr);
178 memset(cq->queue, 0, size); 178 memset(cq->queue, 0, size);
179 setup.id = cq->cqid; 179 setup.id = cq->cqid;
180 setup.base_addr = (u64) (cq->dma_addr); 180 setup.base_addr = (u64) (cq->dma_addr);
@@ -297,7 +297,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
297 goto err4; 297 goto err4;
298 298
299 memset(wq->queue, 0, depth * sizeof(union t3_wr)); 299 memset(wq->queue, 0, depth * sizeof(union t3_wr));
300 pci_unmap_addr_set(wq, mapping, wq->dma_addr); 300 dma_unmap_addr_set(wq, mapping, wq->dma_addr);
301 wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr; 301 wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
302 if (!kernel_domain) 302 if (!kernel_domain)
303 wq->udb = (u64)rdev_p->rnic_info.udbell_physbase + 303 wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
@@ -325,7 +325,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
325 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), 325 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
326 (1UL << (cq->size_log2)) 326 (1UL << (cq->size_log2))
327 * sizeof(struct t3_cqe), cq->queue, 327 * sizeof(struct t3_cqe), cq->queue,
328 pci_unmap_addr(cq, mapping)); 328 dma_unmap_addr(cq, mapping));
329 cxio_hal_put_cqid(rdev_p->rscp, cq->cqid); 329 cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
330 return err; 330 return err;
331} 331}
@@ -336,7 +336,7 @@ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
336 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), 336 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
337 (1UL << (wq->size_log2)) 337 (1UL << (wq->size_log2))
338 * sizeof(union t3_wr), wq->queue, 338 * sizeof(union t3_wr), wq->queue,
339 pci_unmap_addr(wq, mapping)); 339 dma_unmap_addr(wq, mapping));
340 kfree(wq->sq); 340 kfree(wq->sq);
341 cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2)); 341 cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
342 kfree(wq->rq); 342 kfree(wq->rq);
@@ -537,7 +537,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
537 err = -ENOMEM; 537 err = -ENOMEM;
538 goto err; 538 goto err;
539 } 539 }
540 pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping, 540 dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
541 rdev_p->ctrl_qp.dma_addr); 541 rdev_p->ctrl_qp.dma_addr);
542 rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr; 542 rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
543 memset(rdev_p->ctrl_qp.workq, 0, 543 memset(rdev_p->ctrl_qp.workq, 0,
@@ -583,7 +583,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
583 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), 583 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
584 (1UL << T3_CTRL_QP_SIZE_LOG2) 584 (1UL << T3_CTRL_QP_SIZE_LOG2)
585 * sizeof(union t3_wr), rdev_p->ctrl_qp.workq, 585 * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
586 pci_unmap_addr(&rdev_p->ctrl_qp, mapping)); 586 dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
587 return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID); 587 return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
588} 588}
589 589
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 073373c2c560..8f0caf7d4482 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -71,7 +71,7 @@ struct cxio_hal_ctrl_qp {
71 wait_queue_head_t waitq;/* wait for RspQ/CQE msg */ 71 wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
72 union t3_wr *workq; /* the work request queue */ 72 union t3_wr *workq; /* the work request queue */
73 dma_addr_t dma_addr; /* pci bus address of the workq */ 73 dma_addr_t dma_addr; /* pci bus address of the workq */
74 DECLARE_PCI_UNMAP_ADDR(mapping) 74 DEFINE_DMA_UNMAP_ADDR(mapping);
75 void __iomem *doorbell; 75 void __iomem *doorbell;
76}; 76};
77 77
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 15073b2da1c5..e5ddb63e7d23 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -691,7 +691,7 @@ struct t3_swrq {
691struct t3_wq { 691struct t3_wq {
 692 union t3_wr *queue; /* DMA accessible memory */ 692 union t3_wr *queue; /* DMA accessible memory */
693 dma_addr_t dma_addr; /* DMA address for HW */ 693 dma_addr_t dma_addr; /* DMA address for HW */
 694 DECLARE_PCI_UNMAP_ADDR(mapping) /* unmap cruft */ 694 DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap cruft */
695 u32 error; /* 1 once we go to ERROR */ 695 u32 error; /* 1 once we go to ERROR */
696 u32 qpid; 696 u32 qpid;
697 u32 wptr; /* idx to next available WR slot */ 697 u32 wptr; /* idx to next available WR slot */
@@ -718,7 +718,7 @@ struct t3_cq {
718 u32 wptr; 718 u32 wptr;
719 u32 size_log2; 719 u32 size_log2;
720 dma_addr_t dma_addr; 720 dma_addr_t dma_addr;
721 DECLARE_PCI_UNMAP_ADDR(mapping) 721 DEFINE_DMA_UNMAP_ADDR(mapping);
722 struct t3_cqe *queue; 722 struct t3_cqe *queue;
723 struct t3_cqe *sw_queue; 723 struct t3_cqe *sw_queue;
724 u32 sw_rptr; 724 u32 sw_rptr;
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 63f975f3e30f..8e77dc543dd1 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -47,8 +47,6 @@ MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
47MODULE_LICENSE("Dual BSD/GPL"); 47MODULE_LICENSE("Dual BSD/GPL");
48MODULE_VERSION(DRV_VERSION); 48MODULE_VERSION(DRV_VERSION);
49 49
50cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
51
52static void open_rnic_dev(struct t3cdev *); 50static void open_rnic_dev(struct t3cdev *);
53static void close_rnic_dev(struct t3cdev *); 51static void close_rnic_dev(struct t3cdev *);
54static void iwch_event_handler(struct t3cdev *, u32, u32); 52static void iwch_event_handler(struct t3cdev *, u32, u32);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 4fef03296276..ebfb117ba68b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -102,12 +102,9 @@ static unsigned int cong_flavor = 1;
102module_param(cong_flavor, uint, 0644); 102module_param(cong_flavor, uint, 0644);
103MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)"); 103MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
104 104
105static void process_work(struct work_struct *work);
106static struct workqueue_struct *workq; 105static struct workqueue_struct *workq;
107static DECLARE_WORK(skb_work, process_work);
108 106
109static struct sk_buff_head rxq; 107static struct sk_buff_head rxq;
110static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];
111 108
112static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); 109static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
113static void ep_timeout(unsigned long arg); 110static void ep_timeout(unsigned long arg);
@@ -151,7 +148,7 @@ int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2
151 return -EIO; 148 return -EIO;
152 } 149 }
153 error = l2t_send(tdev, skb, l2e); 150 error = l2t_send(tdev, skb, l2e);
154 if (error) 151 if (error < 0)
155 kfree_skb(skb); 152 kfree_skb(skb);
156 return error; 153 return error;
157} 154}
@@ -167,7 +164,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
167 return -EIO; 164 return -EIO;
168 } 165 }
169 error = cxgb3_ofld_send(tdev, skb); 166 error = cxgb3_ofld_send(tdev, skb);
170 if (error) 167 if (error < 0)
171 kfree_skb(skb); 168 kfree_skb(skb);
172 return error; 169 return error;
173} 170}
@@ -302,27 +299,6 @@ static void release_ep_resources(struct iwch_ep *ep)
302 put_ep(&ep->com); 299 put_ep(&ep->com);
303} 300}
304 301
305static void process_work(struct work_struct *work)
306{
307 struct sk_buff *skb = NULL;
308 void *ep;
309 struct t3cdev *tdev;
310 int ret;
311
312 while ((skb = skb_dequeue(&rxq))) {
313 ep = *((void **) (skb->cb));
314 tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
315 ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
316 if (ret & CPL_RET_BUF_DONE)
317 kfree_skb(skb);
318
319 /*
320 * ep was referenced in sched(), and is freed here.
321 */
322 put_ep((struct iwch_ep_common *)ep);
323 }
324}
325
326static int status2errno(int status) 302static int status2errno(int status)
327{ 303{
328 switch (status) { 304 switch (status) {
@@ -2157,7 +2133,49 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2157 2133
2158/* 2134/*
2159 * All the CM events are handled on a work queue to have a safe context. 2135 * All the CM events are handled on a work queue to have a safe context.
2136 * These are the real handlers that are called from the work queue.
2160 */ 2137 */
2138static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
2139 [CPL_ACT_ESTABLISH] = act_establish,
2140 [CPL_ACT_OPEN_RPL] = act_open_rpl,
2141 [CPL_RX_DATA] = rx_data,
2142 [CPL_TX_DMA_ACK] = tx_ack,
2143 [CPL_ABORT_RPL_RSS] = abort_rpl,
2144 [CPL_ABORT_RPL] = abort_rpl,
2145 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
2146 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
2147 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
2148 [CPL_PASS_ESTABLISH] = pass_establish,
2149 [CPL_PEER_CLOSE] = peer_close,
2150 [CPL_ABORT_REQ_RSS] = peer_abort,
2151 [CPL_CLOSE_CON_RPL] = close_con_rpl,
2152 [CPL_RDMA_TERMINATE] = terminate,
2153 [CPL_RDMA_EC_STATUS] = ec_status,
2154};
2155
2156static void process_work(struct work_struct *work)
2157{
2158 struct sk_buff *skb = NULL;
2159 void *ep;
2160 struct t3cdev *tdev;
2161 int ret;
2162
2163 while ((skb = skb_dequeue(&rxq))) {
2164 ep = *((void **) (skb->cb));
2165 tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
2166 ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
2167 if (ret & CPL_RET_BUF_DONE)
2168 kfree_skb(skb);
2169
2170 /*
2171 * ep was referenced in sched(), and is freed here.
2172 */
2173 put_ep((struct iwch_ep_common *)ep);
2174 }
2175}
2176
2177static DECLARE_WORK(skb_work, process_work);
2178
2161static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 2179static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2162{ 2180{
2163 struct iwch_ep_common *epc = ctx; 2181 struct iwch_ep_common *epc = ctx;
@@ -2189,6 +2207,29 @@ static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2189 return CPL_RET_BUF_DONE; 2207 return CPL_RET_BUF_DONE;
2190} 2208}
2191 2209
2210/*
2211 * All upcalls from the T3 Core go to sched() to schedule the
2212 * processing on a work queue.
2213 */
2214cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
2215 [CPL_ACT_ESTABLISH] = sched,
2216 [CPL_ACT_OPEN_RPL] = sched,
2217 [CPL_RX_DATA] = sched,
2218 [CPL_TX_DMA_ACK] = sched,
2219 [CPL_ABORT_RPL_RSS] = sched,
2220 [CPL_ABORT_RPL] = sched,
2221 [CPL_PASS_OPEN_RPL] = sched,
2222 [CPL_CLOSE_LISTSRV_RPL] = sched,
2223 [CPL_PASS_ACCEPT_REQ] = sched,
2224 [CPL_PASS_ESTABLISH] = sched,
2225 [CPL_PEER_CLOSE] = sched,
2226 [CPL_CLOSE_CON_RPL] = sched,
2227 [CPL_ABORT_REQ_RSS] = sched,
2228 [CPL_RDMA_TERMINATE] = sched,
2229 [CPL_RDMA_EC_STATUS] = sched,
2230 [CPL_SET_TCB_RPL] = set_tcb_rpl,
2231};
2232
2192int __init iwch_cm_init(void) 2233int __init iwch_cm_init(void)
2193{ 2234{
2194 skb_queue_head_init(&rxq); 2235 skb_queue_head_init(&rxq);
@@ -2197,46 +2238,6 @@ int __init iwch_cm_init(void)
2197 if (!workq) 2238 if (!workq)
2198 return -ENOMEM; 2239 return -ENOMEM;
2199 2240
2200 /*
2201 * All upcalls from the T3 Core go to sched() to
2202 * schedule the processing on a work queue.
2203 */
2204 t3c_handlers[CPL_ACT_ESTABLISH] = sched;
2205 t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
2206 t3c_handlers[CPL_RX_DATA] = sched;
2207 t3c_handlers[CPL_TX_DMA_ACK] = sched;
2208 t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
2209 t3c_handlers[CPL_ABORT_RPL] = sched;
2210 t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
2211 t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
2212 t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
2213 t3c_handlers[CPL_PASS_ESTABLISH] = sched;
2214 t3c_handlers[CPL_PEER_CLOSE] = sched;
2215 t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
2216 t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
2217 t3c_handlers[CPL_RDMA_TERMINATE] = sched;
2218 t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
2219 t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
2220
2221 /*
2222 * These are the real handlers that are called from a
2223 * work queue.
2224 */
2225 work_handlers[CPL_ACT_ESTABLISH] = act_establish;
2226 work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
2227 work_handlers[CPL_RX_DATA] = rx_data;
2228 work_handlers[CPL_TX_DMA_ACK] = tx_ack;
2229 work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
2230 work_handlers[CPL_ABORT_RPL] = abort_rpl;
2231 work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
2232 work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
2233 work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
2234 work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
2235 work_handlers[CPL_PEER_CLOSE] = peer_close;
2236 work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
2237 work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
2238 work_handlers[CPL_RDMA_TERMINATE] = terminate;
2239 work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
2240 return 0; 2241 return 0;
2241} 2242}
2242 2243
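Two things change in iwch_cm.c besides the code motion: the send helpers now treat only negative returns from l2t_send() and cxgb3_ofld_send() as failures, presumably so positive NET_XMIT congestion codes no longer cause an skb that was actually queued to be freed, and the two CPL handler tables become designated-initializer arrays built at compile time. Defining work_handlers above process_work() lets it be const and lets DECLARE_WORK(skb_work, process_work) follow the function instead of relying on a forward declaration. The table style, sketched:

    /* Designated initializers: unlisted opcodes stay NULL, and the table
     * can live in rodata. The names mirror the iwch_cm.c arrays. */
    static const cxgb3_cpl_handler_func handlers[NUM_CPL_CMDS] = {
            [CPL_RX_DATA]    = rx_data,
            [CPL_PEER_CLOSE] = peer_close,
            /* ...one entry per opcode this driver handles... */
    };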
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
new file mode 100644
index 000000000000..ccb85eaaad75
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -0,0 +1,18 @@
1config INFINIBAND_CXGB4
2 tristate "Chelsio T4 RDMA Driver"
3 depends on CHELSIO_T4 && INET
4 select GENERIC_ALLOCATOR
5 ---help---
6 This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
7 10GbE adapters.
8
9 For general information about Chelsio and our products, visit
10 our website at <http://www.chelsio.com>.
11
12 For customer support, please visit our customer support page at
13 <http://www.chelsio.com/support.htm>.
14
15 Please send feedback to <linux-bugs@chelsio.com>.
16
17 To compile this driver as a module, choose M here: the module
18 will be called iw_cxgb4.
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
new file mode 100644
index 000000000000..e31a499f0172
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -0,0 +1,5 @@
1EXTRA_CFLAGS += -Idrivers/net/cxgb4
2
3obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
4
5iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
new file mode 100644
index 000000000000..30ce0a8eca09
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -0,0 +1,2374 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/list.h>
34#include <linux/workqueue.h>
35#include <linux/skbuff.h>
36#include <linux/timer.h>
37#include <linux/notifier.h>
38#include <linux/inetdevice.h>
39#include <linux/ip.h>
40#include <linux/tcp.h>
41
42#include <net/neighbour.h>
43#include <net/netevent.h>
44#include <net/route.h>
45
46#include "iw_cxgb4.h"
47
48static char *states[] = {
49 "idle",
50 "listen",
51 "connecting",
52 "mpa_wait_req",
53 "mpa_req_sent",
54 "mpa_req_rcvd",
55 "mpa_rep_sent",
56 "fpdu_mode",
57 "aborting",
58 "closing",
59 "moribund",
60 "dead",
61 NULL,
62};
63
64int c4iw_max_read_depth = 8;
65module_param(c4iw_max_read_depth, int, 0644);
66MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
67
68static int enable_tcp_timestamps;
69module_param(enable_tcp_timestamps, int, 0644);
70MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
71
72static int enable_tcp_sack;
73module_param(enable_tcp_sack, int, 0644);
74MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
75
76static int enable_tcp_window_scaling = 1;
77module_param(enable_tcp_window_scaling, int, 0644);
78MODULE_PARM_DESC(enable_tcp_window_scaling,
79 "Enable tcp window scaling (default=1)");
80
81int c4iw_debug;
82module_param(c4iw_debug, int, 0644);
83MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
84
85static int peer2peer;
86module_param(peer2peer, int, 0644);
87MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
88
89static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
90module_param(p2p_type, int, 0644);
91MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
92 "1=RDMA_READ 0=RDMA_WRITE (default 1)");
93
94static int ep_timeout_secs = 60;
95module_param(ep_timeout_secs, int, 0644);
96MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
97 "in seconds (default=60)");
98
99static int mpa_rev = 1;
100module_param(mpa_rev, int, 0644);
101MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
102 "1 is spec compliant. (default=1)");
103
104static int markers_enabled;
105module_param(markers_enabled, int, 0644);
106MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
107
108static int crc_enabled = 1;
109module_param(crc_enabled, int, 0644);
110MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
111
112static int rcv_win = 256 * 1024;
113module_param(rcv_win, int, 0644);
114MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
115
116static int snd_win = 32 * 1024;
117module_param(snd_win, int, 0644);
118MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
119
120static struct workqueue_struct *workq;
121
122static struct sk_buff_head rxq;
123
124static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
125static void ep_timeout(unsigned long arg);
126static void connect_reply_upcall(struct c4iw_ep *ep, int status);
127
128static LIST_HEAD(timeout_list);
129static spinlock_t timeout_lock;
130
131static void start_ep_timer(struct c4iw_ep *ep)
132{
133 PDBG("%s ep %p\n", __func__, ep);
134 if (timer_pending(&ep->timer)) {
135 PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
136 del_timer_sync(&ep->timer);
137 } else
138 c4iw_get_ep(&ep->com);
139 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
140 ep->timer.data = (unsigned long)ep;
141 ep->timer.function = ep_timeout;
142 add_timer(&ep->timer);
143}
144
145static void stop_ep_timer(struct c4iw_ep *ep)
146{
147 PDBG("%s ep %p\n", __func__, ep);
148 if (!timer_pending(&ep->timer)) {
 149 printk(KERN_ERR "%s timer stopped when it's not running! "
150 "ep %p state %u\n", __func__, ep, ep->com.state);
151 WARN_ON(1);
152 return;
153 }
154 del_timer_sync(&ep->timer);
155 c4iw_put_ep(&ep->com);
156}
157
158static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
159 struct l2t_entry *l2e)
160{
161 int error = 0;
162
163 if (c4iw_fatal_error(rdev)) {
164 kfree_skb(skb);
165 PDBG("%s - device in error state - dropping\n", __func__);
166 return -EIO;
167 }
168 error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
169 if (error < 0)
170 kfree_skb(skb);
171 return error;
172}
173
174int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
175{
176 int error = 0;
177
178 if (c4iw_fatal_error(rdev)) {
179 kfree_skb(skb);
180 PDBG("%s - device in error state - dropping\n", __func__);
181 return -EIO;
182 }
183 error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
184 if (error < 0)
185 kfree_skb(skb);
186 return error;
187}
188
189static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
190{
191 struct cpl_tid_release *req;
192
193 skb = get_skb(skb, sizeof *req, GFP_KERNEL);
194 if (!skb)
195 return;
196 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
197 INIT_TP_WR(req, hwtid);
198 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
199 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
200 c4iw_ofld_send(rdev, skb);
201 return;
202}
203
204static void set_emss(struct c4iw_ep *ep, u16 opt)
205{
206 ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
207 ep->mss = ep->emss;
208 if (GET_TCPOPT_TSTAMP(opt))
209 ep->emss -= 12;
210 if (ep->emss < 128)
211 ep->emss = 128;
212 PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
213 ep->mss, ep->emss);
214}
215
216static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
217{
218 unsigned long flags;
219 enum c4iw_ep_state state;
220
221 spin_lock_irqsave(&epc->lock, flags);
222 state = epc->state;
223 spin_unlock_irqrestore(&epc->lock, flags);
224 return state;
225}
226
227static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
228{
229 epc->state = new;
230}
231
232static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
233{
234 unsigned long flags;
235
236 spin_lock_irqsave(&epc->lock, flags);
237 PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
238 __state_set(epc, new);
239 spin_unlock_irqrestore(&epc->lock, flags);
240 return;
241}
242
243static void *alloc_ep(int size, gfp_t gfp)
244{
245 struct c4iw_ep_common *epc;
246
247 epc = kzalloc(size, gfp);
248 if (epc) {
249 kref_init(&epc->kref);
250 spin_lock_init(&epc->lock);
251 init_waitqueue_head(&epc->waitq);
252 }
253 PDBG("%s alloc ep %p\n", __func__, epc);
254 return epc;
255}
256
257void _c4iw_free_ep(struct kref *kref)
258{
259 struct c4iw_ep *ep;
260
261 ep = container_of(kref, struct c4iw_ep, com.kref);
262 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
263 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
264 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
265 dst_release(ep->dst);
266 cxgb4_l2t_release(ep->l2t);
267 }
268 kfree(ep);
269}
270
271static void release_ep_resources(struct c4iw_ep *ep)
272{
273 set_bit(RELEASE_RESOURCES, &ep->com.flags);
274 c4iw_put_ep(&ep->com);
275}
276
277static int status2errno(int status)
278{
279 switch (status) {
280 case CPL_ERR_NONE:
281 return 0;
282 case CPL_ERR_CONN_RESET:
283 return -ECONNRESET;
284 case CPL_ERR_ARP_MISS:
285 return -EHOSTUNREACH;
286 case CPL_ERR_CONN_TIMEDOUT:
287 return -ETIMEDOUT;
288 case CPL_ERR_TCAM_FULL:
289 return -ENOMEM;
290 case CPL_ERR_CONN_EXIST:
291 return -EADDRINUSE;
292 default:
293 return -EIO;
294 }
295}
296
297/*
 298 * Try to reuse skbs already allocated...
299 */
300static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
301{
302 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
303 skb_trim(skb, 0);
304 skb_get(skb);
305 skb_reset_transport_header(skb);
306 } else {
307 skb = alloc_skb(len, gfp);
308 }
309 return skb;
310}
311
312static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
313 __be32 peer_ip, __be16 local_port,
314 __be16 peer_port, u8 tos)
315{
316 struct rtable *rt;
317 struct flowi fl = {
318 .oif = 0,
319 .nl_u = {
320 .ip4_u = {
321 .daddr = peer_ip,
322 .saddr = local_ip,
323 .tos = tos}
324 },
325 .proto = IPPROTO_TCP,
326 .uli_u = {
327 .ports = {
328 .sport = local_port,
329 .dport = peer_port}
330 }
331 };
332
333 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
334 return NULL;
335 return rt;
336}
337
338static void arp_failure_discard(void *handle, struct sk_buff *skb)
339{
340 PDBG("%s c4iw_dev %p\n", __func__, handle);
341 kfree_skb(skb);
342}
343
344/*
345 * Handle an ARP failure for an active open.
346 */
347static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
348{
 349 printk(KERN_ERR MOD "ARP failure during connect\n");
350 kfree_skb(skb);
351}
352
353/*
354 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
355 * and send it along.
356 */
357static void abort_arp_failure(void *handle, struct sk_buff *skb)
358{
359 struct c4iw_rdev *rdev = handle;
360 struct cpl_abort_req *req = cplhdr(skb);
361
362 PDBG("%s rdev %p\n", __func__, rdev);
363 req->cmd = CPL_ABORT_NO_RST;
364 c4iw_ofld_send(rdev, skb);
365}
366
367static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
368{
369 unsigned int flowclen = 80;
370 struct fw_flowc_wr *flowc;
371 int i;
372
373 skb = get_skb(skb, flowclen, GFP_KERNEL);
374 flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
375
376 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
377 FW_FLOWC_WR_NPARAMS(8));
378 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
379 16)) | FW_WR_FLOWID(ep->hwtid));
380
381 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
382 flowc->mnemval[0].val = cpu_to_be32(0);
383 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
384 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
385 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
386 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
387 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
388 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
389 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
390 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
391 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
392 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
393 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
394 flowc->mnemval[6].val = cpu_to_be32(snd_win);
395 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
396 flowc->mnemval[7].val = cpu_to_be32(ep->emss);
397 /* Pad WR to 16 byte boundary */
398 flowc->mnemval[8].mnemonic = 0;
399 flowc->mnemval[8].val = 0;
400 for (i = 0; i < 9; i++) {
401 flowc->mnemval[i].r4[0] = 0;
402 flowc->mnemval[i].r4[1] = 0;
403 flowc->mnemval[i].r4[2] = 0;
404 }
405
406 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
407 c4iw_ofld_send(&ep->com.dev->rdev, skb);
408}
409
410static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
411{
412 struct cpl_close_con_req *req;
413 struct sk_buff *skb;
414 int wrlen = roundup(sizeof *req, 16);
415
416 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
417 skb = get_skb(NULL, wrlen, gfp);
418 if (!skb) {
419 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
420 return -ENOMEM;
421 }
422 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
423 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
424 req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
425 memset(req, 0, wrlen);
426 INIT_TP_WR(req, ep->hwtid);
427 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
428 ep->hwtid));
429 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
430}
431
432static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
433{
434 struct cpl_abort_req *req;
435 int wrlen = roundup(sizeof *req, 16);
436
437 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
438 skb = get_skb(skb, wrlen, gfp);
439 if (!skb) {
440 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
441 __func__);
442 return -ENOMEM;
443 }
444 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
445 t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
446 req = (struct cpl_abort_req *) skb_put(skb, wrlen);
447 memset(req, 0, wrlen);
448 INIT_TP_WR(req, ep->hwtid);
449 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
450 req->cmd = CPL_ABORT_SEND_RST;
451 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
452}
453
454static int send_connect(struct c4iw_ep *ep)
455{
456 struct cpl_act_open_req *req;
457 struct sk_buff *skb;
458 u64 opt0;
459 u32 opt2;
460 unsigned int mtu_idx;
461 int wscale;
462 int wrlen = roundup(sizeof *req, 16);
463
464 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
465
466 skb = get_skb(NULL, wrlen, GFP_KERNEL);
467 if (!skb) {
468 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
469 __func__);
470 return -ENOMEM;
471 }
472 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
473
474 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
475 wscale = compute_wscale(rcv_win);
476 opt0 = KEEP_ALIVE(1) |
477 WND_SCALE(wscale) |
478 MSS_IDX(mtu_idx) |
479 L2T_IDX(ep->l2t->idx) |
480 TX_CHAN(ep->tx_chan) |
481 SMAC_SEL(ep->smac_idx) |
482 DSCP(ep->tos) |
483 RCV_BUFSIZ(rcv_win>>10);
484 opt2 = RX_CHANNEL(0) |
485 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
486 if (enable_tcp_timestamps)
487 opt2 |= TSTAMPS_EN(1);
488 if (enable_tcp_sack)
489 opt2 |= SACK_EN(1);
490 if (wscale && enable_tcp_window_scaling)
491 opt2 |= WND_SCALE_EN(1);
492 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
493
494 req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
495 INIT_TP_WR(req, 0);
496 OPCODE_TID(req) = cpu_to_be32(
497 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
498 req->local_port = ep->com.local_addr.sin_port;
499 req->peer_port = ep->com.remote_addr.sin_port;
500 req->local_ip = ep->com.local_addr.sin_addr.s_addr;
501 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
502 req->opt0 = cpu_to_be64(opt0);
503 req->params = 0;
504 req->opt2 = cpu_to_be32(opt2);
505 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
506}
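
send_connect() derives the TCP window-scale shift from the receive
window via compute_wscale(), defined earlier in this file. As a hedged
illustration (close to, though not guaranteed to match, the driver's
helper), the scale is the smallest shift that lets the 16-bit TCP
window field cover the window; a 256KB receive window, for example,
needs a shift of 3:

    /* Smallest wscale (capped at 14 per RFC 1323) such that a 16-bit
     * window field shifted left by wscale covers 'win' bytes. */
    static int wscale_for(unsigned int win)
    {
            int wscale = 0;

            while (wscale < 14 && (65535U << wscale) < win)
                    wscale++;
            return wscale;
    }
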
507
508static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
509{
510 int mpalen, wrlen;
511 struct fw_ofld_tx_data_wr *req;
512 struct mpa_message *mpa;
513
514 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
515
516 BUG_ON(skb_cloned(skb));
517
518 mpalen = sizeof(*mpa) + ep->plen;
519 wrlen = roundup(mpalen + sizeof *req, 16);
520 skb = get_skb(skb, wrlen, GFP_KERNEL);
521 if (!skb) {
522 connect_reply_upcall(ep, -ENOMEM);
523 return;
524 }
525 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
526
527 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
528 memset(req, 0, wrlen);
529 req->op_to_immdlen = cpu_to_be32(
530 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
531 FW_WR_COMPL(1) |
532 FW_WR_IMMDLEN(mpalen));
533 req->flowid_len16 = cpu_to_be32(
534 FW_WR_FLOWID(ep->hwtid) |
535 FW_WR_LEN16(wrlen >> 4));
536 req->plen = cpu_to_be32(mpalen);
537 req->tunnel_to_proxy = cpu_to_be32(
538 FW_OFLD_TX_DATA_WR_FLUSH(1) |
539 FW_OFLD_TX_DATA_WR_SHOVE(1));
540
541 mpa = (struct mpa_message *)(req + 1);
542 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
543 mpa->flags = (crc_enabled ? MPA_CRC : 0) |
544 (markers_enabled ? MPA_MARKERS : 0);
545 mpa->private_data_size = htons(ep->plen);
546 mpa->revision = mpa_rev;
547
548 if (ep->plen)
549 memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
550
551 /*
552 * Reference the mpa skb. This ensures the data area
553 * will remain in memory until the hw acks the tx.
554 * Function fw4_ack() will deref it.
555 */
556 skb_get(skb);
557 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
558 BUG_ON(ep->mpa_skb);
559 ep->mpa_skb = skb;
560 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
561 start_ep_timer(ep);
562 state_set(&ep->com, MPA_REQ_SENT);
563 ep->mpa_attr.initiator = 1;
564 return;
565}
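
The three send_mpa_*() routines build the same MPA start frame
directly behind the FW_OFLD_TX_DATA_WR. A sketch of the layout that
the memcpy()/htons() accesses above imply (field sizes are inferred
here, not quoted from the driver header):

    #include <stdint.h>

    struct mpa_message_sketch {
            uint8_t  key[16];             /* MPA_KEY_REQ or MPA_KEY_REP */
            uint8_t  flags;               /* MPA_CRC, MPA_MARKERS, MPA_REJECT */
            uint8_t  revision;            /* mpa_rev */
            uint16_t private_data_size;   /* big-endian pdata length */
            uint8_t  private_data[];      /* ep->plen bytes follow */
    };
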
566
567static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
568{
569 int mpalen, wrlen;
570 struct fw_ofld_tx_data_wr *req;
571 struct mpa_message *mpa;
572 struct sk_buff *skb;
573
574 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
575
576 mpalen = sizeof(*mpa) + plen;
577 wrlen = roundup(mpalen + sizeof *req, 16);
578
579 skb = get_skb(NULL, wrlen, GFP_KERNEL);
580 if (!skb) {
581 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
582 return -ENOMEM;
583 }
584 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
585
586 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
587 memset(req, 0, wrlen);
588 req->op_to_immdlen = cpu_to_be32(
589 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
590 FW_WR_COMPL(1) |
591 FW_WR_IMMDLEN(mpalen));
592 req->flowid_len16 = cpu_to_be32(
593 FW_WR_FLOWID(ep->hwtid) |
594 FW_WR_LEN16(wrlen >> 4));
595 req->plen = cpu_to_be32(mpalen);
596 req->tunnel_to_proxy = cpu_to_be32(
597 FW_OFLD_TX_DATA_WR_FLUSH(1) |
598 FW_OFLD_TX_DATA_WR_SHOVE(1));
599
600 mpa = (struct mpa_message *)(req + 1);
601 memset(mpa, 0, sizeof(*mpa));
602 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
603 mpa->flags = MPA_REJECT;
604 mpa->revision = mpa_rev;
605 mpa->private_data_size = htons(plen);
606 if (plen)
607 memcpy(mpa->private_data, pdata, plen);
608
609 /*
610 * Reference the mpa skb. This ensures the data area
611 * will remain in memory until the hw acks the tx.
612 * Function fw4_ack() will deref it.
613 */
614 skb_get(skb);
615 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
616 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
617 BUG_ON(ep->mpa_skb);
618 ep->mpa_skb = skb;
619 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
620}
621
622static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
623{
624 int mpalen, wrlen;
625 struct fw_ofld_tx_data_wr *req;
626 struct mpa_message *mpa;
627 struct sk_buff *skb;
628
629 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
630
631 mpalen = sizeof(*mpa) + plen;
632 wrlen = roundup(mpalen + sizeof *req, 16);
633
634 skb = get_skb(NULL, wrlen, GFP_KERNEL);
635 if (!skb) {
636 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
637 return -ENOMEM;
638 }
639 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
640
641 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
642 memset(req, 0, wrlen);
643 req->op_to_immdlen = cpu_to_be32(
644 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
645 FW_WR_COMPL(1) |
646 FW_WR_IMMDLEN(mpalen));
647 req->flowid_len16 = cpu_to_be32(
648 FW_WR_FLOWID(ep->hwtid) |
649 FW_WR_LEN16(wrlen >> 4));
650 req->plen = cpu_to_be32(mpalen);
651 req->tunnel_to_proxy = cpu_to_be32(
652 FW_OFLD_TX_DATA_WR_FLUSH(1) |
653 FW_OFLD_TX_DATA_WR_SHOVE(1));
654
655 mpa = (struct mpa_message *)(req + 1);
656 memset(mpa, 0, sizeof(*mpa));
657 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
658 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
659 (markers_enabled ? MPA_MARKERS : 0);
660 mpa->revision = mpa_rev;
661 mpa->private_data_size = htons(plen);
662 if (plen)
663 memcpy(mpa->private_data, pdata, plen);
664
665 /*
666 * Reference the mpa skb. This ensures the data area
667 * will remain in memory until the hw acks the tx.
668 * Function fw4_ack() will deref it.
669 */
670 skb_get(skb);
671 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
672 ep->mpa_skb = skb;
673 state_set(&ep->com, MPA_REP_SENT);
674 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
675}
676
677static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
678{
679 struct c4iw_ep *ep;
680 struct cpl_act_establish *req = cplhdr(skb);
681 unsigned int tid = GET_TID(req);
682 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
683 struct tid_info *t = dev->rdev.lldi.tids;
684
685 ep = lookup_atid(t, atid);
686
687 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
688 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
689
690 dst_confirm(ep->dst);
691
692 /* setup the hwtid for this connection */
693 ep->hwtid = tid;
694 cxgb4_insert_tid(t, ep, tid);
695
696 ep->snd_seq = be32_to_cpu(req->snd_isn);
697 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
698
699 set_emss(ep, ntohs(req->tcp_opt));
700
701 /* dealloc the atid */
702 cxgb4_free_atid(t, atid);
703
704 /* start MPA negotiation */
705 send_flowc(ep, NULL);
706 send_mpa_req(ep, skb);
707
708 return 0;
709}
710
711static void close_complete_upcall(struct c4iw_ep *ep)
712{
713 struct iw_cm_event event;
714
715 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
716 memset(&event, 0, sizeof(event));
717 event.event = IW_CM_EVENT_CLOSE;
718 if (ep->com.cm_id) {
719 PDBG("close complete delivered ep %p cm_id %p tid %u\n",
720 ep, ep->com.cm_id, ep->hwtid);
721 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
722 ep->com.cm_id->rem_ref(ep->com.cm_id);
723 ep->com.cm_id = NULL;
724 ep->com.qp = NULL;
725 }
726}
727
728static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
729{
730 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
731 close_complete_upcall(ep);
732 state_set(&ep->com, ABORTING);
733 return send_abort(ep, skb, gfp);
734}
735
736static void peer_close_upcall(struct c4iw_ep *ep)
737{
738 struct iw_cm_event event;
739
740 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
741 memset(&event, 0, sizeof(event));
742 event.event = IW_CM_EVENT_DISCONNECT;
743 if (ep->com.cm_id) {
744 PDBG("peer close delivered ep %p cm_id %p tid %u\n",
745 ep, ep->com.cm_id, ep->hwtid);
746 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
747 }
748}
749
750static void peer_abort_upcall(struct c4iw_ep *ep)
751{
752 struct iw_cm_event event;
753
754 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
755 memset(&event, 0, sizeof(event));
756 event.event = IW_CM_EVENT_CLOSE;
757 event.status = -ECONNRESET;
758 if (ep->com.cm_id) {
759 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
760 ep->com.cm_id, ep->hwtid);
761 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
762 ep->com.cm_id->rem_ref(ep->com.cm_id);
763 ep->com.cm_id = NULL;
764 ep->com.qp = NULL;
765 }
766}
767
768static void connect_reply_upcall(struct c4iw_ep *ep, int status)
769{
770 struct iw_cm_event event;
771
772 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
773 memset(&event, 0, sizeof(event));
774 event.event = IW_CM_EVENT_CONNECT_REPLY;
775 event.status = status;
776 event.local_addr = ep->com.local_addr;
777 event.remote_addr = ep->com.remote_addr;
778
779 if ((status == 0) || (status == -ECONNREFUSED)) {
780 event.private_data_len = ep->plen;
781 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
782 }
783 if (ep->com.cm_id) {
784 PDBG("%s ep %p tid %u status %d\n", __func__, ep,
785 ep->hwtid, status);
786 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
787 if (status < 0) {
788 ep->com.cm_id->rem_ref(ep->com.cm_id);
789 ep->com.cm_id = NULL;
790 ep->com.qp = NULL;
791 }
792 }
793}
794
795static void connect_request_upcall(struct c4iw_ep *ep)
796{
797 struct iw_cm_event event;
798
799 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
800 memset(&event, 0, sizeof(event));
801 event.event = IW_CM_EVENT_CONNECT_REQUEST;
802 event.local_addr = ep->com.local_addr;
803 event.remote_addr = ep->com.remote_addr;
804 event.private_data_len = ep->plen;
805 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
806 event.provider_data = ep;
807 if (state_read(&ep->parent_ep->com) != DEAD) {
808 c4iw_get_ep(&ep->com);
809 ep->parent_ep->com.cm_id->event_handler(
810 ep->parent_ep->com.cm_id,
811 &event);
812 }
813 c4iw_put_ep(&ep->parent_ep->com);
814 ep->parent_ep = NULL;
815}
816
817static void established_upcall(struct c4iw_ep *ep)
818{
819 struct iw_cm_event event;
820
821 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
822 memset(&event, 0, sizeof(event));
823 event.event = IW_CM_EVENT_ESTABLISHED;
824 if (ep->com.cm_id) {
825 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
826 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
827 }
828}
829
830static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
831{
832 struct cpl_rx_data_ack *req;
833 struct sk_buff *skb;
834 int wrlen = roundup(sizeof *req, 16);
835
836 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
837 skb = get_skb(NULL, wrlen, GFP_KERNEL);
838 if (!skb) {
839 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
840 return 0;
841 }
842
843 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
844 memset(req, 0, wrlen);
845 INIT_TP_WR(req, ep->hwtid);
846 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
847 ep->hwtid));
848 req->credit_dack = cpu_to_be32(credits);
849 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
850 c4iw_ofld_send(&ep->com.dev->rdev, skb);
851 return credits;
852}
853
854static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
855{
856 struct mpa_message *mpa;
857 u16 plen;
858 struct c4iw_qp_attributes attrs;
859 enum c4iw_qp_attr_mask mask;
860 int err;
861
862 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
863
864 /*
865 * Stop mpa timer. If it expired, then the state has
866 * changed and we bail since ep_timeout already aborted
867 * the connection.
868 */
869 stop_ep_timer(ep);
870 if (state_read(&ep->com) != MPA_REQ_SENT)
871 return;
872
873 /*
874 * If we get more than the supported amount of private data
875 * then we must fail this connection.
876 */
877 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
878 err = -EINVAL;
879 goto err;
880 }
881
882 /*
883 * copy the new data into our accumulation buffer.
884 */
885 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
886 skb->len);
887 ep->mpa_pkt_len += skb->len;
888
889 /*
890 * if we don't even have the mpa message, then bail.
891 */
892 if (ep->mpa_pkt_len < sizeof(*mpa))
893 return;
894 mpa = (struct mpa_message *) ep->mpa_pkt;
895
896 /* Validate MPA header. */
897 if (mpa->revision != mpa_rev) {
898 err = -EPROTO;
899 goto err;
900 }
901 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
902 err = -EPROTO;
903 goto err;
904 }
905
906 plen = ntohs(mpa->private_data_size);
907
908 /*
909 * Fail if there's too much private data.
910 */
911 if (plen > MPA_MAX_PRIVATE_DATA) {
912 err = -EPROTO;
913 goto err;
914 }
915
916 /*
917 * Fail if plen does not account for the packet size.
918 */
919 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
920 err = -EPROTO;
921 goto err;
922 }
923
924 ep->plen = (u8) plen;
925
926 /*
927 * If we don't have all the pdata yet, then bail.
928 * We'll continue processing when more data arrives.
929 */
930 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
931 return;
932
933 if (mpa->flags & MPA_REJECT) {
934 err = -ECONNREFUSED;
935 goto err;
936 }
937
938 /*
939 * If we get here we have accumulated the entire mpa
940 * start reply message including private data, and
941 * the MPA header is valid.
942 */
943 state_set(&ep->com, FPDU_MODE);
944 ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
945 ep->mpa_attr.recv_marker_enabled = markers_enabled;
946 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
947 ep->mpa_attr.version = mpa_rev;
948 ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
949 FW_RI_INIT_P2PTYPE_DISABLED;
950 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
951 "xmit_marker_enabled=%d, version=%d\n", __func__,
952 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
953 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
954
955 attrs.mpa_attr = ep->mpa_attr;
956 attrs.max_ird = ep->ird;
957 attrs.max_ord = ep->ord;
958 attrs.llp_stream_handle = ep;
959 attrs.next_state = C4IW_QP_STATE_RTS;
960
961 mask = C4IW_QP_ATTR_NEXT_STATE |
962 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
963 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
964
965 /* bind QP and TID with INIT_WR */
966 err = c4iw_modify_qp(ep->com.qp->rhp,
967 ep->com.qp, mask, &attrs, 1);
968 if (err)
969 goto err;
970 goto out;
971err:
972 abort_connection(ep, skb, GFP_KERNEL);
973out:
974 connect_reply_upcall(ep, err);
975 return;
976}
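
The reply parser interleaves its bail-outs with the header checks.
Condensed into a single predicate (hypothetical names, reusing the
mpa_message_sketch layout shown earlier), the header is acceptable
when all four conditions hold:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    static int mpa_hdr_valid(const struct mpa_message_sketch *mpa,
                             size_t have, const uint8_t key[16],
                             uint8_t rev, size_t max_pdata)
    {
            uint16_t plen = ntohs(mpa->private_data_size);

            return mpa->revision == rev &&          /* revision match */
                   !memcmp(mpa->key, key, 16) &&    /* expected key */
                   plen <= max_pdata &&             /* pdata bounded */
                   have <= sizeof(*mpa) + plen;     /* no excess bytes */
    }
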
977
978static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
979{
980 struct mpa_message *mpa;
981 u16 plen;
982
983 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
984
985 if (state_read(&ep->com) != MPA_REQ_WAIT)
986 return;
987
988 /*
989 * If we get more than the supported amount of private data
990 * then we must fail this connection.
991 */
992 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
993 stop_ep_timer(ep);
994 abort_connection(ep, skb, GFP_KERNEL);
995 return;
996 }
997
998 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
999
1000 /*
1001 * Copy the new data into our accumulation buffer.
1002 */
1003 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1004 skb->len);
1005 ep->mpa_pkt_len += skb->len;
1006
1007 /*
1008 * If we don't even have the mpa message, then bail.
1009 * We'll continue processing when more data arrives.
1010 */
1011 if (ep->mpa_pkt_len < sizeof(*mpa))
1012 return;
1013
1014 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1015 stop_ep_timer(ep);
1016 mpa = (struct mpa_message *) ep->mpa_pkt;
1017
1018 /*
1019 * Validate MPA Header.
1020 */
1021 if (mpa->revision != mpa_rev) {
1022 abort_connection(ep, skb, GFP_KERNEL);
1023 return;
1024 }
1025
1026 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
1027 abort_connection(ep, skb, GFP_KERNEL);
1028 return;
1029 }
1030
1031 plen = ntohs(mpa->private_data_size);
1032
1033 /*
1034 * Fail if there's too much private data.
1035 */
1036 if (plen > MPA_MAX_PRIVATE_DATA) {
1037 abort_connection(ep, skb, GFP_KERNEL);
1038 return;
1039 }
1040
1041 /*
1042 * Fail if plen does not account for the packet size.
1043 */
1044 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1045 abort_connection(ep, skb, GFP_KERNEL);
1046 return;
1047 }
1048 ep->plen = (u8) plen;
1049
1050 /*
1051 * If we don't have all the pdata yet, then bail.
1052 */
1053 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1054 return;
1055
1056 /*
1057 * If we get here we have accumulated the entire mpa
1058 * start request message including private data.
1059 */
1060 ep->mpa_attr.initiator = 0;
1061 ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
1062 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1063 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1064 ep->mpa_attr.version = mpa_rev;
1065 ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
1066 FW_RI_INIT_P2PTYPE_DISABLED;
1067 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1068 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
1069 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1070 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1071 ep->mpa_attr.p2p_type);
1072
1073 state_set(&ep->com, MPA_REQ_RCVD);
1074
1075 /* drive upcall */
1076 connect_request_upcall(ep);
1077 return;
1078}
1079
1080static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1081{
1082 struct c4iw_ep *ep;
1083 struct cpl_rx_data *hdr = cplhdr(skb);
1084 unsigned int dlen = ntohs(hdr->len);
1085 unsigned int tid = GET_TID(hdr);
1086 struct tid_info *t = dev->rdev.lldi.tids;
1087
1088 ep = lookup_tid(t, tid);
1089 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1090 skb_pull(skb, sizeof(*hdr));
1091 skb_trim(skb, dlen);
1092
1093 ep->rcv_seq += dlen;
1094 BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
1095
1096 /* update RX credits */
1097 update_rx_credits(ep, dlen);
1098
1099 switch (state_read(&ep->com)) {
1100 case MPA_REQ_SENT:
1101 process_mpa_reply(ep, skb);
1102 break;
1103 case MPA_REQ_WAIT:
1104 process_mpa_request(ep, skb);
1105 break;
1106 case MPA_REP_SENT:
1107 break;
1108 default:
1109 printk(KERN_ERR MOD "%s Unexpected streaming data."
1110 " ep %p state %d tid %u\n",
1111 __func__, ep, state_read(&ep->com), ep->hwtid);
1112
1113 /*
1114 * The ep will timeout and inform the ULP of the failure.
1115 * See ep_timeout().
1116 */
1117 break;
1118 }
1119 return 0;
1120}
1121
1122static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1123{
1124 struct c4iw_ep *ep;
1125 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1126 unsigned long flags;
1127 int release = 0;
1128 unsigned int tid = GET_TID(rpl);
1129 struct tid_info *t = dev->rdev.lldi.tids;
1130
1131 ep = lookup_tid(t, tid);
1132 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1133 BUG_ON(!ep);
1134 spin_lock_irqsave(&ep->com.lock, flags);
1135 switch (ep->com.state) {
1136 case ABORTING:
1137 __state_set(&ep->com, DEAD);
1138 release = 1;
1139 break;
1140 default:
1141 printk(KERN_ERR MOD "%s ep %p state %d\n",
1142 __func__, ep, ep->com.state);
1143 break;
1144 }
1145 spin_unlock_irqrestore(&ep->com.lock, flags);
1146
1147 if (release)
1148 release_ep_resources(ep);
1149 return 0;
1150}
1151
1152/*
1153 * Return whether a failed active open has allocated a TID
1154 */
1155static inline int act_open_has_tid(int status)
1156{
1157 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1158 status != CPL_ERR_ARP_MISS;
1159}
1160
1161static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1162{
1163 struct c4iw_ep *ep;
1164 struct cpl_act_open_rpl *rpl = cplhdr(skb);
1165 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
1166 ntohl(rpl->atid_status)));
1167 struct tid_info *t = dev->rdev.lldi.tids;
1168 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
1169
1170 ep = lookup_atid(t, atid);
1171
1172 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
1173 status, status2errno(status));
1174
1175 if (status == CPL_ERR_RTX_NEG_ADVICE) {
1176 printk(KERN_WARNING MOD "Connection problems for atid %u\n",
1177 atid);
1178 return 0;
1179 }
1180
1181 connect_reply_upcall(ep, status2errno(status));
1182 state_set(&ep->com, DEAD);
1183
1184 if (status && act_open_has_tid(status))
1185 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
1186
1187 cxgb4_free_atid(t, atid);
1188 dst_release(ep->dst);
1189 cxgb4_l2t_release(ep->l2t);
1190 c4iw_put_ep(&ep->com);
1191
1192 return 0;
1193}
1194
1195static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1196{
1197 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1198 struct tid_info *t = dev->rdev.lldi.tids;
1199 unsigned int stid = GET_TID(rpl);
1200 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1201
1202 if (!ep) {
1203 printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
1204 return 0;
1205 }
1206 PDBG("%s ep %p status %d error %d\n", __func__, ep,
1207 rpl->status, status2errno(rpl->status));
1208 ep->com.rpl_err = status2errno(rpl->status);
1209 ep->com.rpl_done = 1;
1210 wake_up(&ep->com.waitq);
1211
1212 return 0;
1213}
1214
1215static int listen_stop(struct c4iw_listen_ep *ep)
1216{
1217 struct sk_buff *skb;
1218 struct cpl_close_listsvr_req *req;
1219
1220 PDBG("%s ep %p\n", __func__, ep);
1221 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1222 if (!skb) {
1223 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
1224 return -ENOMEM;
1225 }
1226 req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
1227 INIT_TP_WR(req, 0);
1228 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
1229 ep->stid));
1230 req->reply_ctrl = cpu_to_be16(
1231 QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
1232 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
1233 return c4iw_ofld_send(&ep->com.dev->rdev, skb);
1234}
1235
1236static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1237{
1238 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1239 struct tid_info *t = dev->rdev.lldi.tids;
1240 unsigned int stid = GET_TID(rpl);
1241 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1242
1243 PDBG("%s ep %p\n", __func__, ep);
1244 ep->com.rpl_err = status2errno(rpl->status);
1245 ep->com.rpl_done = 1;
1246 wake_up(&ep->com.waitq);
1247 return 0;
1248}
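
pass_open_rpl() and close_listsrv_rpl() are the completion half of a
simple sleep/wake handshake: c4iw_create_listen() and
c4iw_destroy_listen() below block on ep->com.waitq until the reply
handler flips rpl_done and records rpl_err. A userspace analogue of
the pattern, with pthreads standing in for the kernel waitqueue (all
names illustrative):

    #include <pthread.h>

    struct rpl_wait {
            pthread_mutex_t lock;
            pthread_cond_t  cond;
            int done;
            int err;
    };

    /* requester: sleep until the reply arrives, return its status */
    static int wait_for_rpl(struct rpl_wait *w)
    {
            pthread_mutex_lock(&w->lock);
            while (!w->done)
                    pthread_cond_wait(&w->cond, &w->lock);
            pthread_mutex_unlock(&w->lock);
            return w->err;
    }

    /* reply handler: record status, mark done, wake the requester */
    static void complete_rpl(struct rpl_wait *w, int err)
    {
            pthread_mutex_lock(&w->lock);
            w->err = err;
            w->done = 1;
            pthread_cond_signal(&w->cond);
            pthread_mutex_unlock(&w->lock);
    }
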
1249
1250static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
1251 struct cpl_pass_accept_req *req)
1252{
1253 struct cpl_pass_accept_rpl *rpl;
1254 unsigned int mtu_idx;
1255 u64 opt0;
1256 u32 opt2;
1257 int wscale;
1258
1259 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1260 BUG_ON(skb_cloned(skb));
1261 skb_trim(skb, sizeof(*rpl));
1262 skb_get(skb);
1263 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
1264 wscale = compute_wscale(rcv_win);
1265 opt0 = KEEP_ALIVE(1) |
1266 WND_SCALE(wscale) |
1267 MSS_IDX(mtu_idx) |
1268 L2T_IDX(ep->l2t->idx) |
1269 TX_CHAN(ep->tx_chan) |
1270 SMAC_SEL(ep->smac_idx) |
1271 DSCP(ep->tos) |
1272 RCV_BUFSIZ(rcv_win>>10);
1273 opt2 = RX_CHANNEL(0) |
1274 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
1275
1276 if (enable_tcp_timestamps && req->tcpopt.tstamp)
1277 opt2 |= TSTAMPS_EN(1);
1278 if (enable_tcp_sack && req->tcpopt.sack)
1279 opt2 |= SACK_EN(1);
1280 if (wscale && enable_tcp_window_scaling)
1281 opt2 |= WND_SCALE_EN(1);
1282
1283 rpl = cplhdr(skb);
1284 INIT_TP_WR(rpl, ep->hwtid);
1285 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1286 ep->hwtid));
1287 rpl->opt0 = cpu_to_be64(opt0);
1288 rpl->opt2 = cpu_to_be32(opt2);
1289 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
1290 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1291
1292 return;
1293}
1294
1295static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
1296 struct sk_buff *skb)
1297{
1298 PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
1299 peer_ip);
1300 BUG_ON(skb_cloned(skb));
1301 skb_trim(skb, sizeof(struct cpl_tid_release));
1302 skb_get(skb);
1303 release_tid(&dev->rdev, hwtid, skb);
1304 return;
1305}
1306
1307static void get_4tuple(struct cpl_pass_accept_req *req,
1308 __be32 *local_ip, __be32 *peer_ip,
1309 __be16 *local_port, __be16 *peer_port)
1310{
1311 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
1312 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
1313 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
1314 struct tcphdr *tcp = (struct tcphdr *)
1315 ((u8 *)(req + 1) + eth_len + ip_len);
1316
1317 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
1318 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
1319 ntohs(tcp->dest));
1320
1321 *peer_ip = ip->saddr;
1322 *local_ip = ip->daddr;
1323 *peer_port = tcp->source;
1324 *local_port = tcp->dest;
1325
1326 return;
1327}
1328
1329static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1330{
1331 struct c4iw_ep *child_ep, *parent_ep;
1332 struct cpl_pass_accept_req *req = cplhdr(skb);
1333 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
1334 struct tid_info *t = dev->rdev.lldi.tids;
1335 unsigned int hwtid = GET_TID(req);
1336 struct dst_entry *dst;
1337 struct l2t_entry *l2t;
1338 struct rtable *rt;
1339 __be32 local_ip, peer_ip;
1340 __be16 local_port, peer_port;
1341 struct net_device *pdev;
1342 u32 tx_chan, smac_idx;
1343 u16 rss_qid;
1344 u32 mtu;
1345 int step;
1346 int txq_idx;
1347
1348 parent_ep = lookup_stid(t, stid);
1349 PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
1350
1351 get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
1352
1353 if (state_read(&parent_ep->com) != LISTEN) {
1354 printk(KERN_ERR MOD "%s - listening ep not in LISTEN\n",
1355 __func__);
1356 goto reject;
1357 }
1358
1359 /* Find output route */
1360 rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
1361 GET_POPEN_TOS(ntohl(req->tos_stid)));
1362 if (!rt) {
1363 printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
1364 __func__);
1365 goto reject;
1366 }
1367 dst = &rt->u.dst;
1368 if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
1369 pdev = ip_dev_find(&init_net, peer_ip);
1370 BUG_ON(!pdev);
1371 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1372 pdev, 0);
1373 mtu = pdev->mtu;
1374 tx_chan = cxgb4_port_chan(pdev);
1375 smac_idx = tx_chan << 1;
1376 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1377 txq_idx = cxgb4_port_idx(pdev) * step;
1378 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1379 rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
1380 dev_put(pdev);
1381 } else {
1382 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1383 dst->neighbour->dev, 0);
1384 mtu = dst_mtu(dst);
1385 tx_chan = cxgb4_port_chan(dst->neighbour->dev);
1386 smac_idx = tx_chan << 1;
1387 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1388 txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
1389 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1390 rss_qid = dev->rdev.lldi.rxq_ids[
1391 cxgb4_port_idx(dst->neighbour->dev) * step];
1392 }
1393 if (!l2t) {
1394 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1395 __func__);
1396 dst_release(dst);
1397 goto reject;
1398 }
1399
1400 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
1401 if (!child_ep) {
1402 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
1403 __func__);
1404 cxgb4_l2t_release(l2t);
1405 dst_release(dst);
1406 goto reject;
1407 }
1408 state_set(&child_ep->com, CONNECTING);
1409 child_ep->com.dev = dev;
1410 child_ep->com.cm_id = NULL;
1411 child_ep->com.local_addr.sin_family = PF_INET;
1412 child_ep->com.local_addr.sin_port = local_port;
1413 child_ep->com.local_addr.sin_addr.s_addr = local_ip;
1414 child_ep->com.remote_addr.sin_family = PF_INET;
1415 child_ep->com.remote_addr.sin_port = peer_port;
1416 child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
1417 c4iw_get_ep(&parent_ep->com);
1418 child_ep->parent_ep = parent_ep;
1419 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
1420 child_ep->l2t = l2t;
1421 child_ep->dst = dst;
1422 child_ep->hwtid = hwtid;
1423 child_ep->tx_chan = tx_chan;
1424 child_ep->smac_idx = smac_idx;
1425 child_ep->rss_qid = rss_qid;
1426 child_ep->mtu = mtu;
1427 child_ep->txq_idx = txq_idx;
1428
1429 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
1430 tx_chan, smac_idx, rss_qid);
1431
1432 init_timer(&child_ep->timer);
1433 cxgb4_insert_tid(t, child_ep, hwtid);
1434 accept_cr(child_ep, peer_ip, skb, req);
1435 goto out;
1436reject:
1437 reject_cr(dev, hwtid, peer_ip, skb);
1438out:
1439 return 0;
1440}
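
Both the loopback and the normal branch above compute queue indices
the same way: each port owns a contiguous slice of the device's tx
queues and rx queue ids, so a port's first queue is its port index
scaled by queues-per-channel. Factored out (hypothetical helper name):

    /* First queue slot owned by a port, given 'nq' queues spread
     * evenly across 'nchan' channels. */
    static unsigned int port_first_queue(unsigned int port_idx,
                                         unsigned int nq,
                                         unsigned int nchan)
    {
            unsigned int step = nq / nchan;

            return port_idx * step;
    }
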
1441
1442static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
1443{
1444 struct c4iw_ep *ep;
1445 struct cpl_pass_establish *req = cplhdr(skb);
1446 struct tid_info *t = dev->rdev.lldi.tids;
1447 unsigned int tid = GET_TID(req);
1448
1449 ep = lookup_tid(t, tid);
1450 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1451 ep->snd_seq = be32_to_cpu(req->snd_isn);
1452 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
1453
1454 set_emss(ep, ntohs(req->tcp_opt));
1455
1456 dst_confirm(ep->dst);
1457 state_set(&ep->com, MPA_REQ_WAIT);
1458 start_ep_timer(ep);
1459 send_flowc(ep, skb);
1460
1461 return 0;
1462}
1463
1464static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
1465{
1466 struct cpl_peer_close *hdr = cplhdr(skb);
1467 struct c4iw_ep *ep;
1468 struct c4iw_qp_attributes attrs;
1469 unsigned long flags;
1470 int disconnect = 1;
1471 int release = 0;
1472 int closing = 0;
1473 struct tid_info *t = dev->rdev.lldi.tids;
1474 unsigned int tid = GET_TID(hdr);
1475 int start_timer = 0;
1476 int stop_timer = 0;
1477
1478 ep = lookup_tid(t, tid);
1479 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1480 dst_confirm(ep->dst);
1481
1482 spin_lock_irqsave(&ep->com.lock, flags);
1483 switch (ep->com.state) {
1484 case MPA_REQ_WAIT:
1485 __state_set(&ep->com, CLOSING);
1486 break;
1487 case MPA_REQ_SENT:
1488 __state_set(&ep->com, CLOSING);
1489 connect_reply_upcall(ep, -ECONNRESET);
1490 break;
1491 case MPA_REQ_RCVD:
1492
1493 /*
1494 * We're gonna mark this puppy DEAD, but keep
1495 * the reference on it until the ULP accepts or
1496 * rejects the CR. Also wake up anyone waiting
1497 * in rdma connection migration (see c4iw_accept_cr()).
1498 */
1499 __state_set(&ep->com, CLOSING);
1500 ep->com.rpl_done = 1;
1501 ep->com.rpl_err = -ECONNRESET;
1502 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1503 wake_up(&ep->com.waitq);
1504 break;
1505 case MPA_REP_SENT:
1506 __state_set(&ep->com, CLOSING);
1507 ep->com.rpl_done = 1;
1508 ep->com.rpl_err = -ECONNRESET;
1509 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1510 wake_up(&ep->com.waitq);
1511 break;
1512 case FPDU_MODE:
1513 start_timer = 1;
1514 __state_set(&ep->com, CLOSING);
1515 closing = 1;
1516 peer_close_upcall(ep);
1517 break;
1518 case ABORTING:
1519 disconnect = 0;
1520 break;
1521 case CLOSING:
1522 __state_set(&ep->com, MORIBUND);
1523 disconnect = 0;
1524 break;
1525 case MORIBUND:
1526 stop_timer = 1;
1527 if (ep->com.cm_id && ep->com.qp) {
1528 attrs.next_state = C4IW_QP_STATE_IDLE;
1529 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1530 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1531 }
1532 close_complete_upcall(ep);
1533 __state_set(&ep->com, DEAD);
1534 release = 1;
1535 disconnect = 0;
1536 break;
1537 case DEAD:
1538 disconnect = 0;
1539 break;
1540 default:
1541 BUG_ON(1);
1542 }
1543 spin_unlock_irqrestore(&ep->com.lock, flags);
1544 if (closing) {
1545 attrs.next_state = C4IW_QP_STATE_CLOSING;
1546 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1547 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1548 }
1549 if (start_timer)
1550 start_ep_timer(ep);
1551 if (stop_timer)
1552 stop_ep_timer(ep);
1553 if (disconnect)
1554 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1555 if (release)
1556 release_ep_resources(ep);
1557 return 0;
1558}
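
peer_close() shows the locking discipline used by every handler that
touches ep->com.state: decisions are latched as flags while holding
the spinlock, and the resulting actions run only after the lock is
dropped, because the upcalls and GFP_KERNEL sends they trigger may
sleep. The skeleton, lifted from the code above:

    spin_lock_irqsave(&ep->com.lock, flags);
    /* inspect and advance ep->com.state; latch closing, release,
     * start_timer and stop_timer rather than acting immediately */
    spin_unlock_irqrestore(&ep->com.lock, flags);

    if (start_timer)
            start_ep_timer(ep);
    if (stop_timer)
            stop_ep_timer(ep);
    if (disconnect)
            c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
    if (release)
            release_ep_resources(ep);
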
1559
1560/*
1561 * Returns whether an ABORT_REQ_RSS message is a negative advice.
1562 */
1563static int is_neg_adv_abort(unsigned int status)
1564{
1565 return status == CPL_ERR_RTX_NEG_ADVICE ||
1566 status == CPL_ERR_PERSIST_NEG_ADVICE;
1567}
1568
1569static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
1570{
1571 struct cpl_abort_req_rss *req = cplhdr(skb);
1572 struct c4iw_ep *ep;
1573 struct cpl_abort_rpl *rpl;
1574 struct sk_buff *rpl_skb;
1575 struct c4iw_qp_attributes attrs;
1576 int ret;
1577 int release = 0;
1578 unsigned long flags;
1579 struct tid_info *t = dev->rdev.lldi.tids;
1580 unsigned int tid = GET_TID(req);
1581 int stop_timer = 0;
1582
1583 ep = lookup_tid(t, tid);
1584 if (is_neg_adv_abort(req->status)) {
1585 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
1586 ep->hwtid);
1587 return 0;
1588 }
1589 spin_lock_irqsave(&ep->com.lock, flags);
1590 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
1591 ep->com.state);
1592 switch (ep->com.state) {
1593 case CONNECTING:
1594 break;
1595 case MPA_REQ_WAIT:
1596 stop_timer = 1;
1597 break;
1598 case MPA_REQ_SENT:
1599 stop_timer = 1;
1600 connect_reply_upcall(ep, -ECONNRESET);
1601 break;
1602 case MPA_REP_SENT:
1603 ep->com.rpl_done = 1;
1604 ep->com.rpl_err = -ECONNRESET;
1605 PDBG("waking up ep %p\n", ep);
1606 wake_up(&ep->com.waitq);
1607 break;
1608 case MPA_REQ_RCVD:
1609
1610 /*
1611 * We're gonna mark this puppy DEAD, but keep
1612 * the reference on it until the ULP accepts or
1613 * rejects the CR. Also wake up anyone waiting
1614 * in rdma connection migration (see c4iw_accept_cr()).
1615 */
1616 ep->com.rpl_done = 1;
1617 ep->com.rpl_err = -ECONNRESET;
1618 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1619 wake_up(&ep->com.waitq);
1620 break;
1621 case MORIBUND:
1622 case CLOSING:
1623 stop_timer = 1;
1624 /*FALLTHROUGH*/
1625 case FPDU_MODE:
1626 if (ep->com.cm_id && ep->com.qp) {
1627 attrs.next_state = C4IW_QP_STATE_ERROR;
1628 ret = c4iw_modify_qp(ep->com.qp->rhp,
1629 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
1630 &attrs, 1);
1631 if (ret)
1632 printk(KERN_ERR MOD
1633 "%s - qp <- error failed!\n",
1634 __func__);
1635 }
1636 peer_abort_upcall(ep);
1637 break;
1638 case ABORTING:
1639 break;
1640 case DEAD:
1641 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
1642 spin_unlock_irqrestore(&ep->com.lock, flags);
1643 return 0;
1644 default:
1645 BUG_ON(1);
1646 break;
1647 }
1648 dst_confirm(ep->dst);
1649 if (ep->com.state != ABORTING) {
1650 __state_set(&ep->com, DEAD);
1651 release = 1;
1652 }
1653 spin_unlock_irqrestore(&ep->com.lock, flags);
1654
1655 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
1656 if (!rpl_skb) {
1657 printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
1658 __func__);
1659 release = 1;
1660 goto out;
1661 }
1662 set_wr_txq(rpl_skb, CPL_PRIORITY_DATA, ep->txq_idx);
1663 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
1664 INIT_TP_WR(rpl, ep->hwtid);
1665 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
1666 rpl->cmd = CPL_ABORT_NO_RST;
1667 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
1668out:
1669 if (stop_timer)
1670 stop_ep_timer(ep);
1671 if (release)
1672 release_ep_resources(ep);
1673 return 0;
1674}
1675
1676static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1677{
1678 struct c4iw_ep *ep;
1679 struct c4iw_qp_attributes attrs;
1680 struct cpl_close_con_rpl *rpl = cplhdr(skb);
1681 unsigned long flags;
1682 int release = 0;
1683 struct tid_info *t = dev->rdev.lldi.tids;
1684 unsigned int tid = GET_TID(rpl);
1685 int stop_timer = 0;
1686
1687 ep = lookup_tid(t, tid);
1688
1689 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1690 BUG_ON(!ep);
1691
1692 /* The cm_id may be null if we failed to connect */
1693 spin_lock_irqsave(&ep->com.lock, flags);
1694 switch (ep->com.state) {
1695 case CLOSING:
1696 __state_set(&ep->com, MORIBUND);
1697 break;
1698 case MORIBUND:
1699 stop_timer = 1;
1700 if ((ep->com.cm_id) && (ep->com.qp)) {
1701 attrs.next_state = C4IW_QP_STATE_IDLE;
1702 c4iw_modify_qp(ep->com.qp->rhp,
1703 ep->com.qp,
1704 C4IW_QP_ATTR_NEXT_STATE,
1705 &attrs, 1);
1706 }
1707 close_complete_upcall(ep);
1708 __state_set(&ep->com, DEAD);
1709 release = 1;
1710 break;
1711 case ABORTING:
1712 case DEAD:
1713 break;
1714 default:
1715 BUG_ON(1);
1716 break;
1717 }
1718 spin_unlock_irqrestore(&ep->com.lock, flags);
1719 if (stop_timer)
1720 stop_ep_timer(ep);
1721 if (release)
1722 release_ep_resources(ep);
1723 return 0;
1724}
1725
1726static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
1727{
1728 struct c4iw_ep *ep;
1729 struct cpl_rdma_terminate *term = cplhdr(skb);
1730 struct tid_info *t = dev->rdev.lldi.tids;
1731 unsigned int tid = GET_TID(term);
1732
1733 ep = lookup_tid(t, tid);
1734
1735 if (state_read(&ep->com) != FPDU_MODE)
1736 return 0;
1737
1738 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1739 skb_pull(skb, sizeof *term);
1740 PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
1741 skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
1742 skb->len);
1743 ep->com.qp->attr.terminate_msg_len = skb->len;
1744 ep->com.qp->attr.is_terminate_local = 0;
1745 return 0;
1746}
1747
1748/*
1749 * Upcall from the adapter indicating data has been transmitted.
1750 * For us its just the single MPA request or reply. We can now free
1751 * the skb holding the mpa message.
1752 */
1753static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
1754{
1755 struct c4iw_ep *ep;
1756 struct cpl_fw4_ack *hdr = cplhdr(skb);
1757 u8 credits = hdr->credits;
1758 unsigned int tid = GET_TID(hdr);
1759 struct tid_info *t = dev->rdev.lldi.tids;
1760
1761
1762 ep = lookup_tid(t, tid);
1763 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
1764 if (credits == 0) {
1765 printk(KERN_ERR MOD "%s 0 credit ack ep %p tid %u state %u\n",
1766 __func__, ep, ep->hwtid, state_read(&ep->com));
1767 return 0;
1768 }
1769
1770 dst_confirm(ep->dst);
1771 if (ep->mpa_skb) {
1772 PDBG("%s last streaming msg ack ep %p tid %u state %u "
1773 "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
1774 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
1775 kfree_skb(ep->mpa_skb);
1776 ep->mpa_skb = NULL;
1777 }
1778 return 0;
1779}
1780
1781int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1782{
1783 int err;
1784 struct c4iw_ep *ep = to_ep(cm_id);
1785 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1786
1787 if (state_read(&ep->com) == DEAD) {
1788 c4iw_put_ep(&ep->com);
1789 return -ECONNRESET;
1790 }
1791 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1792 if (mpa_rev == 0)
1793 abort_connection(ep, NULL, GFP_KERNEL);
1794 else {
1795 err = send_mpa_reject(ep, pdata, pdata_len);
1796 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1797 }
1798 c4iw_put_ep(&ep->com);
1799 return 0;
1800}
1801
1802int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1803{
1804 int err;
1805 struct c4iw_qp_attributes attrs;
1806 enum c4iw_qp_attr_mask mask;
1807 struct c4iw_ep *ep = to_ep(cm_id);
1808 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
1809 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
1810
1811 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1812 if (state_read(&ep->com) == DEAD) {
1813 err = -ECONNRESET;
1814 goto err;
1815 }
1816
1817 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1818 BUG_ON(!qp);
1819
1820 if ((conn_param->ord > c4iw_max_read_depth) ||
1821 (conn_param->ird > c4iw_max_read_depth)) {
1822 abort_connection(ep, NULL, GFP_KERNEL);
1823 err = -EINVAL;
1824 goto err;
1825 }
1826
1827 cm_id->add_ref(cm_id);
1828 ep->com.cm_id = cm_id;
1829 ep->com.qp = qp;
1830
1831 ep->ird = conn_param->ird;
1832 ep->ord = conn_param->ord;
1833
1834 if (peer2peer && ep->ird == 0)
1835 ep->ird = 1;
1836
1837 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
1838
1839 /* bind QP to EP and move to RTS */
1840 attrs.mpa_attr = ep->mpa_attr;
1841 attrs.max_ird = ep->ird;
1842 attrs.max_ord = ep->ord;
1843 attrs.llp_stream_handle = ep;
1844 attrs.next_state = C4IW_QP_STATE_RTS;
1845
1846 /* bind QP and TID with INIT_WR */
1847 mask = C4IW_QP_ATTR_NEXT_STATE |
1848 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
1849 C4IW_QP_ATTR_MPA_ATTR |
1850 C4IW_QP_ATTR_MAX_IRD |
1851 C4IW_QP_ATTR_MAX_ORD;
1852
1853 err = c4iw_modify_qp(ep->com.qp->rhp,
1854 ep->com.qp, mask, &attrs, 1);
1855 if (err)
1856 goto err1;
1857 err = send_mpa_reply(ep, conn_param->private_data,
1858 conn_param->private_data_len);
1859 if (err)
1860 goto err1;
1861
1862 state_set(&ep->com, FPDU_MODE);
1863 established_upcall(ep);
1864 c4iw_put_ep(&ep->com);
1865 return 0;
1866err1:
1867 ep->com.cm_id = NULL;
1868 ep->com.qp = NULL;
1869 cm_id->rem_ref(cm_id);
1870err:
1871 c4iw_put_ep(&ep->com);
1872 return err;
1873}
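
For context, c4iw_accept_cr() is reached through the iw_cm core: a
ULP's IW_CM_EVENT_CONNECT_REQUEST handler fills an iw_cm_conn_param
and calls iw_cm_accept(), which dispatches here via the provider ops.
A minimal sketch ('qpn', 'pdata' and 'pdata_len' are hypothetical
values owned by the ULP):

    struct iw_cm_conn_param param = {
            .ord              = 1,
            .ird              = 1,
            .private_data     = pdata,      /* hypothetical buffer */
            .private_data_len = pdata_len,
            .qpn              = qpn,        /* QP the ULP created */
    };
    int err = iw_cm_accept(cm_id, &param);
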
1874
1875int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1876{
1877 int err = 0;
1878 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
1879 struct c4iw_ep *ep;
1880 struct rtable *rt;
1881 struct net_device *pdev;
1882 int step;
1883
1884 if ((conn_param->ord > c4iw_max_read_depth) ||
1885 (conn_param->ird > c4iw_max_read_depth)) {
1886 err = -EINVAL;
1887 goto out;
1888 }
1889 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1890 if (!ep) {
1891 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
1892 err = -ENOMEM;
1893 goto out;
1894 }
1895 init_timer(&ep->timer);
1896 ep->plen = conn_param->private_data_len;
1897 if (ep->plen)
1898 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
1899 conn_param->private_data, ep->plen);
1900 ep->ird = conn_param->ird;
1901 ep->ord = conn_param->ord;
1902
1903 if (peer2peer && ep->ord == 0)
1904 ep->ord = 1;
1905
1906 cm_id->add_ref(cm_id);
1907 ep->com.dev = dev;
1908 ep->com.cm_id = cm_id;
1909 ep->com.qp = get_qhp(dev, conn_param->qpn);
1910 BUG_ON(!ep->com.qp);
1911 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
1912 ep->com.qp, cm_id);
1913
1914 /*
1915 * Allocate an active TID to initiate a TCP connection.
1916 */
1917 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
1918 if (ep->atid == -1) {
1919 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
1920 err = -ENOMEM;
1921 goto fail2;
1922 }
1923
1924 PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
1925 ntohl(cm_id->local_addr.sin_addr.s_addr),
1926 ntohs(cm_id->local_addr.sin_port),
1927 ntohl(cm_id->remote_addr.sin_addr.s_addr),
1928 ntohs(cm_id->remote_addr.sin_port));
1929
1930 /* find a route */
1931 rt = find_route(dev,
1932 cm_id->local_addr.sin_addr.s_addr,
1933 cm_id->remote_addr.sin_addr.s_addr,
1934 cm_id->local_addr.sin_port,
1935 cm_id->remote_addr.sin_port, 0);
1936 if (!rt) {
1937 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
1938 err = -EHOSTUNREACH;
1939 goto fail3;
1940 }
1941 ep->dst = &rt->u.dst;
1942
1943 /* get a l2t entry */
1944 if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
1945 PDBG("%s LOOPBACK\n", __func__);
1946 pdev = ip_dev_find(&init_net,
1947 cm_id->remote_addr.sin_addr.s_addr);
1948 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
1949 ep->dst->neighbour,
1950 pdev, 0);
1951 ep->mtu = pdev->mtu;
1952 ep->tx_chan = cxgb4_port_chan(pdev);
1953 ep->smac_idx = ep->tx_chan << 1;
1954 step = ep->com.dev->rdev.lldi.ntxq /
1955 ep->com.dev->rdev.lldi.nchan;
1956 ep->txq_idx = cxgb4_port_idx(pdev) * step;
1957 step = ep->com.dev->rdev.lldi.nrxq /
1958 ep->com.dev->rdev.lldi.nchan;
1959 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
1960 cxgb4_port_idx(pdev) * step];
1961 dev_put(pdev);
1962 } else {
1963 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
1964 ep->dst->neighbour,
1965 ep->dst->neighbour->dev, 0);
1966 ep->mtu = dst_mtu(ep->dst);
1967 ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
1968 ep->smac_idx = ep->tx_chan << 1;
1969 step = ep->com.dev->rdev.lldi.ntxq /
1970 ep->com.dev->rdev.lldi.nchan;
1971 ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
1972 step = ep->com.dev->rdev.lldi.nrxq /
1973 ep->com.dev->rdev.lldi.nchan;
1974 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
1975 cxgb4_port_idx(ep->dst->neighbour->dev) * step];
1976 }
1977 if (!ep->l2t) {
1978 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1979 err = -ENOMEM;
1980 goto fail4;
1981 }
1982
1983 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
1984 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
1985 ep->l2t->idx);
1986
1987 state_set(&ep->com, CONNECTING);
1988 ep->tos = 0;
1989 ep->com.local_addr = cm_id->local_addr;
1990 ep->com.remote_addr = cm_id->remote_addr;
1991
1992 /* send connect request to rnic */
1993 err = send_connect(ep);
1994 if (!err)
1995 goto out;
1996
1997 cxgb4_l2t_release(ep->l2t);
1998fail4:
1999 dst_release(ep->dst);
2000fail3:
2001 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2002fail2:
2003 cm_id->rem_ref(cm_id);
2004 c4iw_put_ep(&ep->com);
2005out:
2006 return err;
2007}
2008
2009int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2010{
2011 int err = 0;
2012 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2013 struct c4iw_listen_ep *ep;
2014
2015
2016 might_sleep();
2017
2018 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2019 if (!ep) {
2020 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2021 err = -ENOMEM;
2022 goto fail1;
2023 }
2024 PDBG("%s ep %p\n", __func__, ep);
2025 cm_id->add_ref(cm_id);
2026 ep->com.cm_id = cm_id;
2027 ep->com.dev = dev;
2028 ep->backlog = backlog;
2029 ep->com.local_addr = cm_id->local_addr;
2030
2031 /*
2032 * Allocate a server TID.
2033 */
2034 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
2035 if (ep->stid == -1) {
2036 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
2037 err = -ENOMEM;
2038 goto fail2;
2039 }
2040
2041 state_set(&ep->com, LISTEN);
2042 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
2043 ep->com.local_addr.sin_addr.s_addr,
2044 ep->com.local_addr.sin_port,
2045 ep->com.dev->rdev.lldi.rxq_ids[0]);
2046 if (err)
2047 goto fail3;
2048
2049 /* wait for pass_open_rpl */
2050 wait_event(ep->com.waitq, ep->com.rpl_done);
2051 err = ep->com.rpl_err;
2052 if (!err) {
2053 cm_id->provider_data = ep;
2054 goto out;
2055 }
2056fail3:
2057 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2058fail2:
2059 cm_id->rem_ref(cm_id);
2060 c4iw_put_ep(&ep->com);
2061fail1:
2062out:
2063 return err;
2064}
2065
2066int c4iw_destroy_listen(struct iw_cm_id *cm_id)
2067{
2068 int err;
2069 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
2070
2071 PDBG("%s ep %p\n", __func__, ep);
2072
2073 might_sleep();
2074 state_set(&ep->com, DEAD);
2075 ep->com.rpl_done = 0;
2076 ep->com.rpl_err = 0;
2077 err = listen_stop(ep);
2078 if (err)
2079 goto done;
2080 wait_event(ep->com.waitq, ep->com.rpl_done);
2081 err = ep->com.rpl_err;
2082 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2083 done:
2084 cm_id->rem_ref(cm_id);
2085 c4iw_put_ep(&ep->com);
2086 return err;
2087}
2088
2089int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2090{
2091 int ret = 0;
2092 unsigned long flags;
2093 int close = 0;
2094 int fatal = 0;
2095 struct c4iw_rdev *rdev;
2096 int start_timer = 0;
2097 int stop_timer = 0;
2098
2099 spin_lock_irqsave(&ep->com.lock, flags);
2100
2101 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
2102 states[ep->com.state], abrupt);
2103
2104 rdev = &ep->com.dev->rdev;
2105 if (c4iw_fatal_error(rdev)) {
2106 fatal = 1;
2107 close_complete_upcall(ep);
2108 ep->com.state = DEAD;
2109 }
2110 switch (ep->com.state) {
2111 case MPA_REQ_WAIT:
2112 case MPA_REQ_SENT:
2113 case MPA_REQ_RCVD:
2114 case MPA_REP_SENT:
2115 case FPDU_MODE:
2116 close = 1;
2117 if (abrupt)
2118 ep->com.state = ABORTING;
2119 else {
2120 ep->com.state = CLOSING;
2121 start_timer = 1;
2122 }
2123 set_bit(CLOSE_SENT, &ep->com.flags);
2124 break;
2125 case CLOSING:
2126 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2127 close = 1;
2128 if (abrupt) {
2129 stop_timer = 1;
2130 ep->com.state = ABORTING;
2131 } else
2132 ep->com.state = MORIBUND;
2133 }
2134 break;
2135 case MORIBUND:
2136 case ABORTING:
2137 case DEAD:
2138 PDBG("%s ignoring disconnect ep %p state %u\n",
2139 __func__, ep, ep->com.state);
2140 break;
2141 default:
2142 BUG();
2143 break;
2144 }
2145
2146 spin_unlock_irqrestore(&ep->com.lock, flags);
2147 if (start_timer)
2148 start_ep_timer(ep);
2149 if (stop_timer)
2150 stop_ep_timer(ep);
2151 if (close) {
2152 if (abrupt)
2153 ret = abort_connection(ep, NULL, gfp);
2154 else
2155 ret = send_halfclose(ep, gfp);
2156 if (ret)
2157 fatal = 1;
2158 }
2159 if (fatal)
2160 release_ep_resources(ep);
2161 return ret;
2162}
2163
2164/*
2165 * These are the real handlers that are called from a
2166 * work queue.
2167 */
2168static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
2169 [CPL_ACT_ESTABLISH] = act_establish,
2170 [CPL_ACT_OPEN_RPL] = act_open_rpl,
2171 [CPL_RX_DATA] = rx_data,
2172 [CPL_ABORT_RPL_RSS] = abort_rpl,
2173 [CPL_ABORT_RPL] = abort_rpl,
2174 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
2175 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
2176 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
2177 [CPL_PASS_ESTABLISH] = pass_establish,
2178 [CPL_PEER_CLOSE] = peer_close,
2179 [CPL_ABORT_REQ_RSS] = peer_abort,
2180 [CPL_CLOSE_CON_RPL] = close_con_rpl,
2181 [CPL_RDMA_TERMINATE] = terminate,
2182 [CPL_FW4_ACK] = fw4_ack
2183};
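
process_work() below indexes this table directly with the CPL opcode,
so supporting a new message type is one table entry plus its handler.
The dispatch, reduced to its essentials (hypothetical stand-in types):

    #include <stddef.h>

    typedef int (*handler_fn)(void *dev, void *msg);

    static int dispatch(handler_fn *table, size_t nents,
                        unsigned int opcode, void *dev, void *msg)
    {
            if (opcode >= nents || !table[opcode])
                    return -1;      /* no handler registered */
            return table[opcode](dev, msg);
    }
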
2184
2185static void process_timeout(struct c4iw_ep *ep)
2186{
2187 struct c4iw_qp_attributes attrs;
2188 int abort = 1;
2189
2190 spin_lock_irq(&ep->com.lock);
2191 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
2192 ep->com.state);
2193 switch (ep->com.state) {
2194 case MPA_REQ_SENT:
2195 __state_set(&ep->com, ABORTING);
2196 connect_reply_upcall(ep, -ETIMEDOUT);
2197 break;
2198 case MPA_REQ_WAIT:
2199 __state_set(&ep->com, ABORTING);
2200 break;
2201 case CLOSING:
2202 case MORIBUND:
2203 if (ep->com.cm_id && ep->com.qp) {
2204 attrs.next_state = C4IW_QP_STATE_ERROR;
2205 c4iw_modify_qp(ep->com.qp->rhp,
2206 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2207 &attrs, 1);
2208 }
2209 __state_set(&ep->com, ABORTING);
2210 break;
2211 default:
2212 printk(KERN_ERR MOD "%s unexpected state ep %p tid %u state %u\n",
2213 __func__, ep, ep->hwtid, ep->com.state);
2214 WARN_ON(1);
2215 abort = 0;
2216 }
2217 spin_unlock_irq(&ep->com.lock);
2218 if (abort)
2219 abort_connection(ep, NULL, GFP_KERNEL);
2220 c4iw_put_ep(&ep->com);
2221}
2222
2223static void process_timedout_eps(void)
2224{
2225 struct c4iw_ep *ep;
2226
2227 spin_lock_irq(&timeout_lock);
2228 while (!list_empty(&timeout_list)) {
2229 struct list_head *tmp;
2230
2231 tmp = timeout_list.next;
2232 list_del(tmp);
2233 spin_unlock_irq(&timeout_lock);
2234 ep = list_entry(tmp, struct c4iw_ep, entry);
2235 process_timeout(ep);
2236 spin_lock_irq(&timeout_lock);
2237 }
2238 spin_unlock_irq(&timeout_lock);
2239}
2240
2241static void process_work(struct work_struct *work)
2242{
2243 struct sk_buff *skb;
2244 struct c4iw_dev *dev;
2245 struct cpl_act_establish *rpl;
2246 unsigned int opcode;
2247 int ret;
2248
2249 while ((skb = skb_dequeue(&rxq))) {
2250 rpl = cplhdr(skb);
2251 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
2252 opcode = rpl->ot.opcode;
2253
2254 BUG_ON(!work_handlers[opcode]);
2255 ret = work_handlers[opcode](dev, skb);
2256 if (!ret)
2257 kfree_skb(skb);
2258 }
2259 process_timedout_eps();
2260}
2261
2262static DECLARE_WORK(skb_work, process_work);
2263
2264static void ep_timeout(unsigned long arg)
2265{
2266 struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2267
2268 spin_lock(&timeout_lock);
2269 list_add_tail(&ep->entry, &timeout_list);
2270 spin_unlock(&timeout_lock);
2271 queue_work(workq, &skb_work);
2272}
2273
2274/*
2275 * All the CM events are handled on a work queue to have a safe context.
2276 */
2277static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
2278{
2279
2280 /*
2281 * Save dev in the skb->cb area.
2282 */
2283 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
2284
2285 /*
2286 * Queue the skb and schedule the worker thread.
2287 */
2288 skb_queue_tail(&rxq, skb);
2289 queue_work(workq, &skb_work);
2290 return 0;
2291}
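
sched() parks the device pointer one pointer-sized slot into skb->cb
so that process_work() can recover it without any lookup; the first
slot is left untouched. Both sides must agree on the offset, which is
easier to see as a matched pair of helpers (hypothetical names):

    static void stash_dev(struct sk_buff *skb, struct c4iw_dev *dev)
    {
            *((struct c4iw_dev **)(skb->cb + sizeof(void *))) = dev;
    }

    static struct c4iw_dev *fetch_dev(struct sk_buff *skb)
    {
            return *((struct c4iw_dev **)(skb->cb + sizeof(void *)));
    }
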
2292
2293static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2294{
2295 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
2296
2297 if (rpl->status != CPL_ERR_NONE) {
2298 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
2299 "for tid %u\n", rpl->status, GET_TID(rpl));
2300 }
2301 return 0;
2302}
2303
2304static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
2305{
2306 struct cpl_fw6_msg *rpl = cplhdr(skb);
2307 struct c4iw_wr_wait *wr_waitp;
2308 int ret;
2309
2310 PDBG("%s type %u\n", __func__, rpl->type);
2311
2312 switch (rpl->type) {
2313 case 1: /* work request reply: wake the posting thread */
2314 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
2315 wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
2316 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
2317 if (wr_waitp) {
2318 wr_waitp->ret = ret;
2319 wr_waitp->done = 1;
2320 wake_up(&wr_waitp->wait);
2321 }
2322 break;
2323 case 2: /* async completion event */
2324 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
2325 break;
2326 default:
2327 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
2328 rpl->type);
2329 break;
2330 }
2331 return 0;
2332}
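
Type 1 FW6 messages close a cookie round-trip: the posting side (see
create_cq() and destroy_cq() in cq.c below) embeds a pointer to its
c4iw_wr_wait in the work request cookie, the firmware echoes the
cookie back, and the handler above wakes the sleeper. Condensed from
the code in this patch:

    /* posting side */
    c4iw_init_wr_wait(&wr_wait);
    res_wr->cookie = (u64)&wr_wait;
    ret = c4iw_ofld_send(rdev, skb);
    wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);

    /* completion side, in fw6_msg() above */
    wr_waitp->ret = ret;
    wr_waitp->done = 1;
    wake_up(&wr_waitp->wait);
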
2333
2334/*
2335 * Most upcalls from the T4 Core go to sched() to
2336 * schedule the processing on a work queue.
2337 */
2338c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
2339 [CPL_ACT_ESTABLISH] = sched,
2340 [CPL_ACT_OPEN_RPL] = sched,
2341 [CPL_RX_DATA] = sched,
2342 [CPL_ABORT_RPL_RSS] = sched,
2343 [CPL_ABORT_RPL] = sched,
2344 [CPL_PASS_OPEN_RPL] = sched,
2345 [CPL_CLOSE_LISTSRV_RPL] = sched,
2346 [CPL_PASS_ACCEPT_REQ] = sched,
2347 [CPL_PASS_ESTABLISH] = sched,
2348 [CPL_PEER_CLOSE] = sched,
2349 [CPL_CLOSE_CON_RPL] = sched,
2350 [CPL_ABORT_REQ_RSS] = sched,
2351 [CPL_RDMA_TERMINATE] = sched,
2352 [CPL_FW4_ACK] = sched,
2353 [CPL_SET_TCB_RPL] = set_tcb_rpl,
2354 [CPL_FW6_MSG] = fw6_msg
2355};
2356
2357int __init c4iw_cm_init(void)
2358{
2359 spin_lock_init(&timeout_lock);
2360 skb_queue_head_init(&rxq);
2361
2362 workq = create_singlethread_workqueue("iw_cxgb4");
2363 if (!workq)
2364 return -ENOMEM;
2365
2366 return 0;
2367}
2368
2369void __exit c4iw_cm_term(void)
2370{
2371 WARN_ON(!list_empty(&timeout_list));
2372 flush_workqueue(workq);
2373 destroy_workqueue(workq);
2374}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
new file mode 100644
index 000000000000..fb1aafcc294f
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -0,0 +1,882 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "iw_cxgb4.h"
34
35static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
36 struct c4iw_dev_ucontext *uctx)
37{
38 struct fw_ri_res_wr *res_wr;
39 struct fw_ri_res *res;
40 int wr_len;
41 struct c4iw_wr_wait wr_wait;
42 struct sk_buff *skb;
43 int ret;
44
45 wr_len = sizeof *res_wr + sizeof *res;
46 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
47 if (!skb)
48 return -ENOMEM;
49 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
50
51 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
52 memset(res_wr, 0, wr_len);
53 res_wr->op_nres = cpu_to_be32(
54 FW_WR_OP(FW_RI_RES_WR) |
55 V_FW_RI_RES_WR_NRES(1) |
56 FW_WR_COMPL(1));
57 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
58 res_wr->cookie = (u64)&wr_wait;
59 res = res_wr->res;
60 res->u.cq.restype = FW_RI_RES_TYPE_CQ;
61 res->u.cq.op = FW_RI_RES_OP_RESET;
62 res->u.cq.iqid = cpu_to_be32(cq->cqid);
63
64 c4iw_init_wr_wait(&wr_wait);
65 ret = c4iw_ofld_send(rdev, skb);
66 if (!ret) {
67 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
68 if (!wr_wait.done) {
69 printk(KERN_ERR MOD "Device %s not responding!\n",
70 pci_name(rdev->lldi.pdev));
71 rdev->flags = T4_FATAL_ERROR;
72 ret = -EIO;
73 } else
74 ret = wr_wait.ret;
75 }
76
77 kfree(cq->sw_queue);
78 dma_free_coherent(&(rdev->lldi.pdev->dev),
79 cq->memsize, cq->queue,
80 pci_unmap_addr(cq, mapping));
81 c4iw_put_cqid(rdev, cq->cqid, uctx);
82 return ret;
83}
84
85static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
86 struct c4iw_dev_ucontext *uctx)
87{
88 struct fw_ri_res_wr *res_wr;
89 struct fw_ri_res *res;
90 int wr_len;
91 int user = (uctx != &rdev->uctx);
92 struct c4iw_wr_wait wr_wait;
93 int ret;
94 struct sk_buff *skb;
95
96 cq->cqid = c4iw_get_cqid(rdev, uctx);
97 if (!cq->cqid) {
98 ret = -ENOMEM;
99 goto err1;
100 }
101
102 if (!user) {
103 cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
104 if (!cq->sw_queue) {
105 ret = -ENOMEM;
106 goto err2;
107 }
108 }
109 cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
110 &cq->dma_addr, GFP_KERNEL);
111 if (!cq->queue) {
112 ret = -ENOMEM;
113 goto err3;
114 }
115 pci_unmap_addr_set(cq, mapping, cq->dma_addr);
116 memset(cq->queue, 0, cq->memsize);
117
118 /* build fw_ri_res_wr */
119 wr_len = sizeof *res_wr + sizeof *res;
120
121	skb = alloc_skb(wr_len, GFP_KERNEL);
122 if (!skb) {
123 ret = -ENOMEM;
124 goto err4;
125 }
126 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
127
128 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
129 memset(res_wr, 0, wr_len);
130 res_wr->op_nres = cpu_to_be32(
131 FW_WR_OP(FW_RI_RES_WR) |
132 V_FW_RI_RES_WR_NRES(1) |
133 FW_WR_COMPL(1));
134 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
135 res_wr->cookie = (u64)&wr_wait;
136 res = res_wr->res;
137 res->u.cq.restype = FW_RI_RES_TYPE_CQ;
138 res->u.cq.op = FW_RI_RES_OP_WRITE;
139 res->u.cq.iqid = cpu_to_be32(cq->cqid);
140 res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
141 V_FW_RI_RES_WR_IQANUS(0) |
142 V_FW_RI_RES_WR_IQANUD(1) |
143 F_FW_RI_RES_WR_IQANDST |
144 V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
145 res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
146 F_FW_RI_RES_WR_IQDROPRSS |
147 V_FW_RI_RES_WR_IQPCIECH(2) |
148 V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
149 F_FW_RI_RES_WR_IQO |
150 V_FW_RI_RES_WR_IQESIZE(1));
151 res->u.cq.iqsize = cpu_to_be16(cq->size);
152 res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
153
154 c4iw_init_wr_wait(&wr_wait);
155
156 ret = c4iw_ofld_send(rdev, skb);
157 if (ret)
158 goto err4;
159 PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
160 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
161 if (!wr_wait.done) {
162 printk(KERN_ERR MOD "Device %s not responding!\n",
163 pci_name(rdev->lldi.pdev));
164 rdev->flags = T4_FATAL_ERROR;
165 ret = -EIO;
166 } else
167 ret = wr_wait.ret;
168 if (ret)
169 goto err4;
170
171 cq->gen = 1;
172 cq->gts = rdev->lldi.gts_reg;
173 cq->rdev = rdev;
174 if (user) {
175 cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
176 (cq->cqid << rdev->cqshift);
177 cq->ugts &= PAGE_MASK;
178 }
179 return 0;
180err4:
181 dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
182 pci_unmap_addr(cq, mapping));
183err3:
184 kfree(cq->sw_queue);
185err2:
186 c4iw_put_cqid(rdev, cq->cqid, uctx);
187err1:
188 return ret;
189}
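
destroy_cq() and create_cq() share a post-and-wait shape: initialize a c4iw_wr_wait, stamp its address into the WR cookie, send, and block until the firmware reply path completes it. A hedged sketch of the two halves using only the stock wait-queue API (names here are illustrative, not driver API):

	struct example_wr_wait {
		wait_queue_head_t wait;
		int done;
		int ret;
	};

	static int example_post_and_wait(struct example_wr_wait *w, long timeout)
	{
		init_waitqueue_head(&w->wait);
		w->done = 0;
		w->ret = 0;
		/* ... stamp w's address into the WR cookie and send it ... */
		wait_event_timeout(w->wait, w->done, timeout);
		return w->done ? w->ret : -EIO;	/* timeout: device wedged */
	}

	/* Reply side, invoked with the cookie the firmware echoes back. */
	static void example_complete(struct example_wr_wait *w, int fw_ret)
	{
		w->ret = fw_ret;
		w->done = 1;
		wake_up(&w->wait);
	}
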
190
191static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
192{
193 struct t4_cqe cqe;
194
195 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
196 wq, cq, cq->sw_cidx, cq->sw_pidx);
197 memset(&cqe, 0, sizeof(cqe));
198 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
199 V_CQE_OPCODE(FW_RI_SEND) |
200 V_CQE_TYPE(0) |
201 V_CQE_SWCQE(1) |
202 V_CQE_QPID(wq->rq.qid));
203 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
204 cq->sw_queue[cq->sw_pidx] = cqe;
205 t4_swcq_produce(cq);
206}
207
208int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
209{
210 int flushed = 0;
211 int in_use = wq->rq.in_use - count;
212
213 BUG_ON(in_use < 0);
214 PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
215 wq, cq, wq->rq.in_use, count);
216 while (in_use--) {
217 insert_recv_cqe(wq, cq);
218 flushed++;
219 }
220 return flushed;
221}
222
223static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
224 struct t4_swsqe *swcqe)
225{
226 struct t4_cqe cqe;
227
228 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
229 wq, cq, cq->sw_cidx, cq->sw_pidx);
230 memset(&cqe, 0, sizeof(cqe));
231 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
232 V_CQE_OPCODE(swcqe->opcode) |
233 V_CQE_TYPE(1) |
234 V_CQE_SWCQE(1) |
235 V_CQE_QPID(wq->sq.qid));
236 CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
237 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
238 cq->sw_queue[cq->sw_pidx] = cqe;
239 t4_swcq_produce(cq);
240}
241
242int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
243{
244 int flushed = 0;
245 struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
246 int in_use = wq->sq.in_use - count;
247
248 BUG_ON(in_use < 0);
249 while (in_use--) {
250 swsqe->signaled = 0;
251 insert_sq_cqe(wq, cq, swsqe);
252 swsqe++;
253 if (swsqe == (wq->sq.sw_sq + wq->sq.size))
254 swsqe = wq->sq.sw_sq;
255 flushed++;
256 }
257 return flushed;
258}
259
260/*
261 * Move all CQEs from the HWCQ into the SWCQ.
262 */
263void c4iw_flush_hw_cq(struct t4_cq *cq)
264{
265 struct t4_cqe *cqe = NULL, *swcqe;
266 int ret;
267
268 PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
269 ret = t4_next_hw_cqe(cq, &cqe);
270 while (!ret) {
271 PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
272 __func__, cq->cidx, cq->sw_pidx);
273 swcqe = &cq->sw_queue[cq->sw_pidx];
274 *swcqe = *cqe;
275 swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
276 t4_swcq_produce(cq);
277 t4_hwcq_consume(cq);
278 ret = t4_next_hw_cqe(cq, &cqe);
279 }
280}
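
c4iw_flush_hw_cq() leans on the generation-bit ownership rule behind t4_next_hw_cqe(): a hardware CQE belongs to software only while its gen bit matches the CQ's current generation, which flips each time the ring wraps. A sketch of the validity test (illustrative; the real helper lives in t4.h):

	/* Illustrative: CQE ownership via the generation bit. */
	static int example_cqe_valid(struct t4_cq *cq, struct t4_cqe *cqe)
	{
		return CQE_GENBIT(cqe) == cq->gen;
	}
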
281
282static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
283{
284 if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
285 return 0;
286
287 if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
288 return 0;
289
290 if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
291 return 0;
292
293 if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
294 return 0;
295 return 1;
296}
297
298void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
299{
300 struct t4_cqe *cqe;
301 u32 ptr;
302
303 *count = 0;
304 ptr = cq->sw_cidx;
305 while (ptr != cq->sw_pidx) {
306 cqe = &cq->sw_queue[ptr];
307 if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
308 wq->sq.oldest_read)) &&
309 (CQE_QPID(cqe) == wq->sq.qid))
310 (*count)++;
311 if (++ptr == cq->size)
312 ptr = 0;
313 }
314 PDBG("%s cq %p count %d\n", __func__, cq, *count);
315}
316
317void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
318{
319 struct t4_cqe *cqe;
320 u32 ptr;
321
322 *count = 0;
323 PDBG("%s count zero %d\n", __func__, *count);
324 ptr = cq->sw_cidx;
325 while (ptr != cq->sw_pidx) {
326 cqe = &cq->sw_queue[ptr];
327 if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
328 (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
329 (*count)++;
330 if (++ptr == cq->size)
331 ptr = 0;
332 }
333 PDBG("%s cq %p count %d\n", __func__, cq, *count);
334}
335
336static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
337{
338 struct t4_swsqe *swsqe;
339 u16 ptr = wq->sq.cidx;
340 int count = wq->sq.in_use;
341 int unsignaled = 0;
342
343 swsqe = &wq->sq.sw_sq[ptr];
344 while (count--)
345 if (!swsqe->signaled) {
346 if (++ptr == wq->sq.size)
347 ptr = 0;
348 swsqe = &wq->sq.sw_sq[ptr];
349 unsignaled++;
350 } else if (swsqe->complete) {
351
352 /*
353 * Insert this completed cqe into the swcq.
354 */
355 PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
356 __func__, ptr, cq->sw_pidx);
357 swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
358 cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
359 t4_swcq_produce(cq);
360 swsqe->signaled = 0;
361 wq->sq.in_use -= unsignaled;
362 break;
363 } else
364 break;
365}
366
367static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
368 struct t4_cqe *read_cqe)
369{
370 read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
371 read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
372 read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
373 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
374 V_CQE_OPCODE(FW_RI_READ_REQ) |
375 V_CQE_TYPE(1));
376}
377
378/*
379 * Advance wq->sq.oldest_read to the next read wr in the SWSQ, or NULL.
380 */
381static void advance_oldest_read(struct t4_wq *wq)
382{
383
384 u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
385
386 if (rptr == wq->sq.size)
387 rptr = 0;
388 while (rptr != wq->sq.pidx) {
389 wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
390
391 if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
392 return;
393 if (++rptr == wq->sq.size)
394 rptr = 0;
395 }
396 wq->sq.oldest_read = NULL;
397}
398
399/*
400 * poll_cq
401 *
402 * Caller must:
403 * check the validity of the first CQE,
404 *	supply the wq associated with the qpid.
405 *
406 * credit: cq credit to return to sge.
407 * cqe_flushed: 1 iff the CQE is flushed.
408 * cqe: copy of the polled CQE.
409 *
410 * return value:
411 * 0 CQE returned ok.
412 * -EAGAIN CQE skipped, try again.
413 * -EOVERFLOW CQ overflow detected.
414 */
415static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
416 u8 *cqe_flushed, u64 *cookie, u32 *credit)
417{
418 int ret = 0;
419 struct t4_cqe *hw_cqe, read_cqe;
420
421 *cqe_flushed = 0;
422 *credit = 0;
423 ret = t4_next_cqe(cq, &hw_cqe);
424 if (ret)
425 return ret;
426
427 PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
428 " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
429 __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
430 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
431 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
432 CQE_WRID_LOW(hw_cqe));
433
434 /*
435	 * Skip CQEs not affiliated with a QP.
436 */
437 if (wq == NULL) {
438 ret = -EAGAIN;
439 goto skip_cqe;
440 }
441
442 /*
443 * Gotta tweak READ completions:
444 * 1) the cqe doesn't contain the sq_wptr from the wr.
445 * 2) opcode not reflected from the wr.
446 * 3) read_len not reflected from the wr.
447 * 4) cq_type is RQ_TYPE not SQ_TYPE.
448 */
449 if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
450
451 /*
452 * If this is an unsolicited read response, then the read
453 * was generated by the kernel driver as part of peer-2-peer
454 * connection setup. So ignore the completion.
455 */
456 if (!wq->sq.oldest_read) {
457 if (CQE_STATUS(hw_cqe))
458 t4_set_wq_in_error(wq);
459 ret = -EAGAIN;
460 goto skip_cqe;
461 }
462
463 /*
464 * Don't write to the HWCQ, so create a new read req CQE
465 * in local memory.
466 */
467 create_read_req_cqe(wq, hw_cqe, &read_cqe);
468 hw_cqe = &read_cqe;
469 advance_oldest_read(wq);
470 }
471
472 if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
473 *cqe_flushed = t4_wq_in_error(wq);
474 t4_set_wq_in_error(wq);
475 goto proc_cqe;
476 }
477
478 /*
479 * RECV completion.
480 */
481 if (RQ_TYPE(hw_cqe)) {
482
483 /*
484 * HW only validates 4 bits of MSN. So we must validate that
485		 * the MSN in the SEND is the next expected MSN.  If it's not,
486 * then we complete this with T4_ERR_MSN and mark the wq in
487 * error.
488 */
489
490 if (t4_rq_empty(wq)) {
491 t4_set_wq_in_error(wq);
492 ret = -EAGAIN;
493 goto skip_cqe;
494 }
495 if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
496 t4_set_wq_in_error(wq);
497 hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
498 goto proc_cqe;
499 }
500 goto proc_cqe;
501 }
502
503 /*
504	 * If we get here it's a send completion.
505 *
506 * Handle out of order completion. These get stuffed
507 * in the SW SQ. Then the SW SQ is walked to move any
508 * now in-order completions into the SW CQ. This handles
509 * 2 cases:
510 * 1) reaping unsignaled WRs when the first subsequent
511 * signaled WR is completed.
512 * 2) out of order read completions.
513 */
514 if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
515 struct t4_swsqe *swsqe;
516
517 PDBG("%s out of order completion going in sw_sq at idx %u\n",
518 __func__, CQE_WRID_SQ_IDX(hw_cqe));
519 swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
520 swsqe->cqe = *hw_cqe;
521 swsqe->complete = 1;
522 ret = -EAGAIN;
523 goto flush_wq;
524 }
525
526proc_cqe:
527 *cqe = *hw_cqe;
528
529 /*
530 * Reap the associated WR(s) that are freed up with this
531 * completion.
532 */
533 if (SQ_TYPE(hw_cqe)) {
534 wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
535 PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
536 *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
537 t4_sq_consume(wq);
538 } else {
539 PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
540 *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
541 BUG_ON(t4_rq_empty(wq));
542 t4_rq_consume(wq);
543 }
544
545flush_wq:
546 /*
547 * Flush any completed cqes that are now in-order.
548 */
549 flush_completed_wrs(wq, cq);
550
551skip_cqe:
552 if (SW_CQE(hw_cqe)) {
553 PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
554 __func__, cq, cq->cqid, cq->sw_cidx);
555 t4_swcq_consume(cq);
556 } else {
557 PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
558 __func__, cq, cq->cqid, cq->cidx);
559 t4_hwcq_consume(cq);
560 }
561 return ret;
562}
563
564/*
565 * Get one cq entry from c4iw and map it to openib.
566 *
567 * Returns:
568 * 0 cqe returned
569 * -ENODATA	CQ empty
570 * -EAGAIN caller must try again
571 * any other -errno fatal error
572 */
573static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
574{
575 struct c4iw_qp *qhp = NULL;
576 struct t4_cqe cqe = {0, 0}, *rd_cqe;
577 struct t4_wq *wq;
578 u32 credit = 0;
579 u8 cqe_flushed;
580 u64 cookie = 0;
581 int ret;
582
583 ret = t4_next_cqe(&chp->cq, &rd_cqe);
584
585 if (ret)
586 return ret;
587
588 qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
589 if (!qhp)
590 wq = NULL;
591 else {
592 spin_lock(&qhp->lock);
593 wq = &(qhp->wq);
594 }
595 ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
596 if (ret)
597 goto out;
598
599 wc->wr_id = cookie;
600 wc->qp = &qhp->ibqp;
601 wc->vendor_err = CQE_STATUS(&cqe);
602 wc->wc_flags = 0;
603
604 PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
605 "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
606 CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
607 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);
608
609 if (CQE_TYPE(&cqe) == 0) {
610 if (!CQE_STATUS(&cqe))
611 wc->byte_len = CQE_LEN(&cqe);
612 else
613 wc->byte_len = 0;
614 wc->opcode = IB_WC_RECV;
615 if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
616 CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
617 wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
618 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
619 }
620 } else {
621 switch (CQE_OPCODE(&cqe)) {
622 case FW_RI_RDMA_WRITE:
623 wc->opcode = IB_WC_RDMA_WRITE;
624 break;
625 case FW_RI_READ_REQ:
626 wc->opcode = IB_WC_RDMA_READ;
627 wc->byte_len = CQE_LEN(&cqe);
628 break;
629 case FW_RI_SEND_WITH_INV:
630 case FW_RI_SEND_WITH_SE_INV:
631 wc->opcode = IB_WC_SEND;
632 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
633 break;
634 case FW_RI_SEND:
635 case FW_RI_SEND_WITH_SE:
636 wc->opcode = IB_WC_SEND;
637 break;
638 case FW_RI_BIND_MW:
639 wc->opcode = IB_WC_BIND_MW;
640 break;
641
642 case FW_RI_LOCAL_INV:
643 wc->opcode = IB_WC_LOCAL_INV;
644 break;
645 case FW_RI_FAST_REGISTER:
646 wc->opcode = IB_WC_FAST_REG_MR;
647 break;
648 default:
649 printk(KERN_ERR MOD "Unexpected opcode %d "
650 "in the CQE received for QPID=0x%0x\n",
651 CQE_OPCODE(&cqe), CQE_QPID(&cqe));
652 ret = -EINVAL;
653 goto out;
654 }
655 }
656
657 if (cqe_flushed)
658 wc->status = IB_WC_WR_FLUSH_ERR;
659 else {
660
661 switch (CQE_STATUS(&cqe)) {
662 case T4_ERR_SUCCESS:
663 wc->status = IB_WC_SUCCESS;
664 break;
665 case T4_ERR_STAG:
666 wc->status = IB_WC_LOC_ACCESS_ERR;
667 break;
668 case T4_ERR_PDID:
669 wc->status = IB_WC_LOC_PROT_ERR;
670 break;
671 case T4_ERR_QPID:
672 case T4_ERR_ACCESS:
673 wc->status = IB_WC_LOC_ACCESS_ERR;
674 break;
675 case T4_ERR_WRAP:
676 wc->status = IB_WC_GENERAL_ERR;
677 break;
678 case T4_ERR_BOUND:
679 wc->status = IB_WC_LOC_LEN_ERR;
680 break;
681 case T4_ERR_INVALIDATE_SHARED_MR:
682 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
683 wc->status = IB_WC_MW_BIND_ERR;
684 break;
685 case T4_ERR_CRC:
686 case T4_ERR_MARKER:
687 case T4_ERR_PDU_LEN_ERR:
688 case T4_ERR_OUT_OF_RQE:
689 case T4_ERR_DDP_VERSION:
690 case T4_ERR_RDMA_VERSION:
691 case T4_ERR_DDP_QUEUE_NUM:
692 case T4_ERR_MSN:
693 case T4_ERR_TBIT:
694 case T4_ERR_MO:
695 case T4_ERR_MSN_RANGE:
696 case T4_ERR_IRD_OVERFLOW:
697 case T4_ERR_OPCODE:
698 wc->status = IB_WC_FATAL_ERR;
699 break;
700 case T4_ERR_SWFLUSH:
701 wc->status = IB_WC_WR_FLUSH_ERR;
702 break;
703 default:
704 printk(KERN_ERR MOD
705 "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
706 CQE_STATUS(&cqe), CQE_QPID(&cqe));
707 ret = -EINVAL;
708 }
709 }
710out:
711 if (wq)
712 spin_unlock(&qhp->lock);
713 return ret;
714}
715
716int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
717{
718 struct c4iw_cq *chp;
719 unsigned long flags;
720 int npolled;
721 int err = 0;
722
723 chp = to_c4iw_cq(ibcq);
724
725 spin_lock_irqsave(&chp->lock, flags);
726 for (npolled = 0; npolled < num_entries; ++npolled) {
727 do {
728 err = c4iw_poll_cq_one(chp, wc + npolled);
729 } while (err == -EAGAIN);
730 if (err)
731 break;
732 }
733 spin_unlock_irqrestore(&chp->lock, flags);
734 return !err || err == -ENODATA ? npolled : err;
735}
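
c4iw_poll_cq() retries -EAGAIN internally, so a verbs consumer sees only real completions or an empty CQ. A minimal sketch of the standard consumer loop it serves (kernel verbs API; error handling elided):

	static void example_drain_cq(struct ib_cq *cq)
	{
		struct ib_wc wc[16];
		int n, i;

		while ((n = ib_poll_cq(cq, 16, wc)) > 0)
			for (i = 0; i < n; i++)
				if (wc[i].status != IB_WC_SUCCESS)
					printk(KERN_ERR "wr_id 0x%llx status %d\n",
					       (unsigned long long)wc[i].wr_id,
					       wc[i].status);
	}
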
736
737int c4iw_destroy_cq(struct ib_cq *ib_cq)
738{
739 struct c4iw_cq *chp;
740 struct c4iw_ucontext *ucontext;
741
742 PDBG("%s ib_cq %p\n", __func__, ib_cq);
743 chp = to_c4iw_cq(ib_cq);
744
745 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
746 atomic_dec(&chp->refcnt);
747 wait_event(chp->wait, !atomic_read(&chp->refcnt));
748
749 ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
750 : NULL;
751 destroy_cq(&chp->rhp->rdev, &chp->cq,
752 ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
753 kfree(chp);
754 return 0;
755}
756
757struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
758 int vector, struct ib_ucontext *ib_context,
759 struct ib_udata *udata)
760{
761 struct c4iw_dev *rhp;
762 struct c4iw_cq *chp;
763 struct c4iw_create_cq_resp uresp;
764 struct c4iw_ucontext *ucontext = NULL;
765 int ret;
766 size_t memsize;
767 struct c4iw_mm_entry *mm, *mm2;
768
769 PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
770
771 rhp = to_c4iw_dev(ibdev);
772
773 chp = kzalloc(sizeof(*chp), GFP_KERNEL);
774 if (!chp)
775 return ERR_PTR(-ENOMEM);
776
777 if (ib_context)
778 ucontext = to_c4iw_ucontext(ib_context);
779
780 /* account for the status page. */
781 entries++;
782
783 /*
784	 * entries must be a multiple of 16 for HW.
785 */
786 entries = roundup(entries, 16);
787 memsize = entries * sizeof *chp->cq.queue;
788
789 /*
790	 * memsize must be a multiple of the page size if it's a user cq.
791 */
792 if (ucontext)
793 memsize = roundup(memsize, PAGE_SIZE);
794 chp->cq.size = entries;
795 chp->cq.memsize = memsize;
796
797 ret = create_cq(&rhp->rdev, &chp->cq,
798 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
799 if (ret)
800 goto err1;
801
802 chp->rhp = rhp;
803 chp->cq.size--; /* status page */
804 chp->ibcq.cqe = chp->cq.size;
805 spin_lock_init(&chp->lock);
806 atomic_set(&chp->refcnt, 1);
807 init_waitqueue_head(&chp->wait);
808 ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
809 if (ret)
810 goto err2;
811
812	if (ucontext) {
813		ret = -ENOMEM;	/* covers the kmalloc failure paths below */
814		mm = kmalloc(sizeof *mm, GFP_KERNEL);
815		if (!mm)
816			goto err3;
817		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
818		if (!mm2)
819			goto err4;
820 uresp.qid_mask = rhp->rdev.cqmask;
821 uresp.cqid = chp->cq.cqid;
822 uresp.size = chp->cq.size;
823 uresp.memsize = chp->cq.memsize;
824 spin_lock(&ucontext->mmap_lock);
825 uresp.key = ucontext->key;
826 ucontext->key += PAGE_SIZE;
827 uresp.gts_key = ucontext->key;
828 ucontext->key += PAGE_SIZE;
829 spin_unlock(&ucontext->mmap_lock);
830 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
831 if (ret)
832 goto err5;
833
834 mm->key = uresp.key;
835 mm->addr = virt_to_phys(chp->cq.queue);
836 mm->len = chp->cq.memsize;
837 insert_mmap(ucontext, mm);
838
839 mm2->key = uresp.gts_key;
840 mm2->addr = chp->cq.ugts;
841 mm2->len = PAGE_SIZE;
842 insert_mmap(ucontext, mm2);
843 }
844 PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
845 __func__, chp->cq.cqid, chp, chp->cq.size,
846 chp->cq.memsize,
847 (unsigned long long) chp->cq.dma_addr);
848 return &chp->ibcq;
849err5:
850 kfree(mm2);
851err4:
852 kfree(mm);
853err3:
854 remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
855err2:
856 destroy_cq(&chp->rhp->rdev, &chp->cq,
857 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
858err1:
859 kfree(chp);
860 return ERR_PTR(ret);
861}
862
863int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
864{
865 return -ENOSYS;
866}
867
868int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
869{
870 struct c4iw_cq *chp;
871 int ret;
872 unsigned long flag;
873
874 chp = to_c4iw_cq(ibcq);
875 spin_lock_irqsave(&chp->lock, flag);
876 ret = t4_arm_cq(&chp->cq,
877 (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
878 spin_unlock_irqrestore(&chp->lock, flag);
879 if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
880 ret = 0;
881 return ret;
882}
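
The IB_CQ_REPORT_MISSED_EVENTS handling above is what lets consumers close the arm/poll race: a positive return from ib_req_notify_cq() means CQEs may already be pending, so the CQ must be re-polled before sleeping. A hedged sketch of the canonical sequence:

	static void example_arm_and_repoll(struct ib_cq *cq)
	{
		struct ib_wc wc;

		while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
					IB_CQ_REPORT_MISSED_EVENTS) > 0)
			while (ib_poll_cq(cq, 1, &wc) > 0)
				;	/* real code would process wc here */
	}
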
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
new file mode 100644
index 000000000000..be23b5eab13b
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -0,0 +1,520 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/debugfs.h>
35
36#include <rdma/ib_verbs.h>
37
38#include "iw_cxgb4.h"
39
40#define DRV_VERSION "0.1"
41
42MODULE_AUTHOR("Steve Wise");
43MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
44MODULE_LICENSE("Dual BSD/GPL");
45MODULE_VERSION(DRV_VERSION);
46
47static LIST_HEAD(dev_list);
48static DEFINE_MUTEX(dev_mutex);
49
50static struct dentry *c4iw_debugfs_root;
51
52struct debugfs_qp_data {
53 struct c4iw_dev *devp;
54 char *buf;
55 int bufsize;
56 int pos;
57};
58
59static int count_qps(int id, void *p, void *data)
60{
61 struct c4iw_qp *qp = p;
62 int *countp = data;
63
64 if (id != qp->wq.sq.qid)
65 return 0;
66
67 *countp = *countp + 1;
68 return 0;
69}
70
71static int dump_qps(int id, void *p, void *data)
72{
73 struct c4iw_qp *qp = p;
74 struct debugfs_qp_data *qpd = data;
75 int space;
76 int cc;
77
78 if (id != qp->wq.sq.qid)
79 return 0;
80
81 space = qpd->bufsize - qpd->pos - 1;
82 if (space == 0)
83 return 1;
84
85 if (qp->ep)
86 cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
87 "ep tid %u state %u %pI4:%u->%pI4:%u\n",
88 qp->wq.sq.qid, (int)qp->attr.state,
89 qp->ep->hwtid, (int)qp->ep->com.state,
90 &qp->ep->com.local_addr.sin_addr.s_addr,
91 ntohs(qp->ep->com.local_addr.sin_port),
92 &qp->ep->com.remote_addr.sin_addr.s_addr,
93 ntohs(qp->ep->com.remote_addr.sin_port));
94 else
95 cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
96 qp->wq.sq.qid, (int)qp->attr.state);
97 if (cc < space)
98 qpd->pos += cc;
99 return 0;
100}
101
102static int qp_release(struct inode *inode, struct file *file)
103{
104 struct debugfs_qp_data *qpd = file->private_data;
105 if (!qpd) {
106 printk(KERN_INFO "%s null qpd?\n", __func__);
107 return 0;
108 }
109 kfree(qpd->buf);
110 kfree(qpd);
111 return 0;
112}
113
114static int qp_open(struct inode *inode, struct file *file)
115{
116 struct debugfs_qp_data *qpd;
117 int ret = 0;
118 int count = 1;
119
120 qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
121 if (!qpd) {
122 ret = -ENOMEM;
123 goto out;
124 }
125 qpd->devp = inode->i_private;
126 qpd->pos = 0;
127
128 spin_lock_irq(&qpd->devp->lock);
129 idr_for_each(&qpd->devp->qpidr, count_qps, &count);
130 spin_unlock_irq(&qpd->devp->lock);
131
132 qpd->bufsize = count * 128;
133 qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
134 if (!qpd->buf) {
135 ret = -ENOMEM;
136 goto err1;
137 }
138
139 spin_lock_irq(&qpd->devp->lock);
140 idr_for_each(&qpd->devp->qpidr, dump_qps, qpd);
141 spin_unlock_irq(&qpd->devp->lock);
142
143 qpd->buf[qpd->pos++] = 0;
144 file->private_data = qpd;
145 goto out;
146err1:
147 kfree(qpd);
148out:
149 return ret;
150}
151
152static ssize_t qp_read(struct file *file, char __user *buf, size_t count,
153			loff_t *ppos)
154{
155	struct debugfs_qp_data *qpd = file->private_data;
156
157	/* The open-coded bounds checks and copy loop reduce to the stock
158	 * helper: copy straight out of the snapshot built at open time. */
159	return simple_read_from_buffer(buf, count, ppos, qpd->buf, qpd->pos);
160}
183
184static const struct file_operations qp_debugfs_fops = {
185 .owner = THIS_MODULE,
186 .open = qp_open,
187 .release = qp_release,
188 .read = qp_read,
189};
190
191static int setup_debugfs(struct c4iw_dev *devp)
192{
193 struct dentry *de;
194
195 if (!devp->debugfs_root)
196 return -1;
197
198	de = debugfs_create_file("qps", S_IRUSR, devp->debugfs_root,
199 (void *)devp, &qp_debugfs_fops);
200 if (de && de->d_inode)
201 de->d_inode->i_size = 4096;
202 return 0;
203}
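
With debugfs mounted at its usual location, the snapshot built by dump_qps() is then readable at /sys/kernel/debug/iw_cxgb4/<pci-id>/qps, where the directory names come from DRV_NAME (created in c4iw_init_module() below) and pci_name() (in c4iw_alloc() below).
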
204
205void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
206 struct c4iw_dev_ucontext *uctx)
207{
208 struct list_head *pos, *nxt;
209 struct c4iw_qid_list *entry;
210
211 mutex_lock(&uctx->lock);
212 list_for_each_safe(pos, nxt, &uctx->qpids) {
213 entry = list_entry(pos, struct c4iw_qid_list, entry);
214 list_del_init(&entry->entry);
215 if (!(entry->qid & rdev->qpmask))
216 c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
217 &rdev->resource.qid_fifo_lock);
218 kfree(entry);
219 }
220
221	list_for_each_safe(pos, nxt, &uctx->cqids) {
222 entry = list_entry(pos, struct c4iw_qid_list, entry);
223 list_del_init(&entry->entry);
224 kfree(entry);
225 }
226 mutex_unlock(&uctx->lock);
227}
228
229void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
230 struct c4iw_dev_ucontext *uctx)
231{
232 INIT_LIST_HEAD(&uctx->qpids);
233 INIT_LIST_HEAD(&uctx->cqids);
234 mutex_init(&uctx->lock);
235}
236
237/* Caller takes care of locking if needed */
238static int c4iw_rdev_open(struct c4iw_rdev *rdev)
239{
240 int err;
241
242 c4iw_init_dev_ucontext(rdev, &rdev->uctx);
243
244 /*
245 * qpshift is the number of bits to shift the qpid left in order
246 * to get the correct address of the doorbell for that qp.
247 */
248 rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
249 rdev->qpmask = rdev->lldi.udb_density - 1;
250 rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
251 rdev->cqmask = rdev->lldi.ucq_density - 1;
252 PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
253 "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x\n",
254 __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
255 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
256 rdev->lldi.vr->pbl.start,
257 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
258 rdev->lldi.vr->rq.size);
259 PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
260 "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
261 (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
262 (void *)pci_resource_start(rdev->lldi.pdev, 2),
263 rdev->lldi.db_reg,
264 rdev->lldi.gts_reg,
265 rdev->qpshift, rdev->qpmask,
266 rdev->cqshift, rdev->cqmask);
267
268 if (c4iw_num_stags(rdev) == 0) {
269 err = -EINVAL;
270 goto err1;
271 }
272
273 err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
274 if (err) {
275 printk(KERN_ERR MOD "error %d initializing resources\n", err);
276 goto err1;
277 }
278 err = c4iw_pblpool_create(rdev);
279 if (err) {
280 printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
281 goto err2;
282 }
283 err = c4iw_rqtpool_create(rdev);
284 if (err) {
285 printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
286 goto err3;
287 }
288 return 0;
289err3:
290 c4iw_pblpool_destroy(rdev);
291err2:
292 c4iw_destroy_resource(&rdev->resource);
293err1:
294 return err;
295}
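
The qpshift comment is worth making concrete: BAR2 packs udb_density doorbell regions per host page, so shifting a qid left by qpshift yields that queue's page-aligned doorbell offset, which is exactly how create_cq() in cq.c computes cq->ugts with cqshift. A sketch of the math (hypothetical helper, not driver code):

	/* Hypothetical: BAR2 address of the user doorbell page for 'qid'. */
	static unsigned long example_udb_addr(struct c4iw_rdev *rdev, u32 qid)
	{
		return ((unsigned long)pci_resource_start(rdev->lldi.pdev, 2) +
			((unsigned long)qid << rdev->qpshift)) & PAGE_MASK;
	}
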
296
297static void c4iw_rdev_close(struct c4iw_rdev *rdev)
298{
299 c4iw_pblpool_destroy(rdev);
300 c4iw_rqtpool_destroy(rdev);
301 c4iw_destroy_resource(&rdev->resource);
302}
303
304static void c4iw_remove(struct c4iw_dev *dev)
305{
306 PDBG("%s c4iw_dev %p\n", __func__, dev);
307 cancel_delayed_work_sync(&dev->db_drop_task);
308 list_del(&dev->entry);
309 c4iw_unregister_device(dev);
310 c4iw_rdev_close(&dev->rdev);
311 idr_destroy(&dev->cqidr);
312 idr_destroy(&dev->qpidr);
313 idr_destroy(&dev->mmidr);
314 ib_dealloc_device(&dev->ibdev);
315}
316
317static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
318{
319 struct c4iw_dev *devp;
320 int ret;
321
322 devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
323 if (!devp) {
324 printk(KERN_ERR MOD "Cannot allocate ib device\n");
325 return NULL;
326 }
327 devp->rdev.lldi = *infop;
328
329 mutex_lock(&dev_mutex);
330
331 ret = c4iw_rdev_open(&devp->rdev);
332 if (ret) {
333 mutex_unlock(&dev_mutex);
334 printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
335 ib_dealloc_device(&devp->ibdev);
336 return NULL;
337 }
338
339 idr_init(&devp->cqidr);
340 idr_init(&devp->qpidr);
341 idr_init(&devp->mmidr);
342 spin_lock_init(&devp->lock);
343 list_add_tail(&devp->entry, &dev_list);
344 mutex_unlock(&dev_mutex);
345
346	if (c4iw_register_device(devp)) {
347		printk(KERN_ERR MOD "Unable to register device\n");
348		mutex_lock(&dev_mutex);
349		c4iw_remove(devp);	/* frees devp via ib_dealloc_device() */
350		mutex_unlock(&dev_mutex);
351		return NULL;
352	}
353	if (c4iw_debugfs_root) {
354		devp->debugfs_root = debugfs_create_dir(
355					pci_name(devp->rdev.lldi.pdev),
356					c4iw_debugfs_root);
357		setup_debugfs(devp);
358	}
359	return devp;
360}
361static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
362{
363 struct c4iw_dev *dev;
364 static int vers_printed;
365 int i;
366
367 if (!vers_printed++)
368 printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
369 DRV_VERSION);
370
371 dev = c4iw_alloc(infop);
372 if (!dev)
373 goto out;
374
375 PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
376 __func__, pci_name(dev->rdev.lldi.pdev),
377 dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
378 dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);
379
380 for (i = 0; i < dev->rdev.lldi.nrxq; i++)
381 PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
382
383 printk(KERN_INFO MOD "Initialized device %s\n",
384 pci_name(dev->rdev.lldi.pdev));
385out:
386 return dev;
387}
388
389static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl,
390 unsigned int skb_len,
391 unsigned int pull_len)
392{
393 struct sk_buff *skb;
394 struct skb_shared_info *ssi;
395
396 if (gl->tot_len <= 512) {
397 skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
398 if (unlikely(!skb))
399 goto out;
400 __skb_put(skb, gl->tot_len);
401 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
402 } else {
403 skb = alloc_skb(skb_len, GFP_ATOMIC);
404 if (unlikely(!skb))
405 goto out;
406 __skb_put(skb, pull_len);
407 skb_copy_to_linear_data(skb, gl->va, pull_len);
408
409 ssi = skb_shinfo(skb);
410 ssi->frags[0].page = gl->frags[0].page;
411 ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
412 ssi->frags[0].size = gl->frags[0].size - pull_len;
413 if (gl->nfrags > 1)
414 memcpy(&ssi->frags[1], &gl->frags[1],
415 (gl->nfrags - 1) * sizeof(skb_frag_t));
416 ssi->nr_frags = gl->nfrags;
417
418 skb->len = gl->tot_len;
419 skb->data_len = skb->len - pull_len;
420 skb->truesize += skb->data_len;
421
422		/* Get a reference for the last page; we don't own it */
423 get_page(gl->frags[gl->nfrags - 1].page);
424 }
425out:
426 return skb;
427}
428
429static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
430 const struct pkt_gl *gl)
431{
432 struct c4iw_dev *dev = handle;
433 struct sk_buff *skb;
434 const struct cpl_act_establish *rpl;
435 unsigned int opcode;
436
437 if (gl == NULL) {
438 /* omit RSS and rsp_ctrl at end of descriptor */
439 unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
440
441 skb = alloc_skb(256, GFP_ATOMIC);
442 if (!skb)
443 goto nomem;
444 __skb_put(skb, len);
445 skb_copy_to_linear_data(skb, &rsp[1], len);
446 } else if (gl == CXGB4_MSG_AN) {
447 const struct rsp_ctrl *rc = (void *)rsp;
448
449 u32 qid = be32_to_cpu(rc->pldbuflen_qid);
450 c4iw_ev_handler(dev, qid);
451 return 0;
452 } else {
453 skb = t4_pktgl_to_skb(gl, 128, 128);
454 if (unlikely(!skb))
455 goto nomem;
456 }
457
458 rpl = cplhdr(skb);
459 opcode = rpl->ot.opcode;
460
461 if (c4iw_handlers[opcode])
462 c4iw_handlers[opcode](dev, skb);
463 else
464 printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
465 opcode);
466
467 return 0;
468nomem:
469 return -1;
470}
471
472static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
473{
474 PDBG("%s new_state %u\n", __func__, new_state);
475 return 0;
476}
477
478static struct cxgb4_uld_info c4iw_uld_info = {
479 .name = DRV_NAME,
480 .add = c4iw_uld_add,
481 .rx_handler = c4iw_uld_rx_handler,
482 .state_change = c4iw_uld_state_change,
483};
484
485static int __init c4iw_init_module(void)
486{
487 int err;
488
489 err = c4iw_cm_init();
490 if (err)
491 return err;
492
493 c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
494 if (!c4iw_debugfs_root)
495 printk(KERN_WARNING MOD
496 "could not create debugfs entry, continuing\n");
497
498 cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
499
500 return 0;
501}
502
503static void __exit c4iw_exit_module(void)
504{
505 struct c4iw_dev *dev, *tmp;
506
507 cxgb4_unregister_uld(CXGB4_ULD_RDMA);
508
509 mutex_lock(&dev_mutex);
510 list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
511 c4iw_remove(dev);
512 }
513 mutex_unlock(&dev_mutex);
514
515 c4iw_cm_term();
516 debugfs_remove_recursive(c4iw_debugfs_root);
517}
518
519module_init(c4iw_init_module);
520module_exit(c4iw_exit_module);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
new file mode 100644
index 000000000000..491e76a0327f
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -0,0 +1,193 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/slab.h>
33#include <linux/mman.h>
34#include <net/sock.h>
35
36#include "iw_cxgb4.h"
37
38static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
39 struct c4iw_qp *qhp,
40 struct t4_cqe *err_cqe,
41 enum ib_event_type ib_event)
42{
43 struct ib_event event;
44 struct c4iw_qp_attributes attrs;
45
46 if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
47 (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
48 PDBG("%s AE received after RTS - "
49 "qp state %d qpid 0x%x status 0x%x\n", __func__,
50 qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
51 return;
52 }
53
54 printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
55 "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
56 CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
57 CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
58 CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
59
60 if (qhp->attr.state == C4IW_QP_STATE_RTS) {
61 attrs.next_state = C4IW_QP_STATE_TERMINATE;
62 c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
63 &attrs, 1);
64 }
65
66 event.event = ib_event;
67 event.device = chp->ibcq.device;
68 if (ib_event == IB_EVENT_CQ_ERR)
69 event.element.cq = &chp->ibcq;
70 else
71 event.element.qp = &qhp->ibqp;
72 if (qhp->ibqp.event_handler)
73 (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
74
75 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
76}
77
78void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
79{
80 struct c4iw_cq *chp;
81 struct c4iw_qp *qhp;
82 u32 cqid;
83
84 spin_lock(&dev->lock);
85 qhp = get_qhp(dev, CQE_QPID(err_cqe));
86 if (!qhp) {
87 printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
88 "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
89 CQE_QPID(err_cqe),
90 CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
91 CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
92 CQE_WRID_LOW(err_cqe));
93 spin_unlock(&dev->lock);
94 goto out;
95 }
96
97 if (SQ_TYPE(err_cqe))
98 cqid = qhp->attr.scq;
99 else
100 cqid = qhp->attr.rcq;
101 chp = get_chp(dev, cqid);
102 if (!chp) {
103 printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
104 "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
105 cqid, CQE_QPID(err_cqe),
106 CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
107 CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
108 CQE_WRID_LOW(err_cqe));
109 spin_unlock(&dev->lock);
110 goto out;
111 }
112
113 c4iw_qp_add_ref(&qhp->ibqp);
114 atomic_inc(&chp->refcnt);
115 spin_unlock(&dev->lock);
116
117 /* Bad incoming write */
118 if (RQ_TYPE(err_cqe) &&
119 (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
120 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
121 goto done;
122 }
123
124 switch (CQE_STATUS(err_cqe)) {
125
126 /* Completion Events */
127 case T4_ERR_SUCCESS:
128 printk(KERN_ERR MOD "AE with status 0!\n");
129 break;
130
131 case T4_ERR_STAG:
132 case T4_ERR_PDID:
133 case T4_ERR_QPID:
134 case T4_ERR_ACCESS:
135 case T4_ERR_WRAP:
136 case T4_ERR_BOUND:
137 case T4_ERR_INVALIDATE_SHARED_MR:
138 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
139 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
140 break;
141
142 /* Device Fatal Errors */
143 case T4_ERR_ECC:
144 case T4_ERR_ECC_PSTAG:
145 case T4_ERR_INTERNAL_ERR:
146 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
147 break;
148
149 /* QP Fatal Errors */
150 case T4_ERR_OUT_OF_RQE:
151 case T4_ERR_PBL_ADDR_BOUND:
152 case T4_ERR_CRC:
153 case T4_ERR_MARKER:
154 case T4_ERR_PDU_LEN_ERR:
155 case T4_ERR_DDP_VERSION:
156 case T4_ERR_RDMA_VERSION:
157 case T4_ERR_OPCODE:
158 case T4_ERR_DDP_QUEUE_NUM:
159 case T4_ERR_MSN:
160 case T4_ERR_TBIT:
161 case T4_ERR_MO:
162 case T4_ERR_MSN_GAP:
163 case T4_ERR_MSN_RANGE:
164 case T4_ERR_RQE_ADDR_BOUND:
165 case T4_ERR_IRD_OVERFLOW:
166 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
167 break;
168
169 default:
170 printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
171 CQE_STATUS(err_cqe), qhp->wq.sq.qid);
172 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
173 break;
174 }
175done:
176 if (atomic_dec_and_test(&chp->refcnt))
177 wake_up(&chp->wait);
178 c4iw_qp_rem_ref(&qhp->ibqp);
179out:
180 return;
181}
182
183int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
184{
185 struct c4iw_cq *chp;
186
187 chp = get_chp(dev, qid);
188 if (chp)
189 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
190 else
191 PDBG("%s unknown cqid 0x%x\n", __func__, qid);
192 return 0;
193}
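
The events posted above surface through handlers the verbs consumer registered; a minimal sketch of the consuming side (illustrative):

	/* Supplied via struct ib_qp_init_attr.event_handler at QP creation. */
	static void example_qp_event_handler(struct ib_event *event, void *ctx)
	{
		if (event->event == IB_EVENT_QP_FATAL ||
		    event->event == IB_EVENT_QP_ACCESS_ERR)
			printk(KERN_ERR "async QP event %d\n", event->event);
	}
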
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
new file mode 100644
index 000000000000..a6269981e815
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -0,0 +1,745 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31#ifndef __IW_CXGB4_H__
32#define __IW_CXGB4_H__
33
34#include <linux/mutex.h>
35#include <linux/list.h>
36#include <linux/spinlock.h>
37#include <linux/idr.h>
38#include <linux/workqueue.h>
39#include <linux/netdevice.h>
40#include <linux/sched.h>
41#include <linux/pci.h>
42#include <linux/dma-mapping.h>
43#include <linux/inet.h>
44#include <linux/wait.h>
45#include <linux/kref.h>
46#include <linux/timer.h>
47#include <linux/io.h>
48#include <linux/kfifo.h>
49
50#include <asm/byteorder.h>
51
52#include <net/net_namespace.h>
53
54#include <rdma/ib_verbs.h>
55#include <rdma/iw_cm.h>
56
57#include "cxgb4.h"
58#include "cxgb4_uld.h"
59#include "l2t.h"
60#include "user.h"
61
62#define DRV_NAME "iw_cxgb4"
63#define MOD DRV_NAME ":"
64
65extern int c4iw_debug;
66#define PDBG(fmt, args...) \
67do { \
68 if (c4iw_debug) \
69 printk(MOD fmt, ## args); \
70} while (0)
71
72#include "t4.h"
73
74#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
75#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)
76
77static inline void *cplhdr(struct sk_buff *skb)
78{
79 return skb->data;
80}
81
82#define C4IW_WR_TO (10*HZ)
83
84struct c4iw_wr_wait {
85 wait_queue_head_t wait;
86 int done;
87 int ret;
88};
89
90static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
91{
92 wr_waitp->ret = 0;
93 wr_waitp->done = 0;
94 init_waitqueue_head(&wr_waitp->wait);
95}
96
97struct c4iw_resource {
98 struct kfifo tpt_fifo;
99 spinlock_t tpt_fifo_lock;
100 struct kfifo qid_fifo;
101 spinlock_t qid_fifo_lock;
102 struct kfifo pdid_fifo;
103 spinlock_t pdid_fifo_lock;
104};
105
106struct c4iw_qid_list {
107 struct list_head entry;
108 u32 qid;
109};
110
111struct c4iw_dev_ucontext {
112 struct list_head qpids;
113 struct list_head cqids;
114 struct mutex lock;
115};
116
117enum c4iw_rdev_flags {
118 T4_FATAL_ERROR = (1<<0),
119};
120
121struct c4iw_rdev {
122 struct c4iw_resource resource;
123 unsigned long qpshift;
124 u32 qpmask;
125 unsigned long cqshift;
126 u32 cqmask;
127 struct c4iw_dev_ucontext uctx;
128 struct gen_pool *pbl_pool;
129 struct gen_pool *rqt_pool;
130 u32 flags;
131 struct cxgb4_lld_info lldi;
132};
133
134static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
135{
136 return rdev->flags & T4_FATAL_ERROR;
137}
138
139static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
140{
141 return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
142}
143
144struct c4iw_dev {
145 struct ib_device ibdev;
146 struct c4iw_rdev rdev;
147 u32 device_cap_flags;
148 struct idr cqidr;
149 struct idr qpidr;
150 struct idr mmidr;
151 spinlock_t lock;
152 struct list_head entry;
153 struct delayed_work db_drop_task;
154 struct dentry *debugfs_root;
155};
156
157static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
158{
159 return container_of(ibdev, struct c4iw_dev, ibdev);
160}
161
162static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
163{
164 return container_of(rdev, struct c4iw_dev, rdev);
165}
166
167static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
168{
169 return idr_find(&rhp->cqidr, cqid);
170}
171
172static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
173{
174 return idr_find(&rhp->qpidr, qpid);
175}
176
177static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
178{
179 return idr_find(&rhp->mmidr, mmid);
180}
181
182static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
183 void *handle, u32 id)
184{
185 int ret;
186 int newid;
187
188 do {
189 if (!idr_pre_get(idr, GFP_KERNEL))
190 return -ENOMEM;
191 spin_lock_irq(&rhp->lock);
192 ret = idr_get_new_above(idr, handle, id, &newid);
193 BUG_ON(newid != id);
194 spin_unlock_irq(&rhp->lock);
195 } while (ret == -EAGAIN);
196
197 return ret;
198}
199
200static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
201{
202 spin_lock_irq(&rhp->lock);
203 idr_remove(idr, id);
204 spin_unlock_irq(&rhp->lock);
205}
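
insert_handle() hides the idr_pre_get()/idr_get_new_above() retry dance and asserts, via the BUG_ON, that the idr hands back exactly the hardware id requested, so get_qhp()/get_chp()/get_mhp() lookups are exact. An illustrative lifecycle for a QP (assumed helper, not driver code):

	static int example_publish_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
	{
		int ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);

		if (ret)
			return ret;
		WARN_ON(get_qhp(rhp, qhp->wq.sq.qid) != qhp);	/* exact map */
		remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
		return 0;
	}
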
206
207struct c4iw_pd {
208 struct ib_pd ibpd;
209 u32 pdid;
210 struct c4iw_dev *rhp;
211};
212
213static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
214{
215 return container_of(ibpd, struct c4iw_pd, ibpd);
216}
217
218struct tpt_attributes {
219 u64 len;
220 u64 va_fbo;
221 enum fw_ri_mem_perms perms;
222 u32 stag;
223 u32 pdid;
224 u32 qpid;
225 u32 pbl_addr;
226 u32 pbl_size;
227 u32 state:1;
228 u32 type:2;
229 u32 rsvd:1;
230 u32 remote_invaliate_disable:1;
231 u32 zbva:1;
232 u32 mw_bind_enable:1;
233 u32 page_size:5;
234};
235
236struct c4iw_mr {
237 struct ib_mr ibmr;
238 struct ib_umem *umem;
239 struct c4iw_dev *rhp;
240 u64 kva;
241 struct tpt_attributes attr;
242};
243
244static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
245{
246 return container_of(ibmr, struct c4iw_mr, ibmr);
247}
248
249struct c4iw_mw {
250 struct ib_mw ibmw;
251 struct c4iw_dev *rhp;
252 u64 kva;
253 struct tpt_attributes attr;
254};
255
256static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
257{
258 return container_of(ibmw, struct c4iw_mw, ibmw);
259}
260
261struct c4iw_fr_page_list {
262 struct ib_fast_reg_page_list ibpl;
263 DECLARE_PCI_UNMAP_ADDR(mapping);
264 dma_addr_t dma_addr;
265 struct c4iw_dev *dev;
266 int size;
267};
268
269static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
270 struct ib_fast_reg_page_list *ibpl)
271{
272 return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
273}
274
275struct c4iw_cq {
276 struct ib_cq ibcq;
277 struct c4iw_dev *rhp;
278 struct t4_cq cq;
279 spinlock_t lock;
280 atomic_t refcnt;
281 wait_queue_head_t wait;
282};
283
284static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
285{
286 return container_of(ibcq, struct c4iw_cq, ibcq);
287}
288
289struct c4iw_mpa_attributes {
290 u8 initiator;
291 u8 recv_marker_enabled;
292 u8 xmit_marker_enabled;
293 u8 crc_enabled;
294 u8 version;
295 u8 p2p_type;
296};
297
298struct c4iw_qp_attributes {
299 u32 scq;
300 u32 rcq;
301 u32 sq_num_entries;
302 u32 rq_num_entries;
303 u32 sq_max_sges;
304 u32 sq_max_sges_rdma_write;
305 u32 rq_max_sges;
306 u32 state;
307 u8 enable_rdma_read;
308 u8 enable_rdma_write;
309 u8 enable_bind;
310 u8 enable_mmid0_fastreg;
311 u32 max_ord;
312 u32 max_ird;
313 u32 pd;
314 u32 next_state;
315 char terminate_buffer[52];
316 u32 terminate_msg_len;
317 u8 is_terminate_local;
318 struct c4iw_mpa_attributes mpa_attr;
319 struct c4iw_ep *llp_stream_handle;
320};
321
322struct c4iw_qp {
323 struct ib_qp ibqp;
324 struct c4iw_dev *rhp;
325 struct c4iw_ep *ep;
326 struct c4iw_qp_attributes attr;
327 struct t4_wq wq;
328 spinlock_t lock;
329 atomic_t refcnt;
330 wait_queue_head_t wait;
331 struct timer_list timer;
332};
333
334static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
335{
336 return container_of(ibqp, struct c4iw_qp, ibqp);
337}
338
339struct c4iw_ucontext {
340 struct ib_ucontext ibucontext;
341 struct c4iw_dev_ucontext uctx;
342 u32 key;
343 spinlock_t mmap_lock;
344 struct list_head mmaps;
345};
346
347static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
348{
349 return container_of(c, struct c4iw_ucontext, ibucontext);
350}
351
352struct c4iw_mm_entry {
353 struct list_head entry;
354 u64 addr;
355 u32 key;
356 unsigned len;
357};
358
359static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
360 u32 key, unsigned len)
361{
362 struct list_head *pos, *nxt;
363 struct c4iw_mm_entry *mm;
364
365 spin_lock(&ucontext->mmap_lock);
366 list_for_each_safe(pos, nxt, &ucontext->mmaps) {
367
368 mm = list_entry(pos, struct c4iw_mm_entry, entry);
369 if (mm->key == key && mm->len == len) {
370 list_del_init(&mm->entry);
371 spin_unlock(&ucontext->mmap_lock);
372 PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
373 key, (unsigned long long) mm->addr, mm->len);
374 return mm;
375 }
376 }
377 spin_unlock(&ucontext->mmap_lock);
378 return NULL;
379}
380
381static inline void insert_mmap(struct c4iw_ucontext *ucontext,
382 struct c4iw_mm_entry *mm)
383{
384 spin_lock(&ucontext->mmap_lock);
385 PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
386 mm->key, (unsigned long long) mm->addr, mm->len);
387 list_add_tail(&mm->entry, &ucontext->mmaps);
388 spin_unlock(&ucontext->mmap_lock);
389}
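
These entries implement a small rendezvous: c4iw_create_cq() in cq.c hands userspace an opaque page-aligned key, the user library passes the key back as its mmap offset, and the driver's mmap handler consumes the entry with remove_mmap() to learn which physical range to map. A hedged sketch of the consuming side (the real handler lives in provider.c):

	static int example_mmap(struct ib_ucontext *context,
				struct vm_area_struct *vma)
	{
		struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
		u32 key = vma->vm_pgoff << PAGE_SHIFT;
		unsigned len = vma->vm_end - vma->vm_start;
		struct c4iw_mm_entry *mm;

		mm = remove_mmap(ucontext, key, len);
		if (!mm)
			return -EINVAL;
		/* ... remap mm->addr for len bytes into vma ... */
		kfree(mm);
		return 0;
	}
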
390
391enum c4iw_qp_attr_mask {
392 C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
393 C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
394 C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
395 C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
396 C4IW_QP_ATTR_MAX_ORD = 1 << 11,
397 C4IW_QP_ATTR_MAX_IRD = 1 << 12,
398 C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
399 C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
400 C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
401 C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
402 C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
403 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
404 C4IW_QP_ATTR_MAX_ORD |
405 C4IW_QP_ATTR_MAX_IRD |
406 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
407 C4IW_QP_ATTR_STREAM_MSG_BUFFER |
408 C4IW_QP_ATTR_MPA_ATTR |
409 C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
410};
411
412int c4iw_modify_qp(struct c4iw_dev *rhp,
413 struct c4iw_qp *qhp,
414 enum c4iw_qp_attr_mask mask,
415 struct c4iw_qp_attributes *attrs,
416 int internal);
417
418enum c4iw_qp_state {
419 C4IW_QP_STATE_IDLE,
420 C4IW_QP_STATE_RTS,
421 C4IW_QP_STATE_ERROR,
422 C4IW_QP_STATE_TERMINATE,
423 C4IW_QP_STATE_CLOSING,
424 C4IW_QP_STATE_TOT
425};
426
427static inline int c4iw_convert_state(enum ib_qp_state ib_state)
428{
429 switch (ib_state) {
430 case IB_QPS_RESET:
431 case IB_QPS_INIT:
432 return C4IW_QP_STATE_IDLE;
433 case IB_QPS_RTS:
434 return C4IW_QP_STATE_RTS;
435 case IB_QPS_SQD:
436 return C4IW_QP_STATE_CLOSING;
437 case IB_QPS_SQE:
438 return C4IW_QP_STATE_TERMINATE;
439 case IB_QPS_ERR:
440 return C4IW_QP_STATE_ERROR;
441 default:
442 return -1;
443 }
444}
445
446static inline u32 c4iw_ib_to_tpt_access(int a)
447{
448 return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
449 (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
450 (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
451 FW_RI_MEM_ACCESS_LOCAL_READ;
452}
453
454static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
455{
456 return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
457 (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
458}
459
460enum c4iw_mmid_state {
461 C4IW_STAG_STATE_VALID,
462 C4IW_STAG_STATE_INVALID
463};
464
465#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"
466
467#define MPA_KEY_REQ "MPA ID Req Frame"
468#define MPA_KEY_REP "MPA ID Rep Frame"
469
470#define MPA_MAX_PRIVATE_DATA 256
471#define MPA_REJECT 0x20
472#define MPA_CRC 0x40
473#define MPA_MARKERS 0x80
474#define MPA_FLAGS_MASK 0xE0
475
476#define c4iw_put_ep(ep) { \
477 PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
478 ep, atomic_read(&((ep)->kref.refcount))); \
479 WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
480 kref_put(&((ep)->kref), _c4iw_free_ep); \
481}
482
483#define c4iw_get_ep(ep) { \
484 PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
485 ep, atomic_read(&((ep)->kref.refcount))); \
486 kref_get(&((ep)->kref)); \
487}
488void _c4iw_free_ep(struct kref *kref);
489
490struct mpa_message {
491 u8 key[16];
492 u8 flags;
493 u8 revision;
494 __be16 private_data_size;
495 u8 private_data[0];
496};
497
498struct terminate_message {
499 u8 layer_etype;
500 u8 ecode;
501 __be16 hdrct_rsvd;
502 u8 len_hdrs[0];
503};
504
505#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
506
507enum c4iw_layers_types {
508 LAYER_RDMAP = 0x00,
509 LAYER_DDP = 0x10,
510 LAYER_MPA = 0x20,
511 RDMAP_LOCAL_CATA = 0x00,
512 RDMAP_REMOTE_PROT = 0x01,
513 RDMAP_REMOTE_OP = 0x02,
514 DDP_LOCAL_CATA = 0x00,
515 DDP_TAGGED_ERR = 0x01,
516 DDP_UNTAGGED_ERR = 0x02,
517 DDP_LLP = 0x03
518};
519
520enum c4iw_rdma_ecodes {
521 RDMAP_INV_STAG = 0x00,
522 RDMAP_BASE_BOUNDS = 0x01,
523 RDMAP_ACC_VIOL = 0x02,
524 RDMAP_STAG_NOT_ASSOC = 0x03,
525 RDMAP_TO_WRAP = 0x04,
526 RDMAP_INV_VERS = 0x05,
527 RDMAP_INV_OPCODE = 0x06,
528 RDMAP_STREAM_CATA = 0x07,
529 RDMAP_GLOBAL_CATA = 0x08,
530 RDMAP_CANT_INV_STAG = 0x09,
531 RDMAP_UNSPECIFIED = 0xff
532};
533
534enum c4iw_ddp_ecodes {
535 DDPT_INV_STAG = 0x00,
536 DDPT_BASE_BOUNDS = 0x01,
537 DDPT_STAG_NOT_ASSOC = 0x02,
538 DDPT_TO_WRAP = 0x03,
539 DDPT_INV_VERS = 0x04,
540 DDPU_INV_QN = 0x01,
541 DDPU_INV_MSN_NOBUF = 0x02,
542 DDPU_INV_MSN_RANGE = 0x03,
543 DDPU_INV_MO = 0x04,
544 DDPU_MSG_TOOBIG = 0x05,
545 DDPU_INV_VERS = 0x06
546};
547
548enum c4iw_mpa_ecodes {
549 MPA_CRC_ERR = 0x02,
550 MPA_MARKER_ERR = 0x03
551};
552
553enum c4iw_ep_state {
554 IDLE = 0,
555 LISTEN,
556 CONNECTING,
557 MPA_REQ_WAIT,
558 MPA_REQ_SENT,
559 MPA_REQ_RCVD,
560 MPA_REP_SENT,
561 FPDU_MODE,
562 ABORTING,
563 CLOSING,
564 MORIBUND,
565 DEAD,
566};
567
568enum c4iw_ep_flags {
569 PEER_ABORT_IN_PROGRESS = 0,
570 ABORT_REQ_IN_PROGRESS = 1,
571 RELEASE_RESOURCES = 2,
572 CLOSE_SENT = 3,
573};
574
575struct c4iw_ep_common {
576 struct iw_cm_id *cm_id;
577 struct c4iw_qp *qp;
578 struct c4iw_dev *dev;
579 enum c4iw_ep_state state;
580 struct kref kref;
581 spinlock_t lock;
582 struct sockaddr_in local_addr;
583 struct sockaddr_in remote_addr;
584 wait_queue_head_t waitq;
585 int rpl_done;
586 int rpl_err;
587 unsigned long flags;
588};
589
590struct c4iw_listen_ep {
591 struct c4iw_ep_common com;
592 unsigned int stid;
593 int backlog;
594};
595
596struct c4iw_ep {
597 struct c4iw_ep_common com;
598 struct c4iw_ep *parent_ep;
599 struct timer_list timer;
600 struct list_head entry;
601 unsigned int atid;
602 u32 hwtid;
603 u32 snd_seq;
604 u32 rcv_seq;
605 struct l2t_entry *l2t;
606 struct dst_entry *dst;
607 struct sk_buff *mpa_skb;
608 struct c4iw_mpa_attributes mpa_attr;
609 u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
610 unsigned int mpa_pkt_len;
611 u32 ird;
612 u32 ord;
613 u32 smac_idx;
614 u32 tx_chan;
615 u32 mtu;
616 u16 mss;
617 u16 emss;
618 u16 plen;
619 u16 rss_qid;
620 u16 txq_idx;
621 u8 tos;
622};
623
624static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
625{
626 return cm_id->provider_data;
627}
628
629static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
630{
631 return cm_id->provider_data;
632}
633
634static inline int compute_wscale(int win)
635{
636 int wscale = 0;
637
638 while (wscale < 14 && (65535 << wscale) < win)
639 wscale++;
640 return wscale;
641}
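/*
 * Worked example (added, not in the original source): for a window of
 * 1000000 bytes, 65535 << 3 = 524280 is still too small while
 * 65535 << 4 = 1048560 covers it, so compute_wscale(1000000) returns 4.
 * The cap of 14 is the maximum TCP window scale per RFC 1323.
 */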
642
643typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
644
645int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
646 struct l2t_entry *l2t);
649u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock);
650void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock);
651int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
652int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
653int c4iw_pblpool_create(struct c4iw_rdev *rdev);
654int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
655void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
656void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
657void c4iw_destroy_resource(struct c4iw_resource *rscp);
658int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
659int c4iw_register_device(struct c4iw_dev *dev);
660void c4iw_unregister_device(struct c4iw_dev *dev);
661int __init c4iw_cm_init(void);
662void __exit c4iw_cm_term(void);
663void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
664 struct c4iw_dev_ucontext *uctx);
665void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
666 struct c4iw_dev_ucontext *uctx);
667int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
668int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
669 struct ib_send_wr **bad_wr);
670int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
671 struct ib_recv_wr **bad_wr);
672int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
673 struct ib_mw_bind *mw_bind);
674int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
675int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
676int c4iw_destroy_listen(struct iw_cm_id *cm_id);
677int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
678int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
679void c4iw_qp_add_ref(struct ib_qp *qp);
680void c4iw_qp_rem_ref(struct ib_qp *qp);
681void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
682struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
683 struct ib_device *device,
684 int page_list_len);
685struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
686int c4iw_dealloc_mw(struct ib_mw *mw);
687struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
688struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
689 u64 length, u64 virt, int acc,
690 struct ib_udata *udata);
691struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
692struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
693 struct ib_phys_buf *buffer_list,
694 int num_phys_buf,
695 int acc,
696 u64 *iova_start);
697int c4iw_reregister_phys_mem(struct ib_mr *mr,
698 int mr_rereg_mask,
699 struct ib_pd *pd,
700 struct ib_phys_buf *buffer_list,
701 int num_phys_buf,
702 int acc, u64 *iova_start);
703int c4iw_dereg_mr(struct ib_mr *ib_mr);
704int c4iw_destroy_cq(struct ib_cq *ib_cq);
705struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
706 int vector,
707 struct ib_ucontext *ib_context,
708 struct ib_udata *udata);
709int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
710int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
711int c4iw_destroy_qp(struct ib_qp *ib_qp);
712struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
713 struct ib_qp_init_attr *attrs,
714 struct ib_udata *udata);
715int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
716 int attr_mask, struct ib_udata *udata);
717struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
718u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
719void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
720u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
721void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
722int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
723void c4iw_flush_hw_cq(struct t4_cq *cq);
724void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
725void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
726int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
727int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
728int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
729int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
730u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
731int c4iw_post_zb_read(struct c4iw_qp *qhp);
732int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
733u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
734void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
735 struct c4iw_dev_ucontext *uctx);
736u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
737void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
738 struct c4iw_dev_ucontext *uctx);
739void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
740
741extern struct cxgb4_client t4c_client;
742extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
743extern int c4iw_max_read_depth;
744
745#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
new file mode 100644
index 000000000000..e54ff6d25691
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -0,0 +1,811 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_umem.h>
34#include <asm/atomic.h>
35
36#include "iw_cxgb4.h"
37
38#define T4_ULPTX_MIN_IO 32
39#define C4IW_MAX_INLINE_SIZE 96
40
41static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
42 void *data)
43{
44 struct sk_buff *skb;
45 struct ulp_mem_io *req;
46 struct ulptx_idata *sc;
47 u8 wr_len, *to_dp, *from_dp;
48 int copy_len, num_wqe, i, ret = 0;
49 struct c4iw_wr_wait wr_wait;
50
51 addr &= 0x7FFFFFF;
52 PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
53 num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
54 c4iw_init_wr_wait(&wr_wait);
55 for (i = 0; i < num_wqe; i++) {
57 copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
58 len;
59 wr_len = roundup(sizeof *req + sizeof *sc +
60 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
61
62 skb = alloc_skb(wr_len, GFP_KERNEL);
63 if (!skb)
64 return -ENOMEM;
65 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
66
67 req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
68 memset(req, 0, wr_len);
69 INIT_ULPTX_WR(req, wr_len, 0, 0);
70
71 if (i == (num_wqe-1)) {
72 req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
73 FW_WR_COMPL(1));
74 req->wr.wr_lo = (__force __be64)&wr_wait;
75 } else
76 req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
77 req->wr.wr_mid = cpu_to_be32(
78 FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
79
80 req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
81 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
82 DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
83 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
84 16));
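 /*
 * Added comment: ULP_TX memory addresses are in 32-byte units, so
 * each 96-byte (C4IW_MAX_INLINE_SIZE) chunk advances the destination
 * by 3 units, hence the "i * 3" below.
 */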
85 req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));
86
87 sc = (struct ulptx_idata *)(req + 1);
88 sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
89 sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
90
91 to_dp = (u8 *)(sc + 1);
92 from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
93 if (data)
94 memcpy(to_dp, from_dp, copy_len);
95 else
96 memset(to_dp, 0, copy_len);
97 if (copy_len % T4_ULPTX_MIN_IO)
98 memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
99 (copy_len % T4_ULPTX_MIN_IO));
100 ret = c4iw_ofld_send(rdev, skb);
101 if (ret)
102 return ret;
103 len -= C4IW_MAX_INLINE_SIZE;
104 }
105
106 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
107 if (!wr_wait.done) {
108 printk(KERN_ERR MOD "Device %s not responding!\n",
109 pci_name(rdev->lldi.pdev));
110 rdev->flags = T4_FATAL_ERROR;
111 ret = -EIO;
112 } else
113 ret = wr_wait.ret;
114 return ret;
115}
116
117/*
118 * Build and write a TPT entry.
119 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
120 * pbl_size and pbl_addr
121 * OUT: stag index
122 */
123static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
124 u32 *stag, u8 stag_state, u32 pdid,
125 enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
126 int bind_enabled, u32 zbva, u64 to,
127 u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
128{
129 int err;
130 struct fw_ri_tpte tpt;
131 u32 stag_idx;
132 static atomic_t key;
133
134 if (c4iw_fatal_error(rdev))
135 return -EIO;
136
137 stag_state = stag_state > 0;
138 stag_idx = (*stag) >> 8;
139
140 if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
141 stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
142 &rdev->resource.tpt_fifo_lock);
143 if (!stag_idx)
144 return -ENOMEM;
145 *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
146 }
147 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
148 __func__, stag_state, type, pdid, stag_idx);
149
150 /* write TPT entry */
151 if (reset_tpt_entry)
152 memset(&tpt, 0, sizeof(tpt));
153 else {
154 tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
155 V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
156 V_FW_RI_TPTE_STAGSTATE(stag_state) |
157 V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
158 tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
159 (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
160 V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
161 FW_RI_VA_BASED_TO))|
162 V_FW_RI_TPTE_PS(page_size));
163 tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
164 V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
165 tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
166 tpt.va_hi = cpu_to_be32((u32)(to >> 32));
167 tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
168 tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
169 tpt.len_hi = cpu_to_be32((u32)(len >> 32));
170 }
171 err = write_adapter_mem(rdev, stag_idx +
172 (rdev->lldi.vr->stag.start >> 5),
173 sizeof(tpt), &tpt);
174
175 if (reset_tpt_entry)
176 c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx,
177 &rdev->resource.tpt_fifo_lock);
178 return err;
179}
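/*
 * Descriptive note (added): an STag is composed as
 * (stag_idx << 8) | (8-bit rolling key), so the TPT index and the
 * memory-map id (mmid) used elsewhere in this file are both stag >> 8.
 */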
180
181static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
182 u32 pbl_addr, u32 pbl_size)
183{
184 int err;
185
186 PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
187 __func__, pbl_addr, rdev->lldi.vr->pbl.start,
188 pbl_size);
189
190 err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
191 return err;
192}
193
194static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
195 u32 pbl_addr)
196{
197 return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
198 pbl_size, pbl_addr);
199}
200
201static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
202{
203 *stag = T4_STAG_UNSET;
204 return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
205 0UL, 0, 0, 0, 0);
206}
207
208static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
209{
210 return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
211 0);
212}
213
214static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
215 u32 pbl_size, u32 pbl_addr)
216{
217 *stag = T4_STAG_UNSET;
218 return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
219 0UL, 0, 0, pbl_size, pbl_addr);
220}
221
222static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
223{
224 u32 mmid;
225
226 mhp->attr.state = 1;
227 mhp->attr.stag = stag;
228 mmid = stag >> 8;
229 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
230 PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
231 return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
232}
233
234static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
235 struct c4iw_mr *mhp, int shift)
236{
237 u32 stag = T4_STAG_UNSET;
238 int ret;
239
240 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
241 FW_RI_STAG_NSMR, mhp->attr.perms,
242 mhp->attr.mw_bind_enable, mhp->attr.zbva,
243 mhp->attr.va_fbo, mhp->attr.len, shift - 12,
244 mhp->attr.pbl_size, mhp->attr.pbl_addr);
245 if (ret)
246 return ret;
247
248 ret = finish_mem_reg(mhp, stag);
249 if (ret)
250 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
251 mhp->attr.pbl_addr);
252 return ret;
253}
254
255static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
256 struct c4iw_mr *mhp, int shift, int npages)
257{
258 u32 stag;
259 int ret;
260
261 if (npages > mhp->attr.pbl_size)
262 return -ENOMEM;
263
264 stag = mhp->attr.stag;
265 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
266 FW_RI_STAG_NSMR, mhp->attr.perms,
267 mhp->attr.mw_bind_enable, mhp->attr.zbva,
268 mhp->attr.va_fbo, mhp->attr.len, shift - 12,
269 mhp->attr.pbl_size, mhp->attr.pbl_addr);
270 if (ret)
271 return ret;
272
273 ret = finish_mem_reg(mhp, stag);
274 if (ret)
275 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
276 mhp->attr.pbl_addr);
277
278 return ret;
279}
280
281static int alloc_pbl(struct c4iw_mr *mhp, int npages)
282{
283 mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
284 npages << 3);
285
286 if (!mhp->attr.pbl_addr)
287 return -ENOMEM;
288
289 mhp->attr.pbl_size = npages;
290
291 return 0;
292}
293
294static int build_phys_page_list(struct ib_phys_buf *buffer_list,
295 int num_phys_buf, u64 *iova_start,
296 u64 *total_size, int *npages,
297 int *shift, __be64 **page_list)
298{
299 u64 mask;
300 int i, j, n;
301
302 mask = 0;
303 *total_size = 0;
304 for (i = 0; i < num_phys_buf; ++i) {
305 if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
306 return -EINVAL;
307 if (i != 0 && i != num_phys_buf - 1 &&
308 (buffer_list[i].size & ~PAGE_MASK))
309 return -EINVAL;
310 *total_size += buffer_list[i].size;
311 if (i > 0)
312 mask |= buffer_list[i].addr;
313 else
314 mask |= buffer_list[i].addr & PAGE_MASK;
315 if (i != num_phys_buf - 1)
316 mask |= buffer_list[i].addr + buffer_list[i].size;
317 else
318 mask |= (buffer_list[i].addr + buffer_list[i].size +
319 PAGE_SIZE - 1) & PAGE_MASK;
320 }
321
322 if (*total_size > 0xFFFFFFFFULL)
323 return -ENOMEM;
324
325 /* Find largest page shift we can use to cover buffers */
326 for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
327 if ((1ULL << *shift) & mask)
328 break;
329
330 buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
331 buffer_list[0].addr &= ~0ull << *shift;
332
333 *npages = 0;
334 for (i = 0; i < num_phys_buf; ++i)
335 *npages += (buffer_list[i].size +
336 (1ULL << *shift) - 1) >> *shift;
337
338 if (!*npages)
339 return -EINVAL;
340
341 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
342 if (!*page_list)
343 return -ENOMEM;
344
345 n = 0;
346 for (i = 0; i < num_phys_buf; ++i)
347 for (j = 0;
348 j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
349 ++j)
350 (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
351 ((u64) j << *shift));
352
353 PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
354 __func__, (unsigned long long)*iova_start,
355 (unsigned long long)mask, *shift, (unsigned long long)*total_size,
356 *npages);
357
358 return 0;
360}
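/*
 * Explanatory note (added): build_phys_page_list() ORs every interior
 * buffer address and end boundary into 'mask', so the lowest set bit
 * of 'mask' is the largest power-of-two alignment common to all
 * buffers; the shift loop stops at that bit and uses it as the HW
 * page shift covering the whole region.
 */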
361
362int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
363 struct ib_pd *pd, struct ib_phys_buf *buffer_list,
364 int num_phys_buf, int acc, u64 *iova_start)
365{
367 struct c4iw_mr mh, *mhp;
368 struct c4iw_pd *php;
369 struct c4iw_dev *rhp;
370 __be64 *page_list = NULL;
371 int shift = 0;
372 u64 total_size;
373 int npages;
374 int ret;
375
376 PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
377
378 /* There can be no memory windows */
379 if (atomic_read(&mr->usecnt))
380 return -EINVAL;
381
382 mhp = to_c4iw_mr(mr);
383 rhp = mhp->rhp;
384 php = to_c4iw_pd(mr->pd);
385
386 /* make sure we are on the same adapter */
387 if (rhp != php->rhp)
388 return -EINVAL;
389
390 memcpy(&mh, mhp, sizeof *mhp);
391
392 if (mr_rereg_mask & IB_MR_REREG_PD)
393 php = to_c4iw_pd(pd);
394 if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
395 mh.attr.perms = c4iw_ib_to_tpt_access(acc);
396 mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
397 IB_ACCESS_MW_BIND;
398 }
399 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
400 ret = build_phys_page_list(buffer_list, num_phys_buf,
401 iova_start,
402 &total_size, &npages,
403 &shift, &page_list);
404 if (ret)
405 return ret;
406 }
407
408 ret = reregister_mem(rhp, php, &mh, shift, npages);
409 kfree(page_list);
410 if (ret)
411 return ret;
412 if (mr_rereg_mask & IB_MR_REREG_PD)
413 mhp->attr.pdid = php->pdid;
414 if (mr_rereg_mask & IB_MR_REREG_ACCESS)
415 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
416 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
417 mhp->attr.zbva = 0;
418 mhp->attr.va_fbo = *iova_start;
419 mhp->attr.page_size = shift - 12;
420 mhp->attr.len = (u32) total_size;
421 mhp->attr.pbl_size = npages;
422 }
423
424 return 0;
425}
426
427struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
428 struct ib_phys_buf *buffer_list,
429 int num_phys_buf, int acc, u64 *iova_start)
430{
431 __be64 *page_list;
432 int shift;
433 u64 total_size;
434 int npages;
435 struct c4iw_dev *rhp;
436 struct c4iw_pd *php;
437 struct c4iw_mr *mhp;
438 int ret;
439
440 PDBG("%s ib_pd %p\n", __func__, pd);
441 php = to_c4iw_pd(pd);
442 rhp = php->rhp;
443
444 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
445 if (!mhp)
446 return ERR_PTR(-ENOMEM);
447
448 mhp->rhp = rhp;
449
450 /* First check that we have enough alignment */
451 if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
452 ret = -EINVAL;
453 goto err;
454 }
455
456 if (num_phys_buf > 1 &&
457 ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
458 ret = -EINVAL;
459 goto err;
460 }
461
462 ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
463 &total_size, &npages, &shift,
464 &page_list);
465 if (ret)
466 goto err;
467
468 ret = alloc_pbl(mhp, npages);
469 if (ret) {
470 kfree(page_list);
471 goto err_pbl;
472 }
473
474 ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
475 npages);
476 kfree(page_list);
477 if (ret)
478 goto err_pbl;
479
480 mhp->attr.pdid = php->pdid;
481 mhp->attr.zbva = 0;
482
483 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
484 mhp->attr.va_fbo = *iova_start;
485 mhp->attr.page_size = shift - 12;
486
487 mhp->attr.len = (u32) total_size;
488 mhp->attr.pbl_size = npages;
489 ret = register_mem(rhp, php, mhp, shift);
490 if (ret)
491 goto err_pbl;
492
493 return &mhp->ibmr;
494
495err_pbl:
496 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
497 mhp->attr.pbl_size << 3);
498
499err:
500 kfree(mhp);
501 return ERR_PTR(ret);
503}
504
505struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
506{
507 struct c4iw_dev *rhp;
508 struct c4iw_pd *php;
509 struct c4iw_mr *mhp;
510 int ret;
511 u32 stag = T4_STAG_UNSET;
512
513 PDBG("%s ib_pd %p\n", __func__, pd);
514 php = to_c4iw_pd(pd);
515 rhp = php->rhp;
516
517 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
518 if (!mhp)
519 return ERR_PTR(-ENOMEM);
520
521 mhp->rhp = rhp;
522 mhp->attr.pdid = php->pdid;
523 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
524 mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
525 mhp->attr.zbva = 0;
526 mhp->attr.va_fbo = 0;
527 mhp->attr.page_size = 0;
528 mhp->attr.len = ~0UL;
529 mhp->attr.pbl_size = 0;
530
531 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
532 FW_RI_STAG_NSMR, mhp->attr.perms,
533 mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
534 if (ret)
535 goto err1;
536
537 ret = finish_mem_reg(mhp, stag);
538 if (ret)
539 goto err2;
540 return &mhp->ibmr;
541err2:
542 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
543 mhp->attr.pbl_addr);
544err1:
545 kfree(mhp);
546 return ERR_PTR(ret);
547}
548
549struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
550 u64 virt, int acc, struct ib_udata *udata)
551{
552 __be64 *pages;
553 int shift, n, len;
554 int i, j, k;
555 int err = 0;
556 struct ib_umem_chunk *chunk;
557 struct c4iw_dev *rhp;
558 struct c4iw_pd *php;
559 struct c4iw_mr *mhp;
560
561 PDBG("%s ib_pd %p\n", __func__, pd);
562
563 if (length == ~0ULL)
564 return ERR_PTR(-EINVAL);
565
566 if ((length + start) < start)
567 return ERR_PTR(-EINVAL);
568
569 php = to_c4iw_pd(pd);
570 rhp = php->rhp;
571 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
572 if (!mhp)
573 return ERR_PTR(-ENOMEM);
574
575 mhp->rhp = rhp;
576
577 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
578 if (IS_ERR(mhp->umem)) {
579 err = PTR_ERR(mhp->umem);
580 kfree(mhp);
581 return ERR_PTR(err);
582 }
583
584 shift = ffs(mhp->umem->page_size) - 1;
585
586 n = 0;
587 list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
588 n += chunk->nents;
589
590 err = alloc_pbl(mhp, n);
591 if (err)
592 goto err;
593
594 pages = (__be64 *) __get_free_page(GFP_KERNEL);
595 if (!pages) {
596 err = -ENOMEM;
597 goto err_pbl;
598 }
599
600 i = n = 0;
601
602 list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
603 for (j = 0; j < chunk->nmap; ++j) {
604 len = sg_dma_len(&chunk->page_list[j]) >> shift;
605 for (k = 0; k < len; ++k) {
606 pages[i++] = cpu_to_be64(sg_dma_address(
607 &chunk->page_list[j]) +
608 mhp->umem->page_size * k);
609 if (i == PAGE_SIZE / sizeof *pages) {
610 err = write_pbl(&mhp->rhp->rdev,
611 pages,
612 mhp->attr.pbl_addr + (n << 3), i);
613 if (err)
614 goto pbl_done;
615 n += i;
616 i = 0;
617 }
618 }
619 }
620
621 if (i)
622 err = write_pbl(&mhp->rhp->rdev, pages,
623 mhp->attr.pbl_addr + (n << 3), i);
624
625pbl_done:
626 free_page((unsigned long) pages);
627 if (err)
628 goto err_pbl;
629
630 mhp->attr.pdid = php->pdid;
631 mhp->attr.zbva = 0;
632 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
633 mhp->attr.va_fbo = virt;
634 mhp->attr.page_size = shift - 12;
635 mhp->attr.len = (u32) length;
636
637 err = register_mem(rhp, php, mhp, shift);
638 if (err)
639 goto err_pbl;
640
641 return &mhp->ibmr;
642
643err_pbl:
644 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
645 mhp->attr.pbl_size << 3);
646
647err:
648 ib_umem_release(mhp->umem);
649 kfree(mhp);
650 return ERR_PTR(err);
651}
652
653struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
654{
655 struct c4iw_dev *rhp;
656 struct c4iw_pd *php;
657 struct c4iw_mw *mhp;
658 u32 mmid;
659 u32 stag = 0;
660 int ret;
661
662 php = to_c4iw_pd(pd);
663 rhp = php->rhp;
664 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
665 if (!mhp)
666 return ERR_PTR(-ENOMEM);
667 ret = allocate_window(&rhp->rdev, &stag, php->pdid);
668 if (ret) {
669 kfree(mhp);
670 return ERR_PTR(ret);
671 }
672 mhp->rhp = rhp;
673 mhp->attr.pdid = php->pdid;
674 mhp->attr.type = FW_RI_STAG_MW;
675 mhp->attr.stag = stag;
676 mmid = (stag) >> 8;
677 mhp->ibmw.rkey = stag;
678 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
679 deallocate_window(&rhp->rdev, mhp->attr.stag);
680 kfree(mhp);
681 return ERR_PTR(-ENOMEM);
682 }
683 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
684 return &(mhp->ibmw);
685}
686
687int c4iw_dealloc_mw(struct ib_mw *mw)
688{
689 struct c4iw_dev *rhp;
690 struct c4iw_mw *mhp;
691 u32 mmid;
692
693 mhp = to_c4iw_mw(mw);
694 rhp = mhp->rhp;
695 mmid = (mw->rkey) >> 8;
696 deallocate_window(&rhp->rdev, mhp->attr.stag);
697 remove_handle(rhp, &rhp->mmidr, mmid);
698 kfree(mhp);
699 PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
700 return 0;
701}
702
703struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
704{
705 struct c4iw_dev *rhp;
706 struct c4iw_pd *php;
707 struct c4iw_mr *mhp;
708 u32 mmid;
709 u32 stag = 0;
710 int ret = 0;
711
712 php = to_c4iw_pd(pd);
713 rhp = php->rhp;
714 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
715 if (!mhp)
716 goto err;
717
718 mhp->rhp = rhp;
719 ret = alloc_pbl(mhp, pbl_depth);
720 if (ret)
721 goto err1;
722 mhp->attr.pbl_size = pbl_depth;
723 ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
724 mhp->attr.pbl_size, mhp->attr.pbl_addr);
725 if (ret)
726 goto err2;
727 mhp->attr.pdid = php->pdid;
728 mhp->attr.type = FW_RI_STAG_NSMR;
729 mhp->attr.stag = stag;
730 mhp->attr.state = 1;
731 mmid = (stag) >> 8;
732 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
733 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
734 goto err3;
735
736 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
737 return &(mhp->ibmr);
738err3:
739 dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
740 mhp->attr.pbl_addr);
741err2:
742 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
743 mhp->attr.pbl_size << 3);
744err1:
745 kfree(mhp);
746err:
747 return ERR_PTR(ret);
748}
749
750struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
751 int page_list_len)
752{
753 struct c4iw_fr_page_list *c4pl;
754 struct c4iw_dev *dev = to_c4iw_dev(device);
755 dma_addr_t dma_addr;
756 int size = sizeof *c4pl + page_list_len * sizeof(u64);
757
758 if (page_list_len > T4_MAX_FR_DEPTH)
759 return ERR_PTR(-EINVAL);
760
761 c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
762 &dma_addr, GFP_KERNEL);
763 if (!c4pl)
764 return ERR_PTR(-ENOMEM);
765
766 pci_unmap_addr_set(c4pl, mapping, dma_addr);
767 c4pl->dma_addr = dma_addr;
768 c4pl->dev = dev;
769 c4pl->size = size;
770 c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
771 c4pl->ibpl.max_page_list_len = page_list_len;
772
773 return &c4pl->ibpl;
774}
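/*
 * Added note: the page-list header and its u64 array come from one
 * coherent DMA allocation; ibpl.page_list simply points just past the
 * c4iw_fr_page_list header (c4pl + 1).
 */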
775
776void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
777{
778 struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
779
780 dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
781 c4pl, pci_unmap_addr(c4pl, mapping));
782}
783
784int c4iw_dereg_mr(struct ib_mr *ib_mr)
785{
786 struct c4iw_dev *rhp;
787 struct c4iw_mr *mhp;
788 u32 mmid;
789
790 PDBG("%s ib_mr %p\n", __func__, ib_mr);
791 /* There can be no memory windows */
792 if (atomic_read(&ib_mr->usecnt))
793 return -EINVAL;
794
795 mhp = to_c4iw_mr(ib_mr);
796 rhp = mhp->rhp;
797 mmid = mhp->attr.stag >> 8;
798 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
799 mhp->attr.pbl_addr);
800 if (mhp->attr.pbl_size)
801 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
802 mhp->attr.pbl_size << 3);
803 remove_handle(rhp, &rhp->mmidr, mmid);
804 if (mhp->kva)
805 kfree((void *) (unsigned long) mhp->kva);
806 if (mhp->umem)
807 ib_umem_release(mhp->umem);
808 PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
809 kfree(mhp);
810 return 0;
811}
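/*
 * Summary (added, descriptive only): the phys and user MR paths all
 * funnel through alloc_pbl() to reserve PBL space, write_pbl() to push
 * the page list into adapter memory, and write_tpt_entry() (via
 * register_mem()) to validate the STag; the DMA MR writes a TPT entry
 * directly with no PBL, and dereg_mem() writes a zeroed TPT entry to
 * invalidate an STag again.
 */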
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
new file mode 100644
index 000000000000..dfc49020bb9c
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -0,0 +1,518 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/device.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/delay.h>
38#include <linux/errno.h>
39#include <linux/list.h>
40#include <linux/spinlock.h>
41#include <linux/ethtool.h>
42#include <linux/rtnetlink.h>
43#include <linux/inetdevice.h>
44#include <linux/io.h>
45
46#include <asm/irq.h>
47#include <asm/byteorder.h>
48
49#include <rdma/iw_cm.h>
50#include <rdma/ib_verbs.h>
51#include <rdma/ib_smi.h>
52#include <rdma/ib_umem.h>
53#include <rdma/ib_user_verbs.h>
54
55#include "iw_cxgb4.h"
56
57static int fastreg_support;
58module_param(fastreg_support, int, 0644);
59MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=0)");
60
61static int c4iw_modify_port(struct ib_device *ibdev,
62 u8 port, int port_modify_mask,
63 struct ib_port_modify *props)
64{
65 return -ENOSYS;
66}
67
68static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
69 struct ib_ah_attr *ah_attr)
70{
71 return ERR_PTR(-ENOSYS);
72}
73
74static int c4iw_ah_destroy(struct ib_ah *ah)
75{
76 return -ENOSYS;
77}
78
79static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
80{
81 return -ENOSYS;
82}
83
84static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
85{
86 return -ENOSYS;
87}
88
89static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
90 u8 port_num, struct ib_wc *in_wc,
91 struct ib_grh *in_grh, struct ib_mad *in_mad,
92 struct ib_mad *out_mad)
93{
94 return -ENOSYS;
95}
96
97static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
98{
99 struct c4iw_dev *rhp = to_c4iw_dev(context->device);
100 struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
101 struct c4iw_mm_entry *mm, *tmp;
102
103 PDBG("%s context %p\n", __func__, context);
104 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
105 kfree(mm);
106 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
107 kfree(ucontext);
108 return 0;
109}
110
111static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
112 struct ib_udata *udata)
113{
114 struct c4iw_ucontext *context;
115 struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
116
117 PDBG("%s ibdev %p\n", __func__, ibdev);
118 context = kzalloc(sizeof(*context), GFP_KERNEL);
119 if (!context)
120 return ERR_PTR(-ENOMEM);
121 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
122 INIT_LIST_HEAD(&context->mmaps);
123 spin_lock_init(&context->mmap_lock);
124 return &context->ibucontext;
125}
126
127static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
128{
129 int len = vma->vm_end - vma->vm_start;
130 u32 key = vma->vm_pgoff << PAGE_SHIFT;
131 struct c4iw_rdev *rdev;
132 int ret = 0;
133 struct c4iw_mm_entry *mm;
134 struct c4iw_ucontext *ucontext;
135 u64 addr;
136
137 PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
138 key, len);
139
140 if (vma->vm_start & (PAGE_SIZE-1))
141 return -EINVAL;
142
143 rdev = &(to_c4iw_dev(context->device)->rdev);
144 ucontext = to_c4iw_ucontext(context);
145
146 mm = remove_mmap(ucontext, key, len);
147 if (!mm)
148 return -EINVAL;
149 addr = mm->addr;
150 kfree(mm);
151
152 if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
153 (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
154 pci_resource_len(rdev->lldi.pdev, 2)))) {
155
156 /*
157 * Map T4 DB register.
158 */
159 if (vma->vm_flags & VM_READ)
160 return -EPERM;
161
162 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
163 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
164 vma->vm_flags &= ~VM_MAYREAD;
165 ret = io_remap_pfn_range(vma, vma->vm_start,
166 addr >> PAGE_SHIFT,
167 len, vma->vm_page_prot);
168 } else {
169
170 /*
171 * Map WQ or CQ contiguous DMA memory.
172 */
173 ret = remap_pfn_range(vma, vma->vm_start,
174 addr >> PAGE_SHIFT,
175 len, vma->vm_page_prot);
176 }
177
178 return ret;
179}
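/*
 * Added note: the mmap offset acts as a key handed out earlier by the
 * driver (at queue/context setup); remove_mmap() translates it to a
 * bus address, and the BAR2 range check decides between an uncached
 * doorbell mapping and a plain mapping of queue memory.
 */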
180
181static int c4iw_deallocate_pd(struct ib_pd *pd)
182{
183 struct c4iw_dev *rhp;
184 struct c4iw_pd *php;
185
186 php = to_c4iw_pd(pd);
187 rhp = php->rhp;
188 PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
189 c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, php->pdid,
190 &rhp->rdev.resource.pdid_fifo_lock);
191 kfree(php);
192 return 0;
193}
194
195static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
196 struct ib_ucontext *context,
197 struct ib_udata *udata)
198{
199 struct c4iw_pd *php;
200 u32 pdid;
201 struct c4iw_dev *rhp;
202
203 PDBG("%s ibdev %p\n", __func__, ibdev);
204 rhp = (struct c4iw_dev *) ibdev;
205 pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_fifo,
206 &rhp->rdev.resource.pdid_fifo_lock);
207 if (!pdid)
208 return ERR_PTR(-EINVAL);
209 php = kzalloc(sizeof(*php), GFP_KERNEL);
210 if (!php) {
211 c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, pdid,
212 &rhp->rdev.resource.pdid_fifo_lock);
213 return ERR_PTR(-ENOMEM);
214 }
215 php->pdid = pdid;
216 php->rhp = rhp;
217 if (context) {
218 if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
219 c4iw_deallocate_pd(&php->ibpd);
220 return ERR_PTR(-EFAULT);
221 }
222 }
223 PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
224 return &php->ibpd;
225}
226
227static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
228 u16 *pkey)
229{
230 PDBG("%s ibdev %p\n", __func__, ibdev);
231 *pkey = 0;
232 return 0;
233}
234
235static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
236 union ib_gid *gid)
237{
238 struct c4iw_dev *dev;
239
240 PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
241 __func__, ibdev, port, index, gid);
242 dev = to_c4iw_dev(ibdev);
243 BUG_ON(port == 0);
244 memset(&(gid->raw[0]), 0, sizeof(gid->raw));
245 memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
246 return 0;
247}
248
249static int c4iw_query_device(struct ib_device *ibdev,
250 struct ib_device_attr *props)
251{
253 struct c4iw_dev *dev;
254 PDBG("%s ibdev %p\n", __func__, ibdev);
255
256 dev = to_c4iw_dev(ibdev);
257 memset(props, 0, sizeof *props);
258 memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
259 props->hw_ver = dev->rdev.lldi.adapter_type;
260 props->fw_ver = dev->rdev.lldi.fw_vers;
261 props->device_cap_flags = dev->device_cap_flags;
262 props->page_size_cap = T4_PAGESIZE_MASK;
263 props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
264 props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
265 props->max_mr_size = T4_MAX_MR_SIZE;
266 props->max_qp = T4_MAX_NUM_QP;
267 props->max_qp_wr = T4_MAX_QP_DEPTH;
268 props->max_sge = T4_MAX_RECV_SGE;
269 props->max_sge_rd = 1;
270 props->max_qp_rd_atom = c4iw_max_read_depth;
271 props->max_qp_init_rd_atom = c4iw_max_read_depth;
272 props->max_cq = T4_MAX_NUM_CQ;
273 props->max_cqe = T4_MAX_CQ_DEPTH;
274 props->max_mr = c4iw_num_stags(&dev->rdev);
275 props->max_pd = T4_MAX_NUM_PD;
276 props->local_ca_ack_delay = 0;
277 props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;
278
279 return 0;
280}
281
282static int c4iw_query_port(struct ib_device *ibdev, u8 port,
283 struct ib_port_attr *props)
284{
285 struct c4iw_dev *dev;
286 struct net_device *netdev;
287 struct in_device *inetdev;
288
289 PDBG("%s ibdev %p\n", __func__, ibdev);
290
291 dev = to_c4iw_dev(ibdev);
292 netdev = dev->rdev.lldi.ports[port-1];
293
294 memset(props, 0, sizeof(struct ib_port_attr));
295 props->max_mtu = IB_MTU_4096;
296 if (netdev->mtu >= 4096)
297 props->active_mtu = IB_MTU_4096;
298 else if (netdev->mtu >= 2048)
299 props->active_mtu = IB_MTU_2048;
300 else if (netdev->mtu >= 1024)
301 props->active_mtu = IB_MTU_1024;
302 else if (netdev->mtu >= 512)
303 props->active_mtu = IB_MTU_512;
304 else
305 props->active_mtu = IB_MTU_256;
306
307 if (!netif_carrier_ok(netdev))
308 props->state = IB_PORT_DOWN;
309 else {
310 inetdev = in_dev_get(netdev);
311 if (inetdev) {
312 if (inetdev->ifa_list)
313 props->state = IB_PORT_ACTIVE;
314 else
315 props->state = IB_PORT_INIT;
316 in_dev_put(inetdev);
317 } else
318 props->state = IB_PORT_INIT;
319 }
320
321 props->port_cap_flags =
322 IB_PORT_CM_SUP |
323 IB_PORT_SNMP_TUNNEL_SUP |
324 IB_PORT_REINIT_SUP |
325 IB_PORT_DEVICE_MGMT_SUP |
326 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
327 props->gid_tbl_len = 1;
328 props->pkey_tbl_len = 1;
329 props->active_width = 2;
330 props->active_speed = 2;
331 props->max_msg_sz = -1;
332
333 return 0;
334}
335
336static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
337 char *buf)
338{
339 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
340 ibdev.dev);
341 PDBG("%s dev 0x%p\n", __func__, dev);
342 return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
343}
344
345static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
346 char *buf)
347{
348 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
349 ibdev.dev);
350 PDBG("%s dev 0x%p\n", __func__, dev);
351
352 return sprintf(buf, "%u.%u.%u.%u\n",
353 FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
354 FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
355 FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
356 FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
357}
358
359static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
360 char *buf)
361{
362 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
363 ibdev.dev);
364 struct ethtool_drvinfo info;
365 struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
366
367 PDBG("%s dev 0x%p\n", __func__, dev);
368 lldev->ethtool_ops->get_drvinfo(lldev, &info);
369 return sprintf(buf, "%s\n", info.driver);
370}
371
372static ssize_t show_board(struct device *dev, struct device_attribute *attr,
373 char *buf)
374{
375 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
376 ibdev.dev);
377 PDBG("%s dev 0x%p\n", __func__, dev);
378 return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
379 c4iw_dev->rdev.lldi.pdev->device);
380}
381
382static int c4iw_get_mib(struct ib_device *ibdev,
383 union rdma_protocol_stats *stats)
384{
385 return -ENOSYS;
386}
387
388static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
389static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
390static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
391static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
392
393static struct device_attribute *c4iw_class_attributes[] = {
394 &dev_attr_hw_rev,
395 &dev_attr_fw_ver,
396 &dev_attr_hca_type,
397 &dev_attr_board_id,
398};
399
400int c4iw_register_device(struct c4iw_dev *dev)
401{
402 int ret;
403 int i;
404
405 PDBG("%s c4iw_dev %p\n", __func__, dev);
406 BUG_ON(!dev->rdev.lldi.ports[0]);
407 strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
408 memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
409 memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
410 dev->ibdev.owner = THIS_MODULE;
411 dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
412 if (fastreg_support)
413 dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
414 dev->ibdev.local_dma_lkey = 0;
415 dev->ibdev.uverbs_cmd_mask =
416 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
417 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
418 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
419 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
420 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
421 (1ull << IB_USER_VERBS_CMD_REG_MR) |
422 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
423 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
424 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
425 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
426 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
427 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
428 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
429 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
430 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
431 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
432 (1ull << IB_USER_VERBS_CMD_POST_RECV);
433 dev->ibdev.node_type = RDMA_NODE_RNIC;
434 memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
435 dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
436 dev->ibdev.num_comp_vectors = 1;
437 dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
438 dev->ibdev.query_device = c4iw_query_device;
439 dev->ibdev.query_port = c4iw_query_port;
440 dev->ibdev.modify_port = c4iw_modify_port;
441 dev->ibdev.query_pkey = c4iw_query_pkey;
442 dev->ibdev.query_gid = c4iw_query_gid;
443 dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
444 dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
445 dev->ibdev.mmap = c4iw_mmap;
446 dev->ibdev.alloc_pd = c4iw_allocate_pd;
447 dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
448 dev->ibdev.create_ah = c4iw_ah_create;
449 dev->ibdev.destroy_ah = c4iw_ah_destroy;
450 dev->ibdev.create_qp = c4iw_create_qp;
451 dev->ibdev.modify_qp = c4iw_ib_modify_qp;
452 dev->ibdev.destroy_qp = c4iw_destroy_qp;
453 dev->ibdev.create_cq = c4iw_create_cq;
454 dev->ibdev.destroy_cq = c4iw_destroy_cq;
455 dev->ibdev.resize_cq = c4iw_resize_cq;
456 dev->ibdev.poll_cq = c4iw_poll_cq;
457 dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
458 dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
459 dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
460 dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
461 dev->ibdev.dereg_mr = c4iw_dereg_mr;
462 dev->ibdev.alloc_mw = c4iw_alloc_mw;
463 dev->ibdev.bind_mw = c4iw_bind_mw;
464 dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
465 dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
466 dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
467 dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
468 dev->ibdev.attach_mcast = c4iw_multicast_attach;
469 dev->ibdev.detach_mcast = c4iw_multicast_detach;
470 dev->ibdev.process_mad = c4iw_process_mad;
471 dev->ibdev.req_notify_cq = c4iw_arm_cq;
472 dev->ibdev.post_send = c4iw_post_send;
473 dev->ibdev.post_recv = c4iw_post_receive;
474 dev->ibdev.get_protocol_stats = c4iw_get_mib;
475
476 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
477 if (!dev->ibdev.iwcm)
478 return -ENOMEM;
479
480 dev->ibdev.iwcm->connect = c4iw_connect;
481 dev->ibdev.iwcm->accept = c4iw_accept_cr;
482 dev->ibdev.iwcm->reject = c4iw_reject_cr;
483 dev->ibdev.iwcm->create_listen = c4iw_create_listen;
484 dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
485 dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
486 dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
487 dev->ibdev.iwcm->get_qp = c4iw_get_qp;
488
489 ret = ib_register_device(&dev->ibdev);
490 if (ret)
491 goto bail1;
492
493 for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
494 ret = device_create_file(&dev->ibdev.dev,
495 c4iw_class_attributes[i]);
496 if (ret)
497 goto bail2;
498 }
499 return 0;
500bail2:
501 ib_unregister_device(&dev->ibdev);
502bail1:
503 kfree(dev->ibdev.iwcm);
504 return ret;
505}
506
507void c4iw_unregister_device(struct c4iw_dev *dev)
508{
509 int i;
510
511 PDBG("%s c4iw_dev %p\n", __func__, dev);
512 for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
513 device_remove_file(&dev->ibdev.dev,
514 c4iw_class_attributes[i]);
515 ib_unregister_device(&dev->ibdev);
516 kfree(dev->ibdev.iwcm);
518}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
new file mode 100644
index 000000000000..83a01dc0c4c1
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -0,0 +1,1577 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "iw_cxgb4.h"
33
34static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
35 struct c4iw_dev_ucontext *uctx)
36{
37 /*
38 * uP clears EQ contexts when the connection exits rdma mode,
39 * so no need to post a RESET WR for these EQs.
40 */
41 dma_free_coherent(&(rdev->lldi.pdev->dev),
42 wq->rq.memsize, wq->rq.queue,
43 pci_unmap_addr(&wq->rq, mapping));
44 dma_free_coherent(&(rdev->lldi.pdev->dev),
45 wq->sq.memsize, wq->sq.queue,
46 pci_unmap_addr(&wq->sq, mapping));
47 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
48 kfree(wq->rq.sw_rq);
49 kfree(wq->sq.sw_sq);
50 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
51 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
52 return 0;
53}
54
55static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
56 struct t4_cq *rcq, struct t4_cq *scq,
57 struct c4iw_dev_ucontext *uctx)
58{
59 int user = (uctx != &rdev->uctx);
60 struct fw_ri_res_wr *res_wr;
61 struct fw_ri_res *res;
62 int wr_len;
63 struct c4iw_wr_wait wr_wait;
64 struct sk_buff *skb;
65 int ret;
66 int eqsize;
67
68 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
69 if (!wq->sq.qid)
70 return -ENOMEM;
71
72 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
73 if (!wq->rq.qid)
74 goto err1;
75
76 if (!user) {
77 wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
78 GFP_KERNEL);
79 if (!wq->sq.sw_sq)
80 goto err2;
81
82 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
83 GFP_KERNEL);
84 if (!wq->rq.sw_rq)
85 goto err3;
86 }
87
88 /*
89 * RQT must be a power of 2.
90 */
91 wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
92 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
93 if (!wq->rq.rqt_hwaddr)
94 goto err4;
95
96 wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
97 wq->sq.memsize, &(wq->sq.dma_addr),
98 GFP_KERNEL);
99 if (!wq->sq.queue)
100 goto err5;
101 memset(wq->sq.queue, 0, wq->sq.memsize);
102 pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
103
104 wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
105 wq->rq.memsize, &(wq->rq.dma_addr),
106 GFP_KERNEL);
107 if (!wq->rq.queue)
108 goto err6;
109 PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
110 __func__, wq->sq.queue,
111 (unsigned long long)virt_to_phys(wq->sq.queue),
112 wq->rq.queue,
113 (unsigned long long)virt_to_phys(wq->rq.queue));
114 memset(wq->rq.queue, 0, wq->rq.memsize);
115 pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
116
117 wq->db = rdev->lldi.db_reg;
118 wq->gts = rdev->lldi.gts_reg;
119 if (user) {
120 wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
121 (wq->sq.qid << rdev->qpshift);
122 wq->sq.udb &= PAGE_MASK;
123 wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
124 (wq->rq.qid << rdev->qpshift);
125 wq->rq.udb &= PAGE_MASK;
126 }
127 wq->rdev = rdev;
128 wq->rq.msn = 1;
129
130 /* build fw_ri_res_wr */
131 wr_len = sizeof *res_wr + 2 * sizeof *res;
132
133 skb = alloc_skb(wr_len, GFP_KERNEL);
134 if (!skb) {
135 ret = -ENOMEM;
136 goto err7;
137 }
138 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
139
140 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
141 memset(res_wr, 0, wr_len);
142 res_wr->op_nres = cpu_to_be32(
143 FW_WR_OP(FW_RI_RES_WR) |
144 V_FW_RI_RES_WR_NRES(2) |
145 FW_WR_COMPL(1));
146 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
147 res_wr->cookie = (u64)&wr_wait;
148 res = res_wr->res;
149 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
150 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
151
152 /*
153 * eqsize is the number of 64B entries plus the status page size.
154 */
155 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
156
157 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
158 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
159 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
160 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
161 V_FW_RI_RES_WR_IQID(scq->cqid));
162 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
163 V_FW_RI_RES_WR_DCAEN(0) |
164 V_FW_RI_RES_WR_DCACPU(0) |
165 V_FW_RI_RES_WR_FBMIN(3) |
166 V_FW_RI_RES_WR_FBMAX(3) |
167 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
168 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
169 V_FW_RI_RES_WR_EQSIZE(eqsize));
170 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
171 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
172 res++;
173 res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
174 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
175
176 /*
177 * eqsize is the number of 64B entries plus the status page size.
178 */
179 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
180 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
181 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
182 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
183 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
184 V_FW_RI_RES_WR_IQID(rcq->cqid));
185 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
186 V_FW_RI_RES_WR_DCAEN(0) |
187 V_FW_RI_RES_WR_DCACPU(0) |
188 V_FW_RI_RES_WR_FBMIN(3) |
189 V_FW_RI_RES_WR_FBMAX(3) |
190 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
191 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
192 V_FW_RI_RES_WR_EQSIZE(eqsize));
193 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
194 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
195
196 c4iw_init_wr_wait(&wr_wait);
197
198 ret = c4iw_ofld_send(rdev, skb);
199 if (ret)
200 goto err7;
201 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
202 if (!wr_wait.done) {
203 printk(KERN_ERR MOD "Device %s not responding!\n",
204 pci_name(rdev->lldi.pdev));
205 rdev->flags = T4_FATAL_ERROR;
206 ret = -EIO;
207 } else
208 ret = wr_wait.ret;
209 if (ret)
210 goto err7;
211
212 PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
213 __func__, wq->sq.qid, wq->rq.qid, wq->db,
214 (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
215
216 return 0;
217err7:
218 dma_free_coherent(&(rdev->lldi.pdev->dev),
219 wq->rq.memsize, wq->rq.queue,
220 pci_unmap_addr(&wq->rq, mapping));
221err6:
222 dma_free_coherent(&(rdev->lldi.pdev->dev),
223 wq->sq.memsize, wq->sq.queue,
224 pci_unmap_addr(&wq->sq, mapping));
225err5:
226 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
227err4:
228 kfree(wq->rq.sw_rq);
229err3:
230 kfree(wq->sq.sw_sq);
231err2:
232 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
233err1:
234 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
235 return -ENOMEM;
236}
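/*
 * Descriptive note (added): create_qp() reserves both queue ids,
 * rounds the RQT up to a power of two, DMA-maps the SQ and RQ, and
 * then issues one FW_RI_RES_WR carrying two resource records (one per
 * EQ) that the firmware completes through the wr_wait cookie.
 */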
237
238static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
239{
240 int i;
241 u32 plen;
242 int size;
243 u8 *datap;
244
245 if (wr->num_sge > T4_MAX_SEND_SGE)
246 return -EINVAL;
247 switch (wr->opcode) {
248 case IB_WR_SEND:
249 if (wr->send_flags & IB_SEND_SOLICITED)
250 wqe->send.sendop_pkd = cpu_to_be32(
251 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
252 else
253 wqe->send.sendop_pkd = cpu_to_be32(
254 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
255 wqe->send.stag_inv = 0;
256 break;
257 case IB_WR_SEND_WITH_INV:
258 if (wr->send_flags & IB_SEND_SOLICITED)
259 wqe->send.sendop_pkd = cpu_to_be32(
260 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
261 else
262 wqe->send.sendop_pkd = cpu_to_be32(
263 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
264 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
265 break;
266
267 default:
268 return -EINVAL;
269 }
270 plen = 0;
271 if (wr->num_sge) {
272 if (wr->send_flags & IB_SEND_INLINE) {
273 datap = (u8 *)wqe->send.u.immd_src[0].data;
274 for (i = 0; i < wr->num_sge; i++) {
275 if ((plen + wr->sg_list[i].length) >
276 T4_MAX_SEND_INLINE) {
277 return -EMSGSIZE;
278 }
279 plen += wr->sg_list[i].length;
280 memcpy(datap,
281 (void *)(unsigned long)wr->sg_list[i].addr,
282 wr->sg_list[i].length);
283 datap += wr->sg_list[i].length;
284 }
285 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
286 wqe->send.u.immd_src[0].r1 = 0;
287 wqe->send.u.immd_src[0].r2 = 0;
288 wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
289 size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
290 plen;
291 } else {
292 for (i = 0; i < wr->num_sge; i++) {
293 if ((plen + wr->sg_list[i].length) < plen)
294 return -EMSGSIZE;
295 plen += wr->sg_list[i].length;
296 wqe->send.u.isgl_src[0].sge[i].stag =
297 cpu_to_be32(wr->sg_list[i].lkey);
298 wqe->send.u.isgl_src[0].sge[i].len =
299 cpu_to_be32(wr->sg_list[i].length);
300 wqe->send.u.isgl_src[0].sge[i].to =
301 cpu_to_be64(wr->sg_list[i].addr);
302 }
303 wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
304 wqe->send.u.isgl_src[0].r1 = 0;
305 wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
306 wqe->send.u.isgl_src[0].r2 = 0;
307 size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
308 wr->num_sge * sizeof(struct fw_ri_sge);
309 }
310 } else {
311 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
312 wqe->send.u.immd_src[0].r1 = 0;
313 wqe->send.u.immd_src[0].r2 = 0;
314 wqe->send.u.immd_src[0].immdlen = 0;
315 size = sizeof wqe->send + sizeof(struct fw_ri_immd);
316 }
317 *len16 = DIV_ROUND_UP(size, 16);
318 wqe->send.plen = cpu_to_be32(plen);
319 return 0;
320}
321
322static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
323{
324 int i;
325 u32 plen;
326 int size;
327 u8 *datap;
328
329 if (wr->num_sge > T4_MAX_WRITE_SGE)
330 return -EINVAL;
331 wqe->write.r2 = 0;
332 wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
333 wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
334 plen = 0;
335 if (wr->num_sge) {
336 if (wr->send_flags & IB_SEND_INLINE) {
337 datap = (u8 *)wqe->write.u.immd_src[0].data;
338 for (i = 0; i < wr->num_sge; i++) {
339 if ((plen + wr->sg_list[i].length) >
340 T4_MAX_WRITE_INLINE) {
341 return -EMSGSIZE;
342 }
343 plen += wr->sg_list[i].length;
344 memcpy(datap,
345 (void *)(unsigned long)wr->sg_list[i].addr,
346 wr->sg_list[i].length);
347 datap += wr->sg_list[i].length;
348 }
349 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
350 wqe->write.u.immd_src[0].r1 = 0;
351 wqe->write.u.immd_src[0].r2 = 0;
352 wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
353 size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
354 plen;
355 } else {
356 for (i = 0; i < wr->num_sge; i++) {
357 if ((plen + wr->sg_list[i].length) < plen)
358 return -EMSGSIZE;
359 plen += wr->sg_list[i].length;
360 wqe->write.u.isgl_src[0].sge[i].stag =
361 cpu_to_be32(wr->sg_list[i].lkey);
362 wqe->write.u.isgl_src[0].sge[i].len =
363 cpu_to_be32(wr->sg_list[i].length);
364 wqe->write.u.isgl_src[0].sge[i].to =
365 cpu_to_be64(wr->sg_list[i].addr);
366 }
367 wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
368 wqe->write.u.isgl_src[0].r1 = 0;
369 wqe->write.u.isgl_src[0].nsge =
370 cpu_to_be16(wr->num_sge);
371 wqe->write.u.isgl_src[0].r2 = 0;
372 size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
373 wr->num_sge * sizeof(struct fw_ri_sge);
374 }
375 } else {
376 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
377 wqe->write.u.immd_src[0].r1 = 0;
378 wqe->write.u.immd_src[0].r2 = 0;
379 wqe->write.u.immd_src[0].immdlen = 0;
380 size = sizeof wqe->write + sizeof(struct fw_ri_immd);
381 }
382 *len16 = DIV_ROUND_UP(size, 16);
383 wqe->write.plen = cpu_to_be32(plen);
384 return 0;
385}
386
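/*
 * Build an RDMA READ request.  Only a single scatter entry is
 * supported.  With no SGE at all, a zero-length read is built with
 * what appear to be dummy STAG values so the hardware still sees a
 * well-formed request.
 */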
387static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
388{
389 if (wr->num_sge > 1)
390 return -EINVAL;
391 if (wr->num_sge) {
392 wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
393 wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
394 >> 32));
395 wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
396 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
397 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
398 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
399 >> 32));
400 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
401 } else {
402 wqe->read.stag_src = cpu_to_be32(2);
403 wqe->read.to_src_hi = 0;
404 wqe->read.to_src_lo = 0;
405 wqe->read.stag_sink = cpu_to_be32(2);
406 wqe->read.plen = 0;
407 wqe->read.to_sink_hi = 0;
408 wqe->read.to_sink_lo = 0;
409 }
410 wqe->read.r2 = 0;
411 wqe->read.r5 = 0;
412 *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
413 return 0;
414}
415
416static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
417 struct ib_recv_wr *wr, u8 *len16)
418{
419 int i;
420 int plen = 0;
421
422 for (i = 0; i < wr->num_sge; i++) {
423 if ((plen + wr->sg_list[i].length) < plen)
424 return -EMSGSIZE;
425 plen += wr->sg_list[i].length;
426 wqe->recv.isgl.sge[i].stag =
427 cpu_to_be32(wr->sg_list[i].lkey);
428 wqe->recv.isgl.sge[i].len =
429 cpu_to_be32(wr->sg_list[i].length);
430 wqe->recv.isgl.sge[i].to =
431 cpu_to_be64(wr->sg_list[i].addr);
432 }
433 for (; i < T4_MAX_RECV_SGE; i++) {
434 wqe->recv.isgl.sge[i].stag = 0;
435 wqe->recv.isgl.sge[i].len = 0;
436 wqe->recv.isgl.sge[i].to = 0;
437 }
438 wqe->recv.isgl.op = FW_RI_DATA_ISGL;
439 wqe->recv.isgl.r1 = 0;
440 wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
441 wqe->recv.isgl.r2 = 0;
442 *len16 = DIV_ROUND_UP(sizeof wqe->recv +
443 wr->num_sge * sizeof(struct fw_ri_sge), 16);
444 return 0;
445}
446
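/*
 * Build a fast-register (FR_NSMR) work request.  The page list is
 * placed inline as FW_RI_DATA_IMMD when it fits within T4_MAX_FR_IMMD;
 * otherwise a one-entry DSGL points the hardware at the DMA-mapped
 * page list.
 */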
447static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
448{
449
450 struct fw_ri_immd *imdp;
451 __be64 *p;
452 int i;
453 int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
454
455 if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
456 return -EINVAL;
457
458 wqe->fr.qpbinde_to_dcacpu = 0;
459 wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
460 wqe->fr.addr_type = FW_RI_VA_BASED_TO;
461 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
462 wqe->fr.len_hi = 0;
463 wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
464 wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
465 wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
466 wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
467 0xffffffff);
468 if (pbllen > T4_MAX_FR_IMMD) {
469 struct c4iw_fr_page_list *c4pl =
470 to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
471 struct fw_ri_dsgl *sglp;
472
473 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
474 sglp->op = FW_RI_DATA_DSGL;
475 sglp->r1 = 0;
476 sglp->nsge = cpu_to_be16(1);
477 sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
478 sglp->len0 = cpu_to_be32(pbllen);
479
480 *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
481 } else {
482 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
483 imdp->op = FW_RI_DATA_IMMD;
484 imdp->r1 = 0;
485 imdp->r2 = 0;
486 imdp->immdlen = cpu_to_be32(pbllen);
487 p = (__be64 *)(imdp + 1);
488 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
489 *p = cpu_to_be64(
490 (u64)wr->wr.fast_reg.page_list->page_list[i]);
491 *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
492 16);
493 }
494 return 0;
495}
496
497static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
498 u8 *len16)
499{
500 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
501 wqe->inv.r2 = 0;
502 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
503 return 0;
504}
505
506void c4iw_qp_add_ref(struct ib_qp *qp)
507{
508 PDBG("%s ib_qp %p\n", __func__, qp);
509 atomic_inc(&(to_c4iw_qp(qp)->refcnt));
510}
511
512void c4iw_qp_rem_ref(struct ib_qp *qp)
513{
514 PDBG("%s ib_qp %p\n", __func__, qp);
515 if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
516 wake_up(&(to_c4iw_qp(qp)->wait));
517}
518
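/*
 * Post a chain of send work requests: under the QP lock, verify the
 * queue is not in error and has room, build the opcode-specific WQE
 * in the next SQ slot, record it in the software SQ, and advance the
 * producer index.  The doorbell is rung once for the whole batch.
 */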
519int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
520 struct ib_send_wr **bad_wr)
521{
522 int err = 0;
523 u8 len16 = 0;
524 enum fw_wr_opcodes fw_opcode = 0;
525 enum fw_ri_wr_flags fw_flags;
526 struct c4iw_qp *qhp;
527 union t4_wr *wqe;
528 u32 num_wrs;
529 struct t4_swsqe *swsqe;
530 unsigned long flag;
531 u16 idx = 0;
532
533 qhp = to_c4iw_qp(ibqp);
534 spin_lock_irqsave(&qhp->lock, flag);
535 if (t4_wq_in_error(&qhp->wq)) {
536 spin_unlock_irqrestore(&qhp->lock, flag);
537 return -EINVAL;
538 }
539 num_wrs = t4_sq_avail(&qhp->wq);
540 if (num_wrs == 0) {
541 spin_unlock_irqrestore(&qhp->lock, flag);
542 return -ENOMEM;
543 }
544 while (wr) {
545 if (num_wrs == 0) {
546 err = -ENOMEM;
547 *bad_wr = wr;
548 break;
549 }
550 wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
551 fw_flags = 0;
552 if (wr->send_flags & IB_SEND_SOLICITED)
553 fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
554 if (wr->send_flags & IB_SEND_SIGNALED)
555 fw_flags |= FW_RI_COMPLETION_FLAG;
556 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
557 switch (wr->opcode) {
558 case IB_WR_SEND_WITH_INV:
559 case IB_WR_SEND:
560 if (wr->send_flags & IB_SEND_FENCE)
561 fw_flags |= FW_RI_READ_FENCE_FLAG;
562 fw_opcode = FW_RI_SEND_WR;
563 if (wr->opcode == IB_WR_SEND)
564 swsqe->opcode = FW_RI_SEND;
565 else
566 swsqe->opcode = FW_RI_SEND_WITH_INV;
567 err = build_rdma_send(wqe, wr, &len16);
568 break;
569 case IB_WR_RDMA_WRITE:
570 fw_opcode = FW_RI_RDMA_WRITE_WR;
571 swsqe->opcode = FW_RI_RDMA_WRITE;
572 err = build_rdma_write(wqe, wr, &len16);
573 break;
574 case IB_WR_RDMA_READ:
575 fw_opcode = FW_RI_RDMA_READ_WR;
576 swsqe->opcode = FW_RI_READ_REQ;
577 fw_flags = 0;
578 err = build_rdma_read(wqe, wr, &len16);
579 if (err)
580 break;
581 swsqe->read_len = wr->sg_list[0].length;
582 if (!qhp->wq.sq.oldest_read)
583 qhp->wq.sq.oldest_read = swsqe;
584 break;
585 case IB_WR_FAST_REG_MR:
586 fw_opcode = FW_RI_FR_NSMR_WR;
587 swsqe->opcode = FW_RI_FAST_REGISTER;
588 err = build_fastreg(wqe, wr, &len16);
589 break;
590 case IB_WR_LOCAL_INV:
591 fw_opcode = FW_RI_INV_LSTAG_WR;
592 swsqe->opcode = FW_RI_LOCAL_INV;
593 err = build_inv_stag(wqe, wr, &len16);
594 break;
595 default:
596 PDBG("%s post of type=%d TBD!\n", __func__,
597 wr->opcode);
598 err = -EINVAL;
599 }
600 if (err) {
601 *bad_wr = wr;
602 break;
603 }
604 swsqe->idx = qhp->wq.sq.pidx;
605 swsqe->complete = 0;
606 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
607 swsqe->wr_id = wr->wr_id;
608
609 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
610
611 PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
612 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
613 swsqe->opcode, swsqe->read_len);
614 wr = wr->next;
615 num_wrs--;
616 t4_sq_produce(&qhp->wq);
617 idx++;
618 }
619 if (t4_wq_db_enabled(&qhp->wq))
620 t4_ring_sq_db(&qhp->wq, idx);
621 spin_unlock_irqrestore(&qhp->lock, flag);
622 return err;
623}
624
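/*
 * Post receive work requests.  Mirrors c4iw_post_send(): each WR is
 * turned into a fw_ri_recv_wr in the next RQ slot and the doorbell
 * is rung once with the number of slots produced.
 */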
625int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
626 struct ib_recv_wr **bad_wr)
627{
628 int err = 0;
629 struct c4iw_qp *qhp;
630 union t4_recv_wr *wqe;
631 u32 num_wrs;
632 u8 len16 = 0;
633 unsigned long flag;
634 u16 idx = 0;
635
636 qhp = to_c4iw_qp(ibqp);
637 spin_lock_irqsave(&qhp->lock, flag);
638 if (t4_wq_in_error(&qhp->wq)) {
639 spin_unlock_irqrestore(&qhp->lock, flag);
640 return -EINVAL;
641 }
642 num_wrs = t4_rq_avail(&qhp->wq);
643 if (num_wrs == 0) {
644 spin_unlock_irqrestore(&qhp->lock, flag);
645 return -ENOMEM;
646 }
647 while (wr) {
648 if (wr->num_sge > T4_MAX_RECV_SGE) {
649 err = -EINVAL;
650 *bad_wr = wr;
651 break;
652 }
653 wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
654 if (num_wrs)
655 err = build_rdma_recv(qhp, wqe, wr, &len16);
656 else
657 err = -ENOMEM;
658 if (err) {
659 *bad_wr = wr;
660 break;
661 }
662
663 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
664
665 wqe->recv.opcode = FW_RI_RECV_WR;
666 wqe->recv.r1 = 0;
667 wqe->recv.wrid = qhp->wq.rq.pidx;
668 wqe->recv.r2[0] = 0;
669 wqe->recv.r2[1] = 0;
670 wqe->recv.r2[2] = 0;
671 wqe->recv.len16 = len16;
672 if (len16 < 5)
673 wqe->flits[8] = 0;
674
675 PDBG("%s cookie 0x%llx pidx %u\n", __func__,
676 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
677 t4_rq_produce(&qhp->wq);
678 wr = wr->next;
679 num_wrs--;
680 idx++;
681 }
682 if (t4_wq_db_enabled(&qhp->wq))
683 t4_ring_rq_db(&qhp->wq, idx);
684 spin_unlock_irqrestore(&qhp->lock, flag);
685 return err;
686}
687
688int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
689{
690 return -ENOSYS;
691}
692
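/*
 * Map a T4 CQE error status onto the iWARP TERMINATE layer/etype and
 * error code carried in the terminate message.  A NULL err_cqe (or an
 * unrecognized status) is reported as a local catastrophic error.
 */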
693static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
694 u8 *ecode)
695{
696 int status;
697 int tagged;
698 int opcode;
699 int rqtype;
700 int send_inv;
701
702 if (!err_cqe) {
703 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
704 *ecode = 0;
705 return;
706 }
707
708 status = CQE_STATUS(err_cqe);
709 opcode = CQE_OPCODE(err_cqe);
710 rqtype = RQ_TYPE(err_cqe);
711 send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
712 (opcode == FW_RI_SEND_WITH_SE_INV);
713 tagged = (opcode == FW_RI_RDMA_WRITE) ||
714 (rqtype && (opcode == FW_RI_READ_RESP));
715
716 switch (status) {
717 case T4_ERR_STAG:
718 if (send_inv) {
719 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
720 *ecode = RDMAP_CANT_INV_STAG;
721 } else {
722 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
723 *ecode = RDMAP_INV_STAG;
724 }
725 break;
726 case T4_ERR_PDID:
727 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
728 if ((opcode == FW_RI_SEND_WITH_INV) ||
729 (opcode == FW_RI_SEND_WITH_SE_INV))
730 *ecode = RDMAP_CANT_INV_STAG;
731 else
732 *ecode = RDMAP_STAG_NOT_ASSOC;
733 break;
734 case T4_ERR_QPID:
735 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
736 *ecode = RDMAP_STAG_NOT_ASSOC;
737 break;
738 case T4_ERR_ACCESS:
739 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
740 *ecode = RDMAP_ACC_VIOL;
741 break;
742 case T4_ERR_WRAP:
743 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
744 *ecode = RDMAP_TO_WRAP;
745 break;
746 case T4_ERR_BOUND:
747 if (tagged) {
748 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
749 *ecode = DDPT_BASE_BOUNDS;
750 } else {
751 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
752 *ecode = RDMAP_BASE_BOUNDS;
753 }
754 break;
755 case T4_ERR_INVALIDATE_SHARED_MR:
756 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
757 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
758 *ecode = RDMAP_CANT_INV_STAG;
759 break;
760 case T4_ERR_ECC:
761 case T4_ERR_ECC_PSTAG:
762 case T4_ERR_INTERNAL_ERR:
763 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
764 *ecode = 0;
765 break;
766 case T4_ERR_OUT_OF_RQE:
767 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
768 *ecode = DDPU_INV_MSN_NOBUF;
769 break;
770 case T4_ERR_PBL_ADDR_BOUND:
771 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
772 *ecode = DDPT_BASE_BOUNDS;
773 break;
774 case T4_ERR_CRC:
775 *layer_type = LAYER_MPA|DDP_LLP;
776 *ecode = MPA_CRC_ERR;
777 break;
778 case T4_ERR_MARKER:
779 *layer_type = LAYER_MPA|DDP_LLP;
780 *ecode = MPA_MARKER_ERR;
781 break;
782 case T4_ERR_PDU_LEN_ERR:
783 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
784 *ecode = DDPU_MSG_TOOBIG;
785 break;
786 case T4_ERR_DDP_VERSION:
787 if (tagged) {
788 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
789 *ecode = DDPT_INV_VERS;
790 } else {
791 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
792 *ecode = DDPU_INV_VERS;
793 }
794 break;
795 case T4_ERR_RDMA_VERSION:
796 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
797 *ecode = RDMAP_INV_VERS;
798 break;
799 case T4_ERR_OPCODE:
800 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
801 *ecode = RDMAP_INV_OPCODE;
802 break;
803 case T4_ERR_DDP_QUEUE_NUM:
804 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
805 *ecode = DDPU_INV_QN;
806 break;
807 case T4_ERR_MSN:
808 case T4_ERR_MSN_GAP:
809 case T4_ERR_MSN_RANGE:
810 case T4_ERR_IRD_OVERFLOW:
811 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
812 *ecode = DDPU_INV_MSN_RANGE;
813 break;
814 case T4_ERR_TBIT:
815 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
816 *ecode = 0;
817 break;
818 case T4_ERR_MO:
819 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
820 *ecode = DDPU_INV_MO;
821 break;
822 default:
823 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
824 *ecode = 0;
825 break;
826 }
827}
828
829int c4iw_post_zb_read(struct c4iw_qp *qhp)
830{
831 union t4_wr *wqe;
832 struct sk_buff *skb;
833 u8 len16;
834
835 PDBG("%s enter\n", __func__);
836 skb = alloc_skb(40, GFP_KERNEL);
837 if (!skb) {
838 printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
839 return -ENOMEM;
840 }
841 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
842
843 wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
844 memset(wqe, 0, sizeof wqe->read);
845 wqe->read.r2 = cpu_to_be64(0);
846 wqe->read.stag_sink = cpu_to_be32(1);
847 wqe->read.to_sink_hi = cpu_to_be32(0);
848 wqe->read.to_sink_lo = cpu_to_be32(1);
849 wqe->read.stag_src = cpu_to_be32(1);
850 wqe->read.plen = cpu_to_be32(0);
851 wqe->read.to_src_hi = cpu_to_be32(0);
852 wqe->read.to_src_lo = cpu_to_be32(1);
853 len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
854 init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
855
856 return c4iw_ofld_send(&qhp->rhp->rdev, skb);
857}
858
859static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
860 gfp_t gfp)
861{
862 struct fw_ri_wr *wqe;
863 struct sk_buff *skb;
864 struct terminate_message *term;
865
866 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
867 qhp->ep->hwtid);
868
869 skb = alloc_skb(sizeof *wqe, gfp);
870 if (!skb)
871 return;
872 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
873
874 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
875 memset(wqe, 0, sizeof *wqe);
876 wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
877 wqe->flowid_len16 = cpu_to_be32(
878 FW_WR_FLOWID(qhp->ep->hwtid) |
879 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
880
881 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
882 wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
883 term = (struct terminate_message *)wqe->u.terminate.termmsg;
884 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
885 c4iw_ofld_send(&qhp->rhp->rdev, skb);
886}
887
888/*
889 * Assumes qhp lock is held.
890 */
891static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
892 struct c4iw_cq *schp, unsigned long *flag)
893{
894 int count;
895 int flushed;
896
897 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
898 /* take a ref on the qhp since we must release the lock */
899 atomic_inc(&qhp->refcnt);
900 spin_unlock_irqrestore(&qhp->lock, *flag);
901
902	/* locking hierarchy: cq lock first, then qp lock. */
903 spin_lock_irqsave(&rchp->lock, *flag);
904 spin_lock(&qhp->lock);
905 c4iw_flush_hw_cq(&rchp->cq);
906 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
907 flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
908 spin_unlock(&qhp->lock);
909 spin_unlock_irqrestore(&rchp->lock, *flag);
910 if (flushed)
911 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
912
913	/* locking hierarchy: cq lock first, then qp lock. */
914 spin_lock_irqsave(&schp->lock, *flag);
915 spin_lock(&qhp->lock);
916 c4iw_flush_hw_cq(&schp->cq);
917 c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
918 flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
919 spin_unlock(&qhp->lock);
920 spin_unlock_irqrestore(&schp->lock, *flag);
921 if (flushed)
922 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
923
924 /* deref */
925 if (atomic_dec_and_test(&qhp->refcnt))
926 wake_up(&qhp->wait);
927
928 spin_lock_irqsave(&qhp->lock, *flag);
929}
930
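/*
 * For userspace QPs just mark the WQ and CQs in error (the flush is
 * presumably completed from user mode); kernel QPs are flushed in
 * place via __flush_qp().
 */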
931static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
932{
933 struct c4iw_cq *rchp, *schp;
934
935 rchp = get_chp(qhp->rhp, qhp->attr.rcq);
936 schp = get_chp(qhp->rhp, qhp->attr.scq);
937
938 if (qhp->ibqp.uobject) {
939 t4_set_wq_in_error(&qhp->wq);
940 t4_set_cq_in_error(&rchp->cq);
941 if (schp != rchp)
942 t4_set_cq_in_error(&schp->cq);
943 return;
944 }
945 __flush_qp(qhp, rchp, schp, flag);
946}
947
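/*
 * Post a FW_RI_INIT_WR of type FINI with a completion requested, then
 * wait (bounded by C4IW_WR_TO) for the reply handler elsewhere in the
 * driver to wake the on-stack wr_wait cookie.  A timeout marks the
 * whole device with T4_FATAL_ERROR.
 */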
948static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
949{
950 struct fw_ri_wr *wqe;
951 int ret;
952 struct c4iw_wr_wait wr_wait;
953 struct sk_buff *skb;
954
955 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
956 qhp->ep->hwtid);
957
958 skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
959 if (!skb)
960 return -ENOMEM;
961 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
962
963 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
964 memset(wqe, 0, sizeof *wqe);
965 wqe->op_compl = cpu_to_be32(
966 FW_WR_OP(FW_RI_INIT_WR) |
967 FW_WR_COMPL(1));
968 wqe->flowid_len16 = cpu_to_be32(
969 FW_WR_FLOWID(qhp->ep->hwtid) |
970 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
971 wqe->cookie = (u64)&wr_wait;
972
973 wqe->u.fini.type = FW_RI_TYPE_FINI;
974 c4iw_init_wr_wait(&wr_wait);
975 ret = c4iw_ofld_send(&rhp->rdev, skb);
976 if (ret)
977 goto out;
978
979 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
980 if (!wr_wait.done) {
981 printk(KERN_ERR MOD "Device %s not responding!\n",
982 pci_name(rhp->rdev.lldi.pdev));
983 rhp->rdev.flags = T4_FATAL_ERROR;
984 ret = -EIO;
985 } else {
986 ret = wr_wait.ret;
987 if (ret)
988 printk(KERN_WARNING MOD
989 "%s: Abnormal close qpid %d ret %u\n",
990 pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
991 ret);
992 }
993out:
994 PDBG("%s ret %d\n", __func__, ret);
995 return ret;
996}
997
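/*
 * Build what appears to be the peer-to-peer RTR message sent on
 * behalf of the MPA initiator: a zero-length RDMA WRITE or READ,
 * depending on the negotiated p2p_type, using placeholder STAG/TO
 * values of 1.
 */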
998static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
999{
1000 memset(&init->u, 0, sizeof init->u);
1001 switch (p2p_type) {
1002 case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1003 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1004 init->u.write.stag_sink = cpu_to_be32(1);
1005 init->u.write.to_sink = cpu_to_be64(1);
1006 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1007 init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
1008 sizeof(struct fw_ri_immd),
1009 16);
1010 break;
1011 case FW_RI_INIT_P2PTYPE_READ_REQ:
1012		init->u.read.opcode = FW_RI_RDMA_READ_WR;
1013 init->u.read.stag_src = cpu_to_be32(1);
1014 init->u.read.to_src_lo = cpu_to_be32(1);
1015 init->u.read.stag_sink = cpu_to_be32(1);
1016 init->u.read.to_sink_lo = cpu_to_be32(1);
1017 init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
1018 break;
1019 }
1020}
1021
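/*
 * Post a FW_RI_INIT_WR of type INIT describing the QP to the firmware
 * (MPA attributes, capabilities, queue/CQ ids, ORD/IRD, RQT region)
 * and wait for the reply as in rdma_fini() above.
 */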
1022static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1023{
1024 struct fw_ri_wr *wqe;
1025 int ret;
1026 struct c4iw_wr_wait wr_wait;
1027 struct sk_buff *skb;
1028
1029 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
1030 qhp->ep->hwtid);
1031
1032 skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
1033 if (!skb)
1034 return -ENOMEM;
1035 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1036
1037 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
1038 memset(wqe, 0, sizeof *wqe);
1039 wqe->op_compl = cpu_to_be32(
1040 FW_WR_OP(FW_RI_INIT_WR) |
1041 FW_WR_COMPL(1));
1042 wqe->flowid_len16 = cpu_to_be32(
1043 FW_WR_FLOWID(qhp->ep->hwtid) |
1044 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
1045
1046 wqe->cookie = (u64)&wr_wait;
1047
1048 wqe->u.init.type = FW_RI_TYPE_INIT;
1049 wqe->u.init.mpareqbit_p2ptype =
1050 V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
1051 V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
1052 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1053 if (qhp->attr.mpa_attr.recv_marker_enabled)
1054 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1055 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1056 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1057 if (qhp->attr.mpa_attr.crc_enabled)
1058 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1059
1060 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1061 FW_RI_QP_RDMA_WRITE_ENABLE |
1062 FW_RI_QP_BIND_ENABLE;
1063 if (!qhp->ibqp.uobject)
1064 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1065 FW_RI_QP_STAG0_ENABLE;
1066 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1067 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1068 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1069 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1070 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1071 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1072 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1073 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1074 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1075 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1076 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1077 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1078 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1079 rhp->rdev.lldi.vr->rq.start);
1080 if (qhp->attr.mpa_attr.initiator)
1081 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1082
1083 c4iw_init_wr_wait(&wr_wait);
1084 ret = c4iw_ofld_send(&rhp->rdev, skb);
1085 if (ret)
1086 goto out;
1087
1088 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
1089 if (!wr_wait.done) {
1090 printk(KERN_ERR MOD "Device %s not responding!\n",
1091 pci_name(rhp->rdev.lldi.pdev));
1092 rhp->rdev.flags = T4_FATAL_ERROR;
1093 ret = -EIO;
1094 } else
1095 ret = wr_wait.ret;
1096out:
1097 PDBG("%s ret %d\n", __func__, ret);
1098 return ret;
1099}
1100
1101int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1102 enum c4iw_qp_attr_mask mask,
1103 struct c4iw_qp_attributes *attrs,
1104 int internal)
1105{
1106 int ret = 0;
1107 struct c4iw_qp_attributes newattr = qhp->attr;
1108 unsigned long flag;
1109 int disconnect = 0;
1110 int terminate = 0;
1111 int abort = 0;
1112 int free = 0;
1113 struct c4iw_ep *ep = NULL;
1114
1115 PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
1116 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1117 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1118
1119 spin_lock_irqsave(&qhp->lock, flag);
1120
1121 /* Process attr changes if in IDLE */
1122 if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1123 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1124 ret = -EIO;
1125 goto out;
1126 }
1127 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1128 newattr.enable_rdma_read = attrs->enable_rdma_read;
1129 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1130 newattr.enable_rdma_write = attrs->enable_rdma_write;
1131 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1132 newattr.enable_bind = attrs->enable_bind;
1133 if (mask & C4IW_QP_ATTR_MAX_ORD) {
1134 if (attrs->max_ord > c4iw_max_read_depth) {
1135 ret = -EINVAL;
1136 goto out;
1137 }
1138 newattr.max_ord = attrs->max_ord;
1139 }
1140 if (mask & C4IW_QP_ATTR_MAX_IRD) {
1141 if (attrs->max_ird > c4iw_max_read_depth) {
1142 ret = -EINVAL;
1143 goto out;
1144 }
1145 newattr.max_ird = attrs->max_ird;
1146 }
1147 qhp->attr = newattr;
1148 }
1149
1150 if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1151 goto out;
1152 if (qhp->attr.state == attrs->next_state)
1153 goto out;
1154
1155 switch (qhp->attr.state) {
1156 case C4IW_QP_STATE_IDLE:
1157 switch (attrs->next_state) {
1158 case C4IW_QP_STATE_RTS:
1159 if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1160 ret = -EINVAL;
1161 goto out;
1162 }
1163 if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1164 ret = -EINVAL;
1165 goto out;
1166 }
1167 qhp->attr.mpa_attr = attrs->mpa_attr;
1168 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1169 qhp->ep = qhp->attr.llp_stream_handle;
1170 qhp->attr.state = C4IW_QP_STATE_RTS;
1171
1172 /*
1173 * Ref the endpoint here and deref when we
1174 * disassociate the endpoint from the QP. This
1175 * happens in CLOSING->IDLE transition or *->ERROR
1176 * transition.
1177 */
1178 c4iw_get_ep(&qhp->ep->com);
1179 spin_unlock_irqrestore(&qhp->lock, flag);
1180 ret = rdma_init(rhp, qhp);
1181 spin_lock_irqsave(&qhp->lock, flag);
1182 if (ret)
1183 goto err;
1184 break;
1185 case C4IW_QP_STATE_ERROR:
1186 qhp->attr.state = C4IW_QP_STATE_ERROR;
1187 flush_qp(qhp, &flag);
1188 break;
1189 default:
1190 ret = -EINVAL;
1191 goto out;
1192 }
1193 break;
1194 case C4IW_QP_STATE_RTS:
1195 switch (attrs->next_state) {
1196 case C4IW_QP_STATE_CLOSING:
1197 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1198 qhp->attr.state = C4IW_QP_STATE_CLOSING;
1199 if (!internal) {
1200 abort = 0;
1201 disconnect = 1;
1202 ep = qhp->ep;
1203 c4iw_get_ep(&ep->com);
1204 }
1205 spin_unlock_irqrestore(&qhp->lock, flag);
1206 ret = rdma_fini(rhp, qhp);
1207 spin_lock_irqsave(&qhp->lock, flag);
1208 if (ret) {
1209 ep = qhp->ep;
1210 c4iw_get_ep(&ep->com);
1211 disconnect = abort = 1;
1212 goto err;
1213 }
1214 break;
1215 case C4IW_QP_STATE_TERMINATE:
1216 qhp->attr.state = C4IW_QP_STATE_TERMINATE;
1217 if (qhp->ibqp.uobject)
1218 t4_set_wq_in_error(&qhp->wq);
1219 ep = qhp->ep;
1220 c4iw_get_ep(&ep->com);
1221 terminate = 1;
1222 disconnect = 1;
1223 break;
1224 case C4IW_QP_STATE_ERROR:
1225 qhp->attr.state = C4IW_QP_STATE_ERROR;
1226 if (!internal) {
1227 abort = 1;
1228 disconnect = 1;
1229 ep = qhp->ep;
1230 c4iw_get_ep(&ep->com);
1231 }
1232 goto err;
1233 break;
1234 default:
1235 ret = -EINVAL;
1236 goto out;
1237 }
1238 break;
1239 case C4IW_QP_STATE_CLOSING:
1240 if (!internal) {
1241 ret = -EINVAL;
1242 goto out;
1243 }
1244 switch (attrs->next_state) {
1245 case C4IW_QP_STATE_IDLE:
1246 flush_qp(qhp, &flag);
1247 qhp->attr.state = C4IW_QP_STATE_IDLE;
1248 qhp->attr.llp_stream_handle = NULL;
1249 c4iw_put_ep(&qhp->ep->com);
1250 qhp->ep = NULL;
1251 wake_up(&qhp->wait);
1252 break;
1253 case C4IW_QP_STATE_ERROR:
1254 goto err;
1255 default:
1256 ret = -EINVAL;
1257 goto err;
1258 }
1259 break;
1260 case C4IW_QP_STATE_ERROR:
1261 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
1262 ret = -EINVAL;
1263 goto out;
1264 }
1265 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
1266 ret = -EINVAL;
1267 goto out;
1268 }
1269 qhp->attr.state = C4IW_QP_STATE_IDLE;
1270 break;
1271 case C4IW_QP_STATE_TERMINATE:
1272 if (!internal) {
1273 ret = -EINVAL;
1274 goto out;
1275 }
1276 goto err;
1277 break;
1278 default:
1279 printk(KERN_ERR "%s in a bad state %d\n",
1280 __func__, qhp->attr.state);
1281 ret = -EINVAL;
1282 goto err;
1283 break;
1284 }
1285 goto out;
1286err:
1287 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
1288 qhp->wq.sq.qid);
1289
1290 /* disassociate the LLP connection */
1291 qhp->attr.llp_stream_handle = NULL;
1292 ep = qhp->ep;
1293 qhp->ep = NULL;
1294 qhp->attr.state = C4IW_QP_STATE_ERROR;
1295 free = 1;
1296 wake_up(&qhp->wait);
1297 BUG_ON(!ep);
1298 flush_qp(qhp, &flag);
1299out:
1300 spin_unlock_irqrestore(&qhp->lock, flag);
1301
1302 if (terminate)
1303 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
1304
1305 /*
1306 * If disconnect is 1, then we need to initiate a disconnect
1307 * on the EP. This can be a normal close (RTS->CLOSING) or
1308 * an abnormal close (RTS/CLOSING->ERROR).
1309 */
1310 if (disconnect) {
1311 c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
1312 GFP_KERNEL);
1313 c4iw_put_ep(&ep->com);
1314 }
1315
1316 /*
1317 * If free is 1, then we've disassociated the EP from the QP
1318 * and we need to dereference the EP.
1319 */
1320 if (free)
1321 c4iw_put_ep(&ep->com);
1322
1323 PDBG("%s exit state %d\n", __func__, qhp->attr.state);
1324 return ret;
1325}
1326
1327int c4iw_destroy_qp(struct ib_qp *ib_qp)
1328{
1329 struct c4iw_dev *rhp;
1330 struct c4iw_qp *qhp;
1331 struct c4iw_qp_attributes attrs;
1332 struct c4iw_ucontext *ucontext;
1333
1334 qhp = to_c4iw_qp(ib_qp);
1335 rhp = qhp->rhp;
1336
1337 attrs.next_state = C4IW_QP_STATE_ERROR;
1338 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1339 wait_event(qhp->wait, !qhp->ep);
1340
1341 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1342 remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
1343 atomic_dec(&qhp->refcnt);
1344 wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
1345
1346 ucontext = ib_qp->uobject ?
1347 to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
1348 destroy_qp(&rhp->rdev, &qhp->wq,
1349 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1350
1351 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
1352 kfree(qhp);
1353 return 0;
1354}
1355
1356struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1357 struct ib_udata *udata)
1358{
1359 struct c4iw_dev *rhp;
1360 struct c4iw_qp *qhp;
1361 struct c4iw_pd *php;
1362 struct c4iw_cq *schp;
1363 struct c4iw_cq *rchp;
1364 struct c4iw_create_qp_resp uresp;
1365 int sqsize, rqsize;
1366 struct c4iw_ucontext *ucontext;
1367 int ret;
1368 struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
1369
1370 PDBG("%s ib_pd %p\n", __func__, pd);
1371
1372 if (attrs->qp_type != IB_QPT_RC)
1373 return ERR_PTR(-EINVAL);
1374
1375 php = to_c4iw_pd(pd);
1376 rhp = php->rhp;
1377 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
1378 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
1379 if (!schp || !rchp)
1380 return ERR_PTR(-EINVAL);
1381
1382 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
1383 return ERR_PTR(-EINVAL);
1384
1385 rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
1386 if (rqsize > T4_MAX_RQ_SIZE)
1387 return ERR_PTR(-E2BIG);
1388
1389 sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
1390 if (sqsize > T4_MAX_SQ_SIZE)
1391 return ERR_PTR(-E2BIG);
1392
1393 ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
1394
1395
1396 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1397 if (!qhp)
1398 return ERR_PTR(-ENOMEM);
1399 qhp->wq.sq.size = sqsize;
1400 qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
1401 qhp->wq.rq.size = rqsize;
1402 qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
1403
1404 if (ucontext) {
1405 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
1406 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
1407 }
1408
1409 PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
1410 __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
1411
1412 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
1413 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1414 if (ret)
1415 goto err1;
1416
1417 attrs->cap.max_recv_wr = rqsize - 1;
1418 attrs->cap.max_send_wr = sqsize - 1;
1419 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
1420
1421 qhp->rhp = rhp;
1422 qhp->attr.pd = php->pdid;
1423 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
1424 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
1425 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
1426 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
1427 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
1428 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
1429 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
1430 qhp->attr.state = C4IW_QP_STATE_IDLE;
1431 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
1432 qhp->attr.enable_rdma_read = 1;
1433 qhp->attr.enable_rdma_write = 1;
1434 qhp->attr.enable_bind = 1;
1435 qhp->attr.max_ord = 1;
1436 qhp->attr.max_ird = 1;
1437 spin_lock_init(&qhp->lock);
1438 init_waitqueue_head(&qhp->wait);
1439 atomic_set(&qhp->refcnt, 1);
1440
1441 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1442 if (ret)
1443 goto err2;
1444
1445 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
1446 if (ret)
1447 goto err3;
1448
1449 if (udata) {
1450 mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
1451 if (!mm1) {
1452 ret = -ENOMEM;
1453 goto err4;
1454 }
1455 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
1456 if (!mm2) {
1457 ret = -ENOMEM;
1458 goto err5;
1459 }
1460 mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
1461 if (!mm3) {
1462 ret = -ENOMEM;
1463 goto err6;
1464 }
1465 mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
1466 if (!mm4) {
1467 ret = -ENOMEM;
1468 goto err7;
1469 }
1470
1471 uresp.qid_mask = rhp->rdev.qpmask;
1472 uresp.sqid = qhp->wq.sq.qid;
1473 uresp.sq_size = qhp->wq.sq.size;
1474 uresp.sq_memsize = qhp->wq.sq.memsize;
1475 uresp.rqid = qhp->wq.rq.qid;
1476 uresp.rq_size = qhp->wq.rq.size;
1477 uresp.rq_memsize = qhp->wq.rq.memsize;
1478 spin_lock(&ucontext->mmap_lock);
1479 uresp.sq_key = ucontext->key;
1480 ucontext->key += PAGE_SIZE;
1481 uresp.rq_key = ucontext->key;
1482 ucontext->key += PAGE_SIZE;
1483 uresp.sq_db_gts_key = ucontext->key;
1484 ucontext->key += PAGE_SIZE;
1485 uresp.rq_db_gts_key = ucontext->key;
1486 ucontext->key += PAGE_SIZE;
1487 spin_unlock(&ucontext->mmap_lock);
1488 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
1489 if (ret)
1490 goto err8;
1491 mm1->key = uresp.sq_key;
1492 mm1->addr = virt_to_phys(qhp->wq.sq.queue);
1493 mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
1494 insert_mmap(ucontext, mm1);
1495 mm2->key = uresp.rq_key;
1496 mm2->addr = virt_to_phys(qhp->wq.rq.queue);
1497 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1498 insert_mmap(ucontext, mm2);
1499 mm3->key = uresp.sq_db_gts_key;
1500 mm3->addr = qhp->wq.sq.udb;
1501 mm3->len = PAGE_SIZE;
1502 insert_mmap(ucontext, mm3);
1503 mm4->key = uresp.rq_db_gts_key;
1504 mm4->addr = qhp->wq.rq.udb;
1505 mm4->len = PAGE_SIZE;
1506 insert_mmap(ucontext, mm4);
1507 }
1508 qhp->ibqp.qp_num = qhp->wq.sq.qid;
1509 init_timer(&(qhp->timer));
1510 PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
1511 __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
1512 qhp->wq.sq.qid);
1513 return &qhp->ibqp;
1514err8:
1515 kfree(mm4);
1516err7:
1517 kfree(mm3);
1518err6:
1519 kfree(mm2);
1520err5:
1521 kfree(mm1);
1522err4:
1523 remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
1524err3:
1525 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1526err2:
1527 destroy_qp(&rhp->rdev, &qhp->wq,
1528 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1529err1:
1530 kfree(qhp);
1531 return ERR_PTR(ret);
1532}
1533
1534int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1535 int attr_mask, struct ib_udata *udata)
1536{
1537 struct c4iw_dev *rhp;
1538 struct c4iw_qp *qhp;
1539 enum c4iw_qp_attr_mask mask = 0;
1540 struct c4iw_qp_attributes attrs;
1541
1542 PDBG("%s ib_qp %p\n", __func__, ibqp);
1543
1544 /* iwarp does not support the RTR state */
1545 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
1546 attr_mask &= ~IB_QP_STATE;
1547
1548 /* Make sure we still have something left to do */
1549 if (!attr_mask)
1550 return 0;
1551
1552 memset(&attrs, 0, sizeof attrs);
1553 qhp = to_c4iw_qp(ibqp);
1554 rhp = qhp->rhp;
1555
1556 attrs.next_state = c4iw_convert_state(attr->qp_state);
1557 attrs.enable_rdma_read = (attr->qp_access_flags &
1558 IB_ACCESS_REMOTE_READ) ? 1 : 0;
1559 attrs.enable_rdma_write = (attr->qp_access_flags &
1560 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1561 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
1562
1563
1564 mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
1565 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1566 (C4IW_QP_ATTR_ENABLE_RDMA_READ |
1567 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
1568 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
1569
1570 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
1571}
1572
1573struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
1574{
1575 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
1576 return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
1577}
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
new file mode 100644
index 000000000000..fb195d1d9015
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -0,0 +1,417 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32/* Crude resource management */
33#include <linux/kernel.h>
34#include <linux/random.h>
35#include <linux/slab.h>
36#include <linux/kfifo.h>
37#include <linux/spinlock.h>
38#include <linux/errno.h>
39#include <linux/genalloc.h>
40#include "iw_cxgb4.h"
41
42#define RANDOM_SIZE 16
43
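/*
 * Populate a kfifo with the ids [skip_low, nr - skip_high).  The
 * reserved low/high ids are handled by first queueing placeholder
 * entries and popping them back off at the end.  In random mode the
 * ids are shuffled through a 16-entry window seeded from random32(),
 * so allocations don't hand out ids in strictly increasing order.
 */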
44static int __c4iw_init_resource_fifo(struct kfifo *fifo,
45 spinlock_t *fifo_lock,
46 u32 nr, u32 skip_low,
47 u32 skip_high,
48 int random)
49{
50 u32 i, j, entry = 0, idx;
51 u32 random_bytes;
52 u32 rarray[16];
53 spin_lock_init(fifo_lock);
54
55 if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
56 return -ENOMEM;
57
58 for (i = 0; i < skip_low + skip_high; i++)
59 kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
60 if (random) {
61 j = 0;
62 random_bytes = random32();
63 for (i = 0; i < RANDOM_SIZE; i++)
64 rarray[i] = i + skip_low;
65 for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
66 if (j >= RANDOM_SIZE) {
67 j = 0;
68 random_bytes = random32();
69 }
70 idx = (random_bytes >> (j * 2)) & 0xF;
71 kfifo_in(fifo,
72 (unsigned char *) &rarray[idx],
73 sizeof(u32));
74 rarray[idx] = i;
75 j++;
76 }
77 for (i = 0; i < RANDOM_SIZE; i++)
78 kfifo_in(fifo,
79 (unsigned char *) &rarray[i],
80 sizeof(u32));
81 } else
82 for (i = skip_low; i < nr - skip_high; i++)
83 kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
84
85 for (i = 0; i < skip_low + skip_high; i++)
86 if (kfifo_out_locked(fifo, (unsigned char *) &entry,
87 sizeof(u32), fifo_lock))
88 break;
89 return 0;
90}
91
92static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t *fifo_lock,
93 u32 nr, u32 skip_low, u32 skip_high)
94{
95 return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
96 skip_high, 0);
97}
98
99static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
100 spinlock_t *fifo_lock,
101 u32 nr, u32 skip_low, u32 skip_high)
102{
103 return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
104 skip_high, 1);
105}
106
107static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
108{
109 u32 i;
110
111 spin_lock_init(&rdev->resource.qid_fifo_lock);
112
113 if (kfifo_alloc(&rdev->resource.qid_fifo, T4_MAX_QIDS * sizeof(u32),
114 GFP_KERNEL))
115 return -ENOMEM;
116
117 for (i = T4_QID_BASE; i < T4_QID_BASE + T4_MAX_QIDS; i++)
118 if (!(i & rdev->qpmask))
119 kfifo_in(&rdev->resource.qid_fifo,
120 (unsigned char *) &i, sizeof(u32));
121 return 0;
122}
123
124/* nr_* must be power of 2 */
125int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
126{
127 int err = 0;
128 err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
129 &rdev->resource.tpt_fifo_lock,
130 nr_tpt, 1, 0);
131 if (err)
132 goto tpt_err;
133 err = c4iw_init_qid_fifo(rdev);
134 if (err)
135 goto qid_err;
136 err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
137 &rdev->resource.pdid_fifo_lock,
138 nr_pdid, 1, 0);
139 if (err)
140 goto pdid_err;
141 return 0;
142pdid_err:
143 kfifo_free(&rdev->resource.qid_fifo);
144qid_err:
145 kfifo_free(&rdev->resource.tpt_fifo);
146tpt_err:
147 return -ENOMEM;
148}
149
150/*
151 * returns 0 if no resource available
152 */
153u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
154{
155 u32 entry;
156 if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
157 return entry;
158 else
159 return 0;
160}
161
162void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
163{
164 PDBG("%s entry 0x%x\n", __func__, entry);
165 kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
166}
167
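/*
 * QIDs come out of the fifo in naturally aligned blocks of
 * (qpmask + 1) ids that share one doorbell/GTS page.  When a fresh
 * block is pulled, the sibling ids are stashed on the per-context
 * free lists so later allocations reuse the same page.  For example,
 * if qpmask were 3, pulling qid 1024 would also yield 1025-1027
 * (hypothetical values for illustration).
 */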
168u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
169{
170 struct c4iw_qid_list *entry;
171 u32 qid;
172 int i;
173
174 mutex_lock(&uctx->lock);
175 if (!list_empty(&uctx->cqids)) {
176 entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
177 entry);
178 list_del(&entry->entry);
179 qid = entry->qid;
180 kfree(entry);
181 } else {
182 qid = c4iw_get_resource(&rdev->resource.qid_fifo,
183 &rdev->resource.qid_fifo_lock);
184 if (!qid)
185 goto out;
186 for (i = qid+1; i & rdev->qpmask; i++) {
187 entry = kmalloc(sizeof *entry, GFP_KERNEL);
188 if (!entry)
189 goto out;
190 entry->qid = i;
191 list_add_tail(&entry->entry, &uctx->cqids);
192 }
193
194 /*
195 * now put the same ids on the qp list since they all
196 * map to the same db/gts page.
197 */
198 entry = kmalloc(sizeof *entry, GFP_KERNEL);
199 if (!entry)
200 goto out;
201 entry->qid = qid;
202 list_add_tail(&entry->entry, &uctx->qpids);
203 for (i = qid+1; i & rdev->qpmask; i++) {
204 entry = kmalloc(sizeof *entry, GFP_KERNEL);
205 if (!entry)
206 goto out;
207 entry->qid = i;
208 list_add_tail(&entry->entry, &uctx->qpids);
209 }
210 }
211out:
212 mutex_unlock(&uctx->lock);
213 PDBG("%s qid 0x%x\n", __func__, qid);
214 return qid;
215}
216
217void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
218 struct c4iw_dev_ucontext *uctx)
219{
220 struct c4iw_qid_list *entry;
221
222 entry = kmalloc(sizeof *entry, GFP_KERNEL);
223 if (!entry)
224 return;
225 PDBG("%s qid 0x%x\n", __func__, qid);
226 entry->qid = qid;
227 mutex_lock(&uctx->lock);
228 list_add_tail(&entry->entry, &uctx->cqids);
229 mutex_unlock(&uctx->lock);
230}
231
232u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
233{
234 struct c4iw_qid_list *entry;
235 u32 qid;
236 int i;
237
238 mutex_lock(&uctx->lock);
239 if (!list_empty(&uctx->qpids)) {
240 entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
241 entry);
242 list_del(&entry->entry);
243 qid = entry->qid;
244 kfree(entry);
245 } else {
246 qid = c4iw_get_resource(&rdev->resource.qid_fifo,
247 &rdev->resource.qid_fifo_lock);
248 if (!qid)
249 goto out;
250 for (i = qid+1; i & rdev->qpmask; i++) {
251 entry = kmalloc(sizeof *entry, GFP_KERNEL);
252 if (!entry)
253 goto out;
254 entry->qid = i;
255 list_add_tail(&entry->entry, &uctx->qpids);
256 }
257
258 /*
259 * now put the same ids on the cq list since they all
260 * map to the same db/gts page.
261 */
262 entry = kmalloc(sizeof *entry, GFP_KERNEL);
263 if (!entry)
264 goto out;
265 entry->qid = qid;
266 list_add_tail(&entry->entry, &uctx->cqids);
267		for (i = qid+1; i & rdev->qpmask; i++) {
268 entry = kmalloc(sizeof *entry, GFP_KERNEL);
269 if (!entry)
270 goto out;
271 entry->qid = i;
272 list_add_tail(&entry->entry, &uctx->cqids);
273 }
274 }
275out:
276 mutex_unlock(&uctx->lock);
277 PDBG("%s qid 0x%x\n", __func__, qid);
278 return qid;
279}
280
281void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
282 struct c4iw_dev_ucontext *uctx)
283{
284 struct c4iw_qid_list *entry;
285
286 entry = kmalloc(sizeof *entry, GFP_KERNEL);
287 if (!entry)
288 return;
289 PDBG("%s qid 0x%x\n", __func__, qid);
290 entry->qid = qid;
291 mutex_lock(&uctx->lock);
292 list_add_tail(&entry->entry, &uctx->qpids);
293 mutex_unlock(&uctx->lock);
294}
295
296void c4iw_destroy_resource(struct c4iw_resource *rscp)
297{
298 kfifo_free(&rscp->tpt_fifo);
299 kfifo_free(&rscp->qid_fifo);
300 kfifo_free(&rscp->pdid_fifo);
301}
302
303/*
304 * PBL Memory Manager. Uses Linux generic allocator.
305 */
306
307#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
308
309u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
310{
311 unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
312 PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
313 return (u32)addr;
314}
315
316void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
317{
318 PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
319 gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
320}
321
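/*
 * Seed the PBL gen_pool with the adapter's PBL memory region.  If a
 * chunk can't be added, keep halving the chunk size until it either
 * fits or drops below 1024 << MIN_PBL_SHIFT, at which point the
 * remainder is given up on (the pool still works with what was added).
 */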
322int c4iw_pblpool_create(struct c4iw_rdev *rdev)
323{
324 unsigned pbl_start, pbl_chunk, pbl_top;
325
326 rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
327 if (!rdev->pbl_pool)
328 return -ENOMEM;
329
330 pbl_start = rdev->lldi.vr->pbl.start;
331 pbl_chunk = rdev->lldi.vr->pbl.size;
332 pbl_top = pbl_start + pbl_chunk;
333
334 while (pbl_start < pbl_top) {
335 pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
336 if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
337 PDBG("%s failed to add PBL chunk (%x/%x)\n",
338 __func__, pbl_start, pbl_chunk);
339 if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
340 printk(KERN_WARNING MOD
341 "Failed to add all PBL chunks (%x/%x)\n",
342 pbl_start,
343 pbl_top - pbl_start);
344 return 0;
345 }
346 pbl_chunk >>= 1;
347 } else {
348 PDBG("%s added PBL chunk (%x/%x)\n",
349 __func__, pbl_start, pbl_chunk);
350 pbl_start += pbl_chunk;
351 }
352 }
353
354 return 0;
355}
356
357void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
358{
359 gen_pool_destroy(rdev->pbl_pool);
360}
361
362/*
363 * RQT Memory Manager. Uses Linux generic allocator.
364 */
365
366#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
367
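/*
 * RQT allocations are converted to bytes with << 6 below, i.e. the
 * size argument is counted in what appear to be 64-byte RQT entries.
 */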
368u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
369{
370 unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
371 PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
372 return (u32)addr;
373}
374
375void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
376{
377 PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
378 gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
379}
380
381int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
382{
383 unsigned rqt_start, rqt_chunk, rqt_top;
384
385 rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
386 if (!rdev->rqt_pool)
387 return -ENOMEM;
388
389 rqt_start = rdev->lldi.vr->rq.start;
390 rqt_chunk = rdev->lldi.vr->rq.size;
391 rqt_top = rqt_start + rqt_chunk;
392
393 while (rqt_start < rqt_top) {
394 rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
395 if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
396 PDBG("%s failed to add RQT chunk (%x/%x)\n",
397 __func__, rqt_start, rqt_chunk);
398 if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
399 printk(KERN_WARNING MOD
400 "Failed to add all RQT chunks (%x/%x)\n",
401 rqt_start, rqt_top - rqt_start);
402 return 0;
403 }
404 rqt_chunk >>= 1;
405 } else {
406 PDBG("%s added RQT chunk (%x/%x)\n",
407 __func__, rqt_start, rqt_chunk);
408 rqt_start += rqt_chunk;
409 }
410 }
411 return 0;
412}
413
414void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
415{
416 gen_pool_destroy(rdev->rqt_pool);
417}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
new file mode 100644
index 000000000000..d0e8af352408
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -0,0 +1,550 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31#ifndef __T4_H__
32#define __T4_H__
33
34#include "t4_hw.h"
35#include "t4_regs.h"
36#include "t4_msg.h"
37#include "t4fw_ri_api.h"
38
39#define T4_QID_BASE 1024
40#define T4_MAX_QIDS 256
41#define T4_MAX_NUM_QP (1<<16)
42#define T4_MAX_NUM_CQ (1<<15)
43#define T4_MAX_NUM_PD (1<<15)
44#define T4_MAX_PBL_SIZE 256
45#define T4_MAX_RQ_SIZE 1024
46#define T4_MAX_SQ_SIZE 1024
47#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE-1)
48#define T4_MAX_CQ_DEPTH 8192
49#define T4_MAX_NUM_STAG (1<<15)
50#define T4_MAX_MR_SIZE (~0ULL - 1)
51#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
52#define T4_STAG_UNSET 0xffffffff
53#define T4_FW_MAJ 0
54#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
55
56struct t4_status_page {
57 __be32 rsvd1; /* flit 0 - hw owns */
58 __be16 rsvd2;
59 __be16 qid;
60 __be16 cidx;
61 __be16 pidx;
62 u8 qp_err; /* flit 1 - sw owns */
63 u8 db_off;
64};
65
66#define T4_EQ_SIZE 64
67
68#define T4_SQ_NUM_SLOTS 4
69#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
70#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
71 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
72#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
73 sizeof(struct fw_ri_immd)))
74#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
75 sizeof(struct fw_ri_rdma_write_wr) - \
76 sizeof(struct fw_ri_immd)))
77#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
78 sizeof(struct fw_ri_rdma_write_wr) - \
79 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
80#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
81 sizeof(struct fw_ri_immd)))
82#define T4_MAX_FR_DEPTH 255
83
84#define T4_RQ_NUM_SLOTS 2
85#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
86#define T4_MAX_RECV_SGE ((T4_RQ_NUM_BYTES - sizeof(struct fw_ri_recv_wr) - \
87 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
88
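/*
 * A single SQ work request spans T4_SQ_NUM_SLOTS 64-byte EQ slots;
 * this union overlays every SQ WR format on that footprint.  Note the
 * status page variant: slot wq->sq.size, one past the ring, holds the
 * t4_status_page the driver polls for qp_err/db_off.
 */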
89union t4_wr {
90 struct fw_ri_res_wr res;
91 struct fw_ri_wr ri;
92 struct fw_ri_rdma_write_wr write;
93 struct fw_ri_send_wr send;
94 struct fw_ri_rdma_read_wr read;
95 struct fw_ri_bind_mw_wr bind;
96 struct fw_ri_fr_nsmr_wr fr;
97 struct fw_ri_inv_lstag_wr inv;
98 struct t4_status_page status;
99 __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
100};
101
102union t4_recv_wr {
103 struct fw_ri_recv_wr recv;
104 struct t4_status_page status;
105 __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
106};
107
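/*
 * Fill in the common WR header and zero the first flit of each unused
 * slot (presumably so stale contents aren't misinterpreted).  len16
 * counts 16-byte units: e.g. a 40-byte WR (illustrative number) gives
 * len16 = DIV_ROUND_UP(40, 16) = 3, which fits in one 64-byte slot.
 */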
108static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
109 enum fw_wr_opcodes opcode, u8 flags, u8 len16)
110{
111 int slots_used;
112
113 wqe->send.opcode = (u8)opcode;
114 wqe->send.flags = flags;
115 wqe->send.wrid = wrid;
116 wqe->send.r1[0] = 0;
117 wqe->send.r1[1] = 0;
118 wqe->send.r1[2] = 0;
119 wqe->send.len16 = len16;
120
121 slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE);
122 while (slots_used < T4_SQ_NUM_SLOTS) {
123 wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
124 slots_used++;
125 }
126}
127
128/* CQE/AE status codes */
129#define T4_ERR_SUCCESS 0x0
130#define T4_ERR_STAG 0x1 /* STAG invalid: either the */
131					/* STAG is off limit, being 0, */
132 /* or STAG_key mismatch */
133#define T4_ERR_PDID 0x2 /* PDID mismatch */
134#define T4_ERR_QPID 0x3 /* QPID mismatch */
135#define T4_ERR_ACCESS 0x4 /* Invalid access right */
136#define T4_ERR_WRAP 0x5 /* Wrap error */
137#define T4_ERR_BOUND 0x6 /* base and bounds violation */
138#define T4_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
139 /* shared memory region */
140#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
141					/* MR with an MW bound */
142#define T4_ERR_ECC 0x9 /* ECC error detected */
143#define T4_ERR_ECC_PSTAG 0xA /* ECC error detected when */
144					/* reading PSTAG for an MW */
145 /* Invalidate */
146#define T4_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
147 /* software error */
148#define T4_ERR_SWFLUSH 0xC /* SW FLUSHED */
149#define T4_ERR_CRC 0x10 /* CRC error */
150#define T4_ERR_MARKER 0x11 /* Marker error */
151#define T4_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
152#define T4_ERR_OUT_OF_RQE 0x13 /* out of RQE */
153#define T4_ERR_DDP_VERSION 0x14 /* wrong DDP version */
154#define T4_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
155#define T4_ERR_OPCODE 0x16 /* invalid rdma opcode */
156#define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
157#define T4_ERR_MSN 0x18 /* MSN error */
158#define T4_ERR_TBIT 0x19 /* tag bit not set correctly */
159#define T4_ERR_MO 0x1A /* MO not 0 for TERMINATE */
160 /* or READ_REQ */
161#define T4_ERR_MSN_GAP 0x1B
162#define T4_ERR_MSN_RANGE 0x1C
163#define T4_ERR_IRD_OVERFLOW 0x1D
164#define T4_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
165 /* software error */
166#define T4_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
167 /* mismatch) */
168/*
169 * CQE defs
170 */
171struct t4_cqe {
172 __be32 header;
173 __be32 len;
174 union {
175 struct {
176 __be32 stag;
177 __be32 msn;
178 } rcqe;
179 struct {
180 u32 nada1;
181 u16 nada2;
182 u16 cidx;
183 } scqe;
184 struct {
185 __be32 wrid_hi;
186 __be32 wrid_low;
187 } gen;
188 } u;
189 __be64 reserved;
190 __be64 bits_type_ts;
191};
192
193/* macros for flit 0 of the cqe */
194
195#define S_CQE_QPID 12
196#define M_CQE_QPID 0xFFFFF
197#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
198#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
199
200#define S_CQE_SWCQE 11
201#define M_CQE_SWCQE 0x1
202#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
203#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
204
205#define S_CQE_STATUS 5
206#define M_CQE_STATUS 0x1F
207#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
208#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
209
210#define S_CQE_TYPE 4
211#define M_CQE_TYPE 0x1
212#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
213#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
214
215#define S_CQE_OPCODE 0
216#define M_CQE_OPCODE 0xF
217#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
218#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
219
220#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
221#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
222#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
223#define SQ_TYPE(x) (CQE_TYPE((x)))
224#define RQ_TYPE(x) (!CQE_TYPE((x)))
225#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
226#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
227
228#define CQE_SEND_OPCODE(x) ( \
229 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
230 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
231 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
232 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
233
234#define CQE_LEN(x) (be32_to_cpu((x)->len))
235
236/* used for RQ completion processing */
237#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
238#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
239
240/* used for SQ completion processing */
241#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
242
243/* generic accessor macros */
244#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
245#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
246
247/* macros for flit 3 of the cqe */
248#define S_CQE_GENBIT 63
249#define M_CQE_GENBIT 0x1
250#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
251#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
252
253#define S_CQE_OVFBIT 62
254#define M_CQE_OVFBIT 0x1
255#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
256
257#define S_CQE_IQTYPE 60
258#define M_CQE_IQTYPE 0x3
259#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
260
261#define M_CQE_TS 0x0fffffffffffffffULL
262#define G_CQE_TS(x) ((x) & M_CQE_TS)
263
264#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
265#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
266#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
267
268struct t4_swsqe {
269 u64 wr_id;
270 struct t4_cqe cqe;
271 int read_len;
272 int opcode;
273 int complete;
274 int signaled;
275 u16 idx;
276};
277
278struct t4_sq {
279 union t4_wr *queue;
280 dma_addr_t dma_addr;
281 DECLARE_PCI_UNMAP_ADDR(mapping);
282 struct t4_swsqe *sw_sq;
283 struct t4_swsqe *oldest_read;
284 u64 udb;
285 size_t memsize;
286 u32 qid;
287 u16 in_use;
288 u16 size;
289 u16 cidx;
290 u16 pidx;
291};
292
293struct t4_swrqe {
294 u64 wr_id;
295};
296
297struct t4_rq {
298 union t4_recv_wr *queue;
299 dma_addr_t dma_addr;
300 DECLARE_PCI_UNMAP_ADDR(mapping);
301 struct t4_swrqe *sw_rq;
302 u64 udb;
303 size_t memsize;
304 u32 qid;
305 u32 msn;
306 u32 rqt_hwaddr;
307 u16 rqt_size;
308 u16 in_use;
309 u16 size;
310 u16 cidx;
311 u16 pidx;
312};
313
314struct t4_wq {
315 struct t4_sq sq;
316 struct t4_rq rq;
317 void __iomem *db;
318 void __iomem *gts;
319 struct c4iw_rdev *rdev;
320};
321
322static inline int t4_rqes_posted(struct t4_wq *wq)
323{
324 return wq->rq.in_use;
325}
326
327static inline int t4_rq_empty(struct t4_wq *wq)
328{
329 return wq->rq.in_use == 0;
330}
331
332static inline int t4_rq_full(struct t4_wq *wq)
333{
334 return wq->rq.in_use == (wq->rq.size - 1);
335}
336
337static inline u32 t4_rq_avail(struct t4_wq *wq)
338{
339 return wq->rq.size - 1 - wq->rq.in_use;
340}
341
342static inline void t4_rq_produce(struct t4_wq *wq)
343{
344 wq->rq.in_use++;
345 if (++wq->rq.pidx == wq->rq.size)
346 wq->rq.pidx = 0;
347}
348
349static inline void t4_rq_consume(struct t4_wq *wq)
350{
351 wq->rq.in_use--;
352 wq->rq.msn++;
353 if (++wq->rq.cidx == wq->rq.size)
354 wq->rq.cidx = 0;
355}
356
357static inline int t4_sq_empty(struct t4_wq *wq)
358{
359 return wq->sq.in_use == 0;
360}
361
362static inline int t4_sq_full(struct t4_wq *wq)
363{
364 return wq->sq.in_use == (wq->sq.size - 1);
365}
366
367static inline u32 t4_sq_avail(struct t4_wq *wq)
368{
369 return wq->sq.size - 1 - wq->sq.in_use;
370}
371
372static inline void t4_sq_produce(struct t4_wq *wq)
373{
374 wq->sq.in_use++;
375 if (++wq->sq.pidx == wq->sq.size)
376 wq->sq.pidx = 0;
377}
378
379static inline void t4_sq_consume(struct t4_wq *wq)
380{
381 wq->sq.in_use--;
382 if (++wq->sq.cidx == wq->sq.size)
383 wq->sq.cidx = 0;
384}
385
386static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
387{
388 inc *= T4_SQ_NUM_SLOTS;
389 wmb();
390 writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
391}
392
393static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
394{
395 inc *= T4_RQ_NUM_SLOTS;
396 wmb();
397 writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
398}
399
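/*
 * Hedged sketch of the expected caller flow for these ring helpers
 * (assumed pattern, not a verbatim copy of the QP code): reserve a
 * slot, fill the WQE at the producer index, advance the ring, then
 * ring the doorbell for the slot just posted.
 */
static inline int example_post_one_rqe(struct t4_wq *wq,
				       const union t4_recv_wr *wr)
{
	if (!t4_rq_avail(wq))
		return -ENOMEM;	/* the ring always keeps one slot empty */

	wq->rq.queue[wq->rq.pidx] = *wr;	/* copy WQE into place */
	t4_rq_produce(wq);			/* advance pidx, bump in_use */
	t4_ring_rq_db(wq, 1);			/* tell HW one new RQE */
	return 0;
}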
400static inline int t4_wq_in_error(struct t4_wq *wq)
401{
402 return wq->sq.queue[wq->sq.size].status.qp_err;
403}
404
405static inline void t4_set_wq_in_error(struct t4_wq *wq)
406{
407 wq->sq.queue[wq->sq.size].status.qp_err = 1;
408 wq->rq.queue[wq->rq.size].status.qp_err = 1;
409}
410
411static inline void t4_disable_wq_db(struct t4_wq *wq)
412{
413 wq->sq.queue[wq->sq.size].status.db_off = 1;
414 wq->rq.queue[wq->rq.size].status.db_off = 1;
415}
416
417static inline void t4_enable_wq_db(struct t4_wq *wq)
418{
419 wq->sq.queue[wq->sq.size].status.db_off = 0;
420 wq->rq.queue[wq->rq.size].status.db_off = 0;
421}
422
423static inline int t4_wq_db_enabled(struct t4_wq *wq)
424{
425 return !wq->sq.queue[wq->sq.size].status.db_off;
426}
427
428struct t4_cq {
429 struct t4_cqe *queue;
430 dma_addr_t dma_addr;
431 DECLARE_PCI_UNMAP_ADDR(mapping);
432 struct t4_cqe *sw_queue;
433 void __iomem *gts;
434 struct c4iw_rdev *rdev;
435 u64 ugts;
436 size_t memsize;
437 u64 timestamp;
438 u32 cqid;
439 u16 size; /* including status page */
440 u16 cidx;
441 u16 sw_pidx;
442 u16 sw_cidx;
443 u16 sw_in_use;
444 u16 cidx_inc;
445 u8 gen;
446 u8 error;
447};
448
449static inline int t4_arm_cq(struct t4_cq *cq, int se)
450{
451 u32 val;
452 u16 inc;
453
454 do {
455 /*
456 * inc must be less than both the max update value -and-
457 * the size of the CQ.
458 */
459 inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc :
460 CIDXINC_MASK;
461 inc = inc <= (cq->size - 1) ? inc : (cq->size - 1);
462 if (inc == cq->cidx_inc)
463 val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) |
464 INGRESSQID(cq->cqid);
465 else
466 val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) |
467 INGRESSQID(cq->cqid);
468 cq->cidx_inc -= inc;
469 writel(val, cq->gts);
470 } while (cq->cidx_inc);
471 return 0;
472}
473
474static inline void t4_swcq_produce(struct t4_cq *cq)
475{
476 cq->sw_in_use++;
477 if (++cq->sw_pidx == cq->size)
478 cq->sw_pidx = 0;
479}
480
481static inline void t4_swcq_consume(struct t4_cq *cq)
482{
483 cq->sw_in_use--;
484 if (++cq->sw_cidx == cq->size)
485 cq->sw_cidx = 0;
486}
487
488static inline void t4_hwcq_consume(struct t4_cq *cq)
489{
490 cq->cidx_inc++;
491 if (++cq->cidx == cq->size) {
492 cq->cidx = 0;
493 cq->gen ^= 1;
494 }
495}
496
497static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
498{
499 return (CQE_GENBIT(cqe) == cq->gen);
500}
501
502static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
503{
504 int ret = 0;
505 u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts);
506
507 if (G_CQE_GENBIT(bits_type_ts) == cq->gen) {
508 *cqe = &cq->queue[cq->cidx];
509 cq->timestamp = G_CQE_TS(bits_type_ts);
510 } else if (G_CQE_TS(bits_type_ts) > cq->timestamp)
511 ret = -EOVERFLOW;
512 else
513 ret = -ENODATA;
514 if (ret == -EOVERFLOW) {
515 printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
516 cq->error = 1;
517 }
518 return ret;
519}
520
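/*
 * Illustrative polling loop (assumed caller pattern): CQEs are owned
 * by software only while their generation bit matches cq->gen, which
 * t4_hwcq_consume() flips on every wrap; draining then re-arming
 * looks like this:
 */
static inline void example_drain_and_rearm_cq(struct t4_cq *cq)
{
	struct t4_cqe *cqe;

	while (t4_next_hw_cqe(cq, &cqe) == 0) {
		/* process *cqe here: CQE_OPCODE(cqe), CQE_LEN(cqe), ... */
		t4_hwcq_consume(cq);
	}
	t4_arm_cq(cq, 0);	/* ask for an interrupt on the next CQE */
}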
521static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
522{
523 if (cq->sw_in_use)
524 return &cq->sw_queue[cq->sw_cidx];
525 return NULL;
526}
527
528static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
529{
530 int ret = 0;
531
532 if (cq->error)
533 ret = -ENODATA;
534 else if (cq->sw_in_use)
535 *cqe = &cq->sw_queue[cq->sw_cidx];
536 else
537 ret = t4_next_hw_cqe(cq, cqe);
538 return ret;
539}
540
541static inline int t4_cq_in_error(struct t4_cq *cq)
542{
543 return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
544}
545
546static inline void t4_set_cq_in_error(struct t4_cq *cq)
547{
548 ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
549}
550#endif
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
new file mode 100644
index 000000000000..fc706bd07fae
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -0,0 +1,829 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31#ifndef _T4FW_RI_API_H_
32#define _T4FW_RI_API_H_
33
34#include "t4fw_api.h"
35
36enum fw_ri_wr_opcode {
37 FW_RI_RDMA_WRITE = 0x0, /* IETF RDMAP v1.0 ... */
38 FW_RI_READ_REQ = 0x1,
39 FW_RI_READ_RESP = 0x2,
40 FW_RI_SEND = 0x3,
41 FW_RI_SEND_WITH_INV = 0x4,
42 FW_RI_SEND_WITH_SE = 0x5,
43 FW_RI_SEND_WITH_SE_INV = 0x6,
44 FW_RI_TERMINATE = 0x7,
45 FW_RI_RDMA_INIT = 0x8, /* CHELSIO RI specific ... */
46 FW_RI_BIND_MW = 0x9,
47 FW_RI_FAST_REGISTER = 0xa,
48 FW_RI_LOCAL_INV = 0xb,
49 FW_RI_QP_MODIFY = 0xc,
50 FW_RI_BYPASS = 0xd,
51 FW_RI_RECEIVE = 0xe,
52
53 FW_RI_SGE_EC_CR_RETURN = 0xf
54};
55
56enum fw_ri_wr_flags {
57 FW_RI_COMPLETION_FLAG = 0x01,
58 FW_RI_NOTIFICATION_FLAG = 0x02,
59 FW_RI_SOLICITED_EVENT_FLAG = 0x04,
60 FW_RI_READ_FENCE_FLAG = 0x08,
61 FW_RI_LOCAL_FENCE_FLAG = 0x10,
62 FW_RI_RDMA_READ_INVALIDATE = 0x20
63};
64
65enum fw_ri_mpa_attrs {
66 FW_RI_MPA_RX_MARKER_ENABLE = 0x01,
67 FW_RI_MPA_TX_MARKER_ENABLE = 0x02,
68 FW_RI_MPA_CRC_ENABLE = 0x04,
69 FW_RI_MPA_IETF_ENABLE = 0x08
70};
71
72enum fw_ri_qp_caps {
73 FW_RI_QP_RDMA_READ_ENABLE = 0x01,
74 FW_RI_QP_RDMA_WRITE_ENABLE = 0x02,
75 FW_RI_QP_BIND_ENABLE = 0x04,
76 FW_RI_QP_FAST_REGISTER_ENABLE = 0x08,
77 FW_RI_QP_STAG0_ENABLE = 0x10
78};
79
80enum fw_ri_addr_type {
81 FW_RI_ZERO_BASED_TO = 0x00,
82 FW_RI_VA_BASED_TO = 0x01
83};
84
85enum fw_ri_mem_perms {
86 FW_RI_MEM_ACCESS_REM_WRITE = 0x01,
87 FW_RI_MEM_ACCESS_REM_READ = 0x02,
88 FW_RI_MEM_ACCESS_REM = 0x03,
89 FW_RI_MEM_ACCESS_LOCAL_WRITE = 0x04,
90 FW_RI_MEM_ACCESS_LOCAL_READ = 0x08,
91 FW_RI_MEM_ACCESS_LOCAL = 0x0C
92};
93
94enum fw_ri_stag_type {
95 FW_RI_STAG_NSMR = 0x00,
96 FW_RI_STAG_SMR = 0x01,
97 FW_RI_STAG_MW = 0x02,
98 FW_RI_STAG_MW_RELAXED = 0x03
99};
100
101enum fw_ri_data_op {
102 FW_RI_DATA_IMMD = 0x81,
103 FW_RI_DATA_DSGL = 0x82,
104 FW_RI_DATA_ISGL = 0x83
105};
106
107enum fw_ri_sgl_depth {
108 FW_RI_SGL_DEPTH_MAX_SQ = 16,
109 FW_RI_SGL_DEPTH_MAX_RQ = 4
110};
111
112struct fw_ri_dsge_pair {
113 __be32 len[2];
114 __be64 addr[2];
115};
116
117struct fw_ri_dsgl {
118 __u8 op;
119 __u8 r1;
120 __be16 nsge;
121 __be32 len0;
122 __be64 addr0;
123#ifndef C99_NOT_SUPPORTED
124 struct fw_ri_dsge_pair sge[0];
125#endif
126};
127
128struct fw_ri_sge {
129 __be32 stag;
130 __be32 len;
131 __be64 to;
132};
133
134struct fw_ri_isgl {
135 __u8 op;
136 __u8 r1;
137 __be16 nsge;
138 __be32 r2;
139#ifndef C99_NOT_SUPPORTED
140 struct fw_ri_sge sge[0];
141#endif
142};
143
144struct fw_ri_immd {
145 __u8 op;
146 __u8 r1;
147 __be16 r2;
148 __be32 immdlen;
149#ifndef C99_NOT_SUPPORTED
150 __u8 data[0];
151#endif
152};
153
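/*
 * Sketch (illustrative, with payload handling assumed to happen in
 * the caller): the leading op byte of each data segment carries an
 * fw_ri_data_op tag, which is how firmware tells an immediate blob
 * from a DSGL or ISGL scatter list occupying the same WR space.
 */
static inline void example_init_immd(struct fw_ri_immd *immd, u32 len)
{
	immd->op = FW_RI_DATA_IMMD;	/* tag: immediate data follows */
	immd->r1 = 0;
	immd->r2 = 0;
	immd->immdlen = cpu_to_be32(len);
	/* the caller copies len bytes of payload right after this header */
}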
154struct fw_ri_tpte {
155 __be32 valid_to_pdid;
156 __be32 locread_to_qpid;
157 __be32 nosnoop_pbladdr;
158 __be32 len_lo;
159 __be32 va_hi;
160 __be32 va_lo_fbo;
161 __be32 dca_mwbcnt_pstag;
162 __be32 len_hi;
163};
164
165#define S_FW_RI_TPTE_VALID 31
166#define M_FW_RI_TPTE_VALID 0x1
167#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID)
168#define G_FW_RI_TPTE_VALID(x) \
169 (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID)
170#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U)
171
172#define S_FW_RI_TPTE_STAGKEY 23
173#define M_FW_RI_TPTE_STAGKEY 0xff
174#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY)
175#define G_FW_RI_TPTE_STAGKEY(x) \
176 (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY)
177
178#define S_FW_RI_TPTE_STAGSTATE 22
179#define M_FW_RI_TPTE_STAGSTATE 0x1
180#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE)
181#define G_FW_RI_TPTE_STAGSTATE(x) \
182 (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE)
183#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U)
184
185#define S_FW_RI_TPTE_STAGTYPE 20
186#define M_FW_RI_TPTE_STAGTYPE 0x3
187#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE)
188#define G_FW_RI_TPTE_STAGTYPE(x) \
189 (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE)
190
191#define S_FW_RI_TPTE_PDID 0
192#define M_FW_RI_TPTE_PDID 0xfffff
193#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID)
194#define G_FW_RI_TPTE_PDID(x) \
195 (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID)
196
197#define S_FW_RI_TPTE_PERM 28
198#define M_FW_RI_TPTE_PERM 0xf
199#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM)
200#define G_FW_RI_TPTE_PERM(x) \
201 (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM)
202
203#define S_FW_RI_TPTE_REMINVDIS 27
204#define M_FW_RI_TPTE_REMINVDIS 0x1
205#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS)
206#define G_FW_RI_TPTE_REMINVDIS(x) \
207 (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS)
208#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U)
209
210#define S_FW_RI_TPTE_ADDRTYPE 26
211#define M_FW_RI_TPTE_ADDRTYPE 1
212#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE)
213#define G_FW_RI_TPTE_ADDRTYPE(x) \
214 (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE)
215#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U)
216
217#define S_FW_RI_TPTE_MWBINDEN 25
218#define M_FW_RI_TPTE_MWBINDEN 0x1
219#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN)
220#define G_FW_RI_TPTE_MWBINDEN(x) \
221 (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN)
222#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U)
223
224#define S_FW_RI_TPTE_PS 20
225#define M_FW_RI_TPTE_PS 0x1f
226#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS)
227#define G_FW_RI_TPTE_PS(x) \
228 (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS)
229
230#define S_FW_RI_TPTE_QPID 0
231#define M_FW_RI_TPTE_QPID 0xfffff
232#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID)
233#define G_FW_RI_TPTE_QPID(x) \
234 (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID)
235
236#define S_FW_RI_TPTE_NOSNOOP 30
237#define M_FW_RI_TPTE_NOSNOOP 0x1
238#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP)
239#define G_FW_RI_TPTE_NOSNOOP(x) \
240 (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP)
241#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U)
242
243#define S_FW_RI_TPTE_PBLADDR 0
244#define M_FW_RI_TPTE_PBLADDR 0x1fffffff
245#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR)
246#define G_FW_RI_TPTE_PBLADDR(x) \
247 (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR)
248
249#define S_FW_RI_TPTE_DCA 24
250#define M_FW_RI_TPTE_DCA 0x1f
251#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA)
252#define G_FW_RI_TPTE_DCA(x) \
253 (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA)
254
255#define S_FW_RI_TPTE_MWBCNT_PSTAG 0
256#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff
257#define V_FW_RI_TPTE_MWBCNT_PSTAG(x) \
258 ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG)
259#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \
260 (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG)
261
262enum fw_ri_res_type {
263 FW_RI_RES_TYPE_SQ,
264 FW_RI_RES_TYPE_RQ,
265 FW_RI_RES_TYPE_CQ,
266};
267
268enum fw_ri_res_op {
269 FW_RI_RES_OP_WRITE,
270 FW_RI_RES_OP_RESET,
271};
272
273struct fw_ri_res {
274 union fw_ri_restype {
275 struct fw_ri_res_sqrq {
276 __u8 restype;
277 __u8 op;
278 __be16 r3;
279 __be32 eqid;
280 __be32 r4[2];
281 __be32 fetchszm_to_iqid;
282 __be32 dcaen_to_eqsize;
283 __be64 eqaddr;
284 } sqrq;
285 struct fw_ri_res_cq {
286 __u8 restype;
287 __u8 op;
288 __be16 r3;
289 __be32 iqid;
290 __be32 r4[2];
291 __be32 iqandst_to_iqandstindex;
292 __be16 iqdroprss_to_iqesize;
293 __be16 iqsize;
294 __be64 iqaddr;
295 __be32 iqns_iqro;
296 __be32 r6_lo;
297 __be64 r7;
298 } cq;
299 } u;
300};
301
302struct fw_ri_res_wr {
303 __be32 op_nres;
304 __be32 len16_pkd;
305 __u64 cookie;
306#ifndef C99_NOT_SUPPORTED
307 struct fw_ri_res res[0];
308#endif
309};
310
311#define S_FW_RI_RES_WR_NRES 0
312#define M_FW_RI_RES_WR_NRES 0xff
313#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES)
314#define G_FW_RI_RES_WR_NRES(x) \
315 (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES)
316
317#define S_FW_RI_RES_WR_FETCHSZM 26
318#define M_FW_RI_RES_WR_FETCHSZM 0x1
319#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM)
320#define G_FW_RI_RES_WR_FETCHSZM(x) \
321 (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM)
322#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U)
323
324#define S_FW_RI_RES_WR_STATUSPGNS 25
325#define M_FW_RI_RES_WR_STATUSPGNS 0x1
326#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS)
327#define G_FW_RI_RES_WR_STATUSPGNS(x) \
328 (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS)
329#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U)
330
331#define S_FW_RI_RES_WR_STATUSPGRO 24
332#define M_FW_RI_RES_WR_STATUSPGRO 0x1
333#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO)
334#define G_FW_RI_RES_WR_STATUSPGRO(x) \
335 (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO)
336#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U)
337
338#define S_FW_RI_RES_WR_FETCHNS 23
339#define M_FW_RI_RES_WR_FETCHNS 0x1
340#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS)
341#define G_FW_RI_RES_WR_FETCHNS(x) \
342 (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS)
343#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U)
344
345#define S_FW_RI_RES_WR_FETCHRO 22
346#define M_FW_RI_RES_WR_FETCHRO 0x1
347#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO)
348#define G_FW_RI_RES_WR_FETCHRO(x) \
349 (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO)
350#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U)
351
352#define S_FW_RI_RES_WR_HOSTFCMODE 20
353#define M_FW_RI_RES_WR_HOSTFCMODE 0x3
354#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE)
355#define G_FW_RI_RES_WR_HOSTFCMODE(x) \
356 (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE)
357
358#define S_FW_RI_RES_WR_CPRIO 19
359#define M_FW_RI_RES_WR_CPRIO 0x1
360#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO)
361#define G_FW_RI_RES_WR_CPRIO(x) \
362 (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO)
363#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U)
364
365#define S_FW_RI_RES_WR_ONCHIP 18
366#define M_FW_RI_RES_WR_ONCHIP 0x1
367#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP)
368#define G_FW_RI_RES_WR_ONCHIP(x) \
369 (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP)
370#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U)
371
372#define S_FW_RI_RES_WR_PCIECHN 16
373#define M_FW_RI_RES_WR_PCIECHN 0x3
374#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN)
375#define G_FW_RI_RES_WR_PCIECHN(x) \
376 (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN)
377
378#define S_FW_RI_RES_WR_IQID 0
379#define M_FW_RI_RES_WR_IQID 0xffff
380#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID)
381#define G_FW_RI_RES_WR_IQID(x) \
382 (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID)
383
384#define S_FW_RI_RES_WR_DCAEN 31
385#define M_FW_RI_RES_WR_DCAEN 0x1
386#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN)
387#define G_FW_RI_RES_WR_DCAEN(x) \
388 (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
389#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U)
390
391#define S_FW_RI_RES_WR_DCACPU 26
392#define M_FW_RI_RES_WR_DCACPU 0x1f
393#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU)
394#define G_FW_RI_RES_WR_DCACPU(x) \
395 (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)
396
397#define S_FW_RI_RES_WR_FBMIN 23
398#define M_FW_RI_RES_WR_FBMIN 0x7
399#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN)
400#define G_FW_RI_RES_WR_FBMIN(x) \
401 (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)
402
403#define S_FW_RI_RES_WR_FBMAX 20
404#define M_FW_RI_RES_WR_FBMAX 0x7
405#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX)
406#define G_FW_RI_RES_WR_FBMAX(x) \
407 (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)
408
409#define S_FW_RI_RES_WR_CIDXFTHRESHO 19
410#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1
411#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
412#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \
413 (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
414#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U)
415
416#define S_FW_RI_RES_WR_CIDXFTHRESH 16
417#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7
418#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
419#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \
420 (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)
421
422#define S_FW_RI_RES_WR_EQSIZE 0
423#define M_FW_RI_RES_WR_EQSIZE 0xffff
424#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE)
425#define G_FW_RI_RES_WR_EQSIZE(x) \
426 (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
427
428#define S_FW_RI_RES_WR_IQANDST 15
429#define M_FW_RI_RES_WR_IQANDST 0x1
430#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST)
431#define G_FW_RI_RES_WR_IQANDST(x) \
432 (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
433#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U)
434
435#define S_FW_RI_RES_WR_IQANUS 14
436#define M_FW_RI_RES_WR_IQANUS 0x1
437#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS)
438#define G_FW_RI_RES_WR_IQANUS(x) \
439 (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
440#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U)
441
442#define S_FW_RI_RES_WR_IQANUD 12
443#define M_FW_RI_RES_WR_IQANUD 0x3
444#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD)
445#define G_FW_RI_RES_WR_IQANUD(x) \
446 (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)
447
448#define S_FW_RI_RES_WR_IQANDSTINDEX 0
449#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff
450#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
451#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \
452 (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)
453
454#define S_FW_RI_RES_WR_IQDROPRSS 15
455#define M_FW_RI_RES_WR_IQDROPRSS 0x1
456#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS)
457#define G_FW_RI_RES_WR_IQDROPRSS(x) \
458 (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
459#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U)
460
461#define S_FW_RI_RES_WR_IQGTSMODE 14
462#define M_FW_RI_RES_WR_IQGTSMODE 0x1
463#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE)
464#define G_FW_RI_RES_WR_IQGTSMODE(x) \
465 (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
466#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U)
467
468#define S_FW_RI_RES_WR_IQPCIECH 12
469#define M_FW_RI_RES_WR_IQPCIECH 0x3
470#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH)
471#define G_FW_RI_RES_WR_IQPCIECH(x) \
472 (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)
473
474#define S_FW_RI_RES_WR_IQDCAEN 11
475#define M_FW_RI_RES_WR_IQDCAEN 0x1
476#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN)
477#define G_FW_RI_RES_WR_IQDCAEN(x) \
478 (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
479#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U)
480
481#define S_FW_RI_RES_WR_IQDCACPU 6
482#define M_FW_RI_RES_WR_IQDCACPU 0x1f
483#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU)
484#define G_FW_RI_RES_WR_IQDCACPU(x) \
485 (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)
486
487#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4
488#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3
489#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
490 ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
491#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
492 (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)
493
494#define S_FW_RI_RES_WR_IQO 3
495#define M_FW_RI_RES_WR_IQO 0x1
496#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO)
497#define G_FW_RI_RES_WR_IQO(x) \
498 (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
499#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U)
500
501#define S_FW_RI_RES_WR_IQCPRIO 2
502#define M_FW_RI_RES_WR_IQCPRIO 0x1
503#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO)
504#define G_FW_RI_RES_WR_IQCPRIO(x) \
505 (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
506#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U)
507
508#define S_FW_RI_RES_WR_IQESIZE 0
509#define M_FW_RI_RES_WR_IQESIZE 0x3
510#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE)
511#define G_FW_RI_RES_WR_IQESIZE(x) \
512 (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)
513
514#define S_FW_RI_RES_WR_IQNS 31
515#define M_FW_RI_RES_WR_IQNS 0x1
516#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS)
517#define G_FW_RI_RES_WR_IQNS(x) \
518 (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
519#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U)
520
521#define S_FW_RI_RES_WR_IQRO 30
522#define M_FW_RI_RES_WR_IQRO 0x1
523#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO)
524#define G_FW_RI_RES_WR_IQRO(x) \
525 (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
526#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U)
527
528struct fw_ri_rdma_write_wr {
529 __u8 opcode;
530 __u8 flags;
531 __u16 wrid;
532 __u8 r1[3];
533 __u8 len16;
534 __be64 r2;
535 __be32 plen;
536 __be32 stag_sink;
537 __be64 to_sink;
538#ifndef C99_NOT_SUPPORTED
539 union {
540 struct fw_ri_immd immd_src[0];
541 struct fw_ri_isgl isgl_src[0];
542 } u;
543#endif
544};
545
546struct fw_ri_send_wr {
547 __u8 opcode;
548 __u8 flags;
549 __u16 wrid;
550 __u8 r1[3];
551 __u8 len16;
552 __be32 sendop_pkd;
553 __be32 stag_inv;
554 __be32 plen;
555 __be32 r3;
556 __be64 r4;
557#ifndef C99_NOT_SUPPORTED
558 union {
559 struct fw_ri_immd immd_src[0];
560 struct fw_ri_isgl isgl_src[0];
561 } u;
562#endif
563};
564
565#define S_FW_RI_SEND_WR_SENDOP 0
566#define M_FW_RI_SEND_WR_SENDOP 0xf
567#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP)
568#define G_FW_RI_SEND_WR_SENDOP(x) \
569 (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)
570
571struct fw_ri_rdma_read_wr {
572 __u8 opcode;
573 __u8 flags;
574 __u16 wrid;
575 __u8 r1[3];
576 __u8 len16;
577 __be64 r2;
578 __be32 stag_sink;
579 __be32 to_sink_hi;
580 __be32 to_sink_lo;
581 __be32 plen;
582 __be32 stag_src;
583 __be32 to_src_hi;
584 __be32 to_src_lo;
585 __be32 r5;
586};
587
588struct fw_ri_recv_wr {
589 __u8 opcode;
590 __u8 r1;
591 __u16 wrid;
592 __u8 r2[3];
593 __u8 len16;
594 struct fw_ri_isgl isgl;
595};
596
597struct fw_ri_bind_mw_wr {
598 __u8 opcode;
599 __u8 flags;
600 __u16 wrid;
601 __u8 r1[3];
602 __u8 len16;
603 __u8 qpbinde_to_dcacpu;
604 __u8 pgsz_shift;
605 __u8 addr_type;
606 __u8 mem_perms;
607 __be32 stag_mr;
608 __be32 stag_mw;
609 __be32 r3;
610 __be64 len_mw;
611 __be64 va_fbo;
612 __be64 r4;
613};
614
615#define S_FW_RI_BIND_MW_WR_QPBINDE 6
616#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1
617#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
618#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \
619 (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
620#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U)
621
622#define S_FW_RI_BIND_MW_WR_NS 5
623#define M_FW_RI_BIND_MW_WR_NS 0x1
624#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS)
625#define G_FW_RI_BIND_MW_WR_NS(x) \
626 (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
627#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U)
628
629#define S_FW_RI_BIND_MW_WR_DCACPU 0
630#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f
631#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU)
632#define G_FW_RI_BIND_MW_WR_DCACPU(x) \
633 (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)
634
635struct fw_ri_fr_nsmr_wr {
636 __u8 opcode;
637 __u8 flags;
638 __u16 wrid;
639 __u8 r1[3];
640 __u8 len16;
641 __u8 qpbinde_to_dcacpu;
642 __u8 pgsz_shift;
643 __u8 addr_type;
644 __u8 mem_perms;
645 __be32 stag;
646 __be32 len_hi;
647 __be32 len_lo;
648 __be32 va_hi;
649 __be32 va_lo_fbo;
650};
651
652#define S_FW_RI_FR_NSMR_WR_QPBINDE 6
653#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1
654#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
655#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \
656 (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
657#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U)
658
659#define S_FW_RI_FR_NSMR_WR_NS 5
660#define M_FW_RI_FR_NSMR_WR_NS 0x1
661#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS)
662#define G_FW_RI_FR_NSMR_WR_NS(x) \
663 (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
664#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U)
665
666#define S_FW_RI_FR_NSMR_WR_DCACPU 0
667#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f
668#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
669#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \
670 (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
671
672struct fw_ri_inv_lstag_wr {
673 __u8 opcode;
674 __u8 flags;
675 __u16 wrid;
676 __u8 r1[3];
677 __u8 len16;
678 __be32 r2;
679 __be32 stag_inv;
680};
681
682enum fw_ri_type {
683 FW_RI_TYPE_INIT,
684 FW_RI_TYPE_FINI,
685 FW_RI_TYPE_TERMINATE
686};
687
688enum fw_ri_init_p2ptype {
689 FW_RI_INIT_P2PTYPE_RDMA_WRITE = FW_RI_RDMA_WRITE,
690 FW_RI_INIT_P2PTYPE_READ_REQ = FW_RI_READ_REQ,
691 FW_RI_INIT_P2PTYPE_SEND = FW_RI_SEND,
692 FW_RI_INIT_P2PTYPE_SEND_WITH_INV = FW_RI_SEND_WITH_INV,
693 FW_RI_INIT_P2PTYPE_SEND_WITH_SE = FW_RI_SEND_WITH_SE,
694 FW_RI_INIT_P2PTYPE_SEND_WITH_SE_INV = FW_RI_SEND_WITH_SE_INV,
695 FW_RI_INIT_P2PTYPE_DISABLED = 0xf,
696};
697
698struct fw_ri_wr {
699 __be32 op_compl;
700 __be32 flowid_len16;
701 __u64 cookie;
702 union fw_ri {
703 struct fw_ri_init {
704 __u8 type;
705 __u8 mpareqbit_p2ptype;
706 __u8 r4[2];
707 __u8 mpa_attrs;
708 __u8 qp_caps;
709 __be16 nrqe;
710 __be32 pdid;
711 __be32 qpid;
712 __be32 sq_eqid;
713 __be32 rq_eqid;
714 __be32 scqid;
715 __be32 rcqid;
716 __be32 ord_max;
717 __be32 ird_max;
718 __be32 iss;
719 __be32 irs;
720 __be32 hwrqsize;
721 __be32 hwrqaddr;
722 __be64 r5;
723 union fw_ri_init_p2p {
724 struct fw_ri_rdma_write_wr write;
725 struct fw_ri_rdma_read_wr read;
726 struct fw_ri_send_wr send;
727 } u;
728 } init;
729 struct fw_ri_fini {
730 __u8 type;
731 __u8 r3[7];
732 __be64 r4;
733 } fini;
734 struct fw_ri_terminate {
735 __u8 type;
736 __u8 r3[3];
737 __be32 immdlen;
738 __u8 termmsg[40];
739 } terminate;
740 } u;
741};
742
743#define S_FW_RI_WR_MPAREQBIT 7
744#define M_FW_RI_WR_MPAREQBIT 0x1
745#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
746#define G_FW_RI_WR_MPAREQBIT(x) \
747 (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
748#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U)
749
750#define S_FW_RI_WR_P2PTYPE 0
751#define M_FW_RI_WR_P2PTYPE 0xf
752#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE)
753#define G_FW_RI_WR_P2PTYPE(x) \
754 (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
755
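/*
 * Assumed decode helper (illustrative only): the low nibble of
 * mpareqbit_p2ptype selects which member of the fw_ri_init_p2p union
 * is valid, using the fw_ri_init_p2ptype encoding above, with
 * FW_RI_INIT_P2PTYPE_DISABLED meaning no peer-to-peer WR is attached.
 */
static inline u8 example_init_p2ptype(const struct fw_ri_wr *wqe)
{
	return G_FW_RI_WR_P2PTYPE(wqe->u.init.mpareqbit_p2ptype);
}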
756struct tcp_options {
757 __be16 mss;
758 __u8 wsf;
759#if defined(__LITTLE_ENDIAN_BITFIELD)
760 __u8:4;
761 __u8 unknown:1;
762 __u8:1;
763 __u8 sack:1;
764 __u8 tstamp:1;
765#else
766 __u8 tstamp:1;
767 __u8 sack:1;
768 __u8:1;
769 __u8 unknown:1;
770 __u8:4;
771#endif
772};
773
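/*
 * The bitfield above is declared twice because C leaves bit order
 * within a byte implementation-defined; the kernel's
 * __LITTLE_ENDIAN_BITFIELD/__BIG_ENDIAN_BITFIELD switch picks the
 * layout matching the wire format on each architecture.  A minimal
 * (illustrative) accessor:
 */
static inline int example_peer_sent_tstamp(const struct tcp_options *opt)
{
	return opt->tstamp;	/* 1 if the SYN carried TCP timestamps */
}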
774struct cpl_pass_accept_req {
775 union opcode_tid ot;
776 __be16 rsvd;
777 __be16 len;
778 __be32 hdr_len;
779 __be16 vlan;
780 __be16 l2info;
781 __be32 tos_stid;
782 struct tcp_options tcpopt;
783};
784
785/* cpl_pass_accept_req.hdr_len fields */
786#define S_SYN_RX_CHAN 0
787#define M_SYN_RX_CHAN 0xF
788#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
789#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
790
791#define S_TCP_HDR_LEN 10
792#define M_TCP_HDR_LEN 0x3F
793#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
794#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
795
796#define S_IP_HDR_LEN 16
797#define M_IP_HDR_LEN 0x3FF
798#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
799#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
800
801#define S_ETH_HDR_LEN 26
802#define M_ETH_HDR_LEN 0x1F
803#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
804#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
805
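/*
 * Assumed decode sketch (names illustrative): connection-setup code
 * would split the packed hdr_len word of a cpl_pass_accept_req into
 * its component header lengths like this:
 */
static inline void example_decode_hdr_len(const struct cpl_pass_accept_req *req,
					  u32 *eth, u32 *ip, u32 *tcp)
{
	u32 hdr_len = be32_to_cpu(req->hdr_len);

	*eth = G_ETH_HDR_LEN(hdr_len);
	*ip = G_IP_HDR_LEN(hdr_len);
	*tcp = G_TCP_HDR_LEN(hdr_len);
}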
806/* cpl_pass_accept_req.l2info fields */
807#define S_SYN_MAC_IDX 0
808#define M_SYN_MAC_IDX 0x1FF
809#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
810#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
811
812#define S_SYN_XACT_MATCH 9
813#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
814#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U)
815
816#define S_SYN_INTF 12
817#define M_SYN_INTF 0xF
818#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
819#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
820
821struct ulptx_idata {
822 __be32 cmd_more;
823 __be32 len;
824};
825
826#define S_ULPTX_NSGE 0
827#define M_ULPTX_NSGE 0xFFFF
828#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
829#endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
new file mode 100644
index 000000000000..ed6414abde02
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/user.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __C4IW_USER_H__
33#define __C4IW_USER_H__
34
35#define C4IW_UVERBS_ABI_VERSION 1
36
37/*
38 * Make sure that all structs defined in this file remain laid out so
39 * that they pack the same way on 32-bit and 64-bit architectures (to
40 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
41 * In particular do not use pointer types -- pass pointers in __u64
42 * instead.
43 */
44struct c4iw_create_cq_resp {
45 __u64 key;
46 __u64 gts_key;
47 __u64 memsize;
48 __u32 cqid;
49 __u32 size;
50 __u32 qid_mask;
51};
52
53struct c4iw_create_qp_resp {
54 __u64 sq_key;
55 __u64 rq_key;
56 __u64 sq_db_gts_key;
57 __u64 rq_db_gts_key;
58 __u64 sq_memsize;
59 __u64 rq_memsize;
60 __u32 sqid;
61 __u32 rqid;
62 __u32 sq_size;
63 __u32 rq_size;
64 __u32 qid_mask;
65};
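/*
 * Hedged userspace sketch (assumed provider-library usage, e.g. what
 * a libcxgb4-style library would do): each *_key above is an opaque
 * mmap() offset handed out by the kernel, used to map the raw queue
 * memory and its doorbell/GTS page into the process:
 *
 *	struct c4iw_create_qp_resp resp;  // filled in by the create-QP verb
 *	void *sq = mmap(NULL, resp.sq_memsize, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, cmd_fd, resp.sq_key);
 */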
66#endif
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index cc2ddd29ac57..5a219a2fdf16 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -661,6 +661,14 @@ repoll:
661 wc->opcode = IB_WC_FETCH_ADD; 661 wc->opcode = IB_WC_FETCH_ADD;
662 wc->byte_len = 8; 662 wc->byte_len = 8;
663 break; 663 break;
664 case MLX4_OPCODE_MASKED_ATOMIC_CS:
665 wc->opcode = IB_WC_MASKED_COMP_SWAP;
666 wc->byte_len = 8;
667 break;
668 case MLX4_OPCODE_MASKED_ATOMIC_FA:
669 wc->opcode = IB_WC_MASKED_FETCH_ADD;
670 wc->byte_len = 8;
671 break;
664 case MLX4_OPCODE_BIND_MW: 672 case MLX4_OPCODE_BIND_MW:
665 wc->opcode = IB_WC_BIND_MW; 673 wc->opcode = IB_WC_BIND_MW;
666 break; 674 break;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 01f2a3f93355..39051417054c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -139,6 +139,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
139 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay; 139 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
140 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ? 140 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
141 IB_ATOMIC_HCA : IB_ATOMIC_NONE; 141 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
142 props->masked_atomic_cap = IB_ATOMIC_HCA;
142 props->max_pkeys = dev->dev->caps.pkey_table_len[1]; 143 props->max_pkeys = dev->dev->caps.pkey_table_len[1];
143 props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms; 144 props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
144 props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm; 145 props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 5643f4a8ffef..6a60827b2301 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -74,17 +74,19 @@ enum {
74}; 74};
75 75
76static const __be32 mlx4_ib_opcode[] = { 76static const __be32 mlx4_ib_opcode[] = {
77 [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), 77 [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
78 [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), 78 [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
79 [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM), 79 [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
80 [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), 80 [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
81 [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), 81 [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
82 [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ), 82 [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
83 [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), 83 [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
84 [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), 84 [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
85 [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL), 85 [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
86 [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), 86 [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
87 [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), 87 [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
88 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
89 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
88}; 90};
89 91
90static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) 92static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -1407,6 +1409,9 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
1407 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { 1409 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1408 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); 1410 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1409 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); 1411 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1412 } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
1413 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1414 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1410 } else { 1415 } else {
1411 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); 1416 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1412 aseg->compare = 0; 1417 aseg->compare = 0;
@@ -1414,6 +1419,15 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
1414 1419
1415} 1420}
1416 1421
1422static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
1423 struct ib_send_wr *wr)
1424{
1425 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1426 aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
1427 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1428 aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1429}
1430
1417static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, 1431static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
1418 struct ib_send_wr *wr) 1432 struct ib_send_wr *wr)
1419{ 1433{
@@ -1567,6 +1581,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1567 switch (wr->opcode) { 1581 switch (wr->opcode) {
1568 case IB_WR_ATOMIC_CMP_AND_SWP: 1582 case IB_WR_ATOMIC_CMP_AND_SWP:
1569 case IB_WR_ATOMIC_FETCH_AND_ADD: 1583 case IB_WR_ATOMIC_FETCH_AND_ADD:
1584 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
1570 set_raddr_seg(wqe, wr->wr.atomic.remote_addr, 1585 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
1571 wr->wr.atomic.rkey); 1586 wr->wr.atomic.rkey);
1572 wqe += sizeof (struct mlx4_wqe_raddr_seg); 1587 wqe += sizeof (struct mlx4_wqe_raddr_seg);
@@ -1579,6 +1594,19 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1579 1594
1580 break; 1595 break;
1581 1596
1597 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
1598 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
1599 wr->wr.atomic.rkey);
1600 wqe += sizeof (struct mlx4_wqe_raddr_seg);
1601
1602 set_masked_atomic_seg(wqe, wr);
1603 wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);
1604
1605 size += (sizeof (struct mlx4_wqe_raddr_seg) +
1606 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
1607
1608 break;
1609
1582 case IB_WR_RDMA_READ: 1610 case IB_WR_RDMA_READ:
1583 case IB_WR_RDMA_WRITE: 1611 case IB_WR_RDMA_WRITE:
1584 case IB_WR_RDMA_WRITE_WITH_IMM: 1612 case IB_WR_RDMA_WRITE_WITH_IMM:
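A hedged caller-side sketch of the new masked-atomic verbs (the field
values and the 8-byte local result SGE are illustrative assumptions):
a masked compare-and-swap compares only the bits selected by
compare_add_mask and swaps only the bits selected by swap_mask.

	struct ib_send_wr wr = {
		.opcode     = IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
		.sg_list    = &result_sge,	/* 8-byte local result buffer */
		.num_sge    = 1,
		.send_flags = IB_SEND_SIGNALED,
	};

	wr.wr.atomic.remote_addr      = remote_va;	/* 8-byte aligned */
	wr.wr.atomic.rkey             = rkey;
	wr.wr.atomic.compare_add      = expected;
	wr.wr.atomic.compare_add_mask = ~0ULL;		/* compare all bits */
	wr.wr.atomic.swap             = new_value;
	wr.wr.atomic.swap_mask        = 0xffULL;	/* swap only the low byte */

	ret = ib_post_send(qp, &wr, &bad_wr);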
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index c5ccc2daab60..b4e0cf4e95cd 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -211,7 +211,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
211 if (!buf->direct.buf) 211 if (!buf->direct.buf)
212 return -ENOMEM; 212 return -ENOMEM;
213 213
214 pci_unmap_addr_set(&buf->direct, mapping, t); 214 dma_unmap_addr_set(&buf->direct, mapping, t);
215 215
216 memset(buf->direct.buf, 0, size); 216 memset(buf->direct.buf, 0, size);
217 217
@@ -251,7 +251,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
251 goto err_free; 251 goto err_free;
252 252
253 dma_list[i] = t; 253 dma_list[i] = t;
254 pci_unmap_addr_set(&buf->page_list[i], mapping, t); 254 dma_unmap_addr_set(&buf->page_list[i], mapping, t);
255 255
256 clear_page(buf->page_list[i].buf); 256 clear_page(buf->page_list[i].buf);
257 } 257 }
@@ -289,12 +289,12 @@ void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
289 289
290 if (is_direct) 290 if (is_direct)
291 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, 291 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
292 pci_unmap_addr(&buf->direct, mapping)); 292 dma_unmap_addr(&buf->direct, mapping));
293 else { 293 else {
294 for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) 294 for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
295 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 295 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
296 buf->page_list[i].buf, 296 buf->page_list[i].buf,
297 pci_unmap_addr(&buf->page_list[i], 297 dma_unmap_addr(&buf->page_list[i],
298 mapping)); 298 mapping));
299 kfree(buf->page_list); 299 kfree(buf->page_list);
300 } 300 }
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 9388164b6053..8e8c728aff88 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -504,7 +504,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
504 goto err_out_free_pages; 504 goto err_out_free_pages;
505 505
506 dma_list[i] = t; 506 dma_list[i] = t;
507 pci_unmap_addr_set(&eq->page_list[i], mapping, t); 507 dma_unmap_addr_set(&eq->page_list[i], mapping, t);
508 508
509 clear_page(eq->page_list[i].buf); 509 clear_page(eq->page_list[i].buf);
510 } 510 }
@@ -579,7 +579,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
579 if (eq->page_list[i].buf) 579 if (eq->page_list[i].buf)
580 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 580 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
581 eq->page_list[i].buf, 581 eq->page_list[i].buf,
582 pci_unmap_addr(&eq->page_list[i], 582 dma_unmap_addr(&eq->page_list[i],
583 mapping)); 583 mapping));
584 584
585 mthca_free_mailbox(dev, mailbox); 585 mthca_free_mailbox(dev, mailbox);
@@ -629,7 +629,7 @@ static void mthca_free_eq(struct mthca_dev *dev,
629 for (i = 0; i < npages; ++i) 629 for (i = 0; i < npages; ++i)
630 pci_free_consistent(dev->pdev, PAGE_SIZE, 630 pci_free_consistent(dev->pdev, PAGE_SIZE,
631 eq->page_list[i].buf, 631 eq->page_list[i].buf,
632 pci_unmap_addr(&eq->page_list[i], mapping)); 632 dma_unmap_addr(&eq->page_list[i], mapping));
633 633
634 kfree(eq->page_list); 634 kfree(eq->page_list);
635 mthca_free_mailbox(dev, mailbox); 635 mthca_free_mailbox(dev, mailbox);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 90f4c4d2e983..596acc45569b 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -46,7 +46,7 @@
46 46
47struct mthca_buf_list { 47struct mthca_buf_list {
48 void *buf; 48 void *buf;
49 DECLARE_PCI_UNMAP_ADDR(mapping) 49 DEFINE_DMA_UNMAP_ADDR(mapping);
50}; 50};
51 51
52union mthca_buf { 52union mthca_buf {
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index c36a3f514929..86acb7d57064 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1297,7 +1297,7 @@ int nes_destroy_cqp(struct nes_device *nesdev)
1297/** 1297/**
1298 * nes_init_1g_phy 1298 * nes_init_1g_phy
1299 */ 1299 */
1300int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) 1300static int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
1301{ 1301{
1302 u32 counter = 0; 1302 u32 counter = 0;
1303 u16 phy_data; 1303 u16 phy_data;
@@ -1351,7 +1351,7 @@ int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
1351/** 1351/**
1352 * nes_init_2025_phy 1352 * nes_init_2025_phy
1353 */ 1353 */
1354int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) 1354static int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
1355{ 1355{
1356 u32 temp_phy_data = 0; 1356 u32 temp_phy_data = 0;
1357 u32 temp_phy_data2 = 0; 1357 u32 temp_phy_data2 = 0;
@@ -2458,7 +2458,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2458 return; 2458 return;
2459 } 2459 }
2460 nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT; 2460 nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT;
2461 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
2462 2461
2463 /* ack the MAC interrupt */ 2462 /* ack the MAC interrupt */
2464 mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200)); 2463 mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200));
@@ -2469,11 +2468,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2469 2468
2470 if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) { 2469 if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) {
2471 nesdev->link_status_interrupts++; 2470 nesdev->link_status_interrupts++;
2472 if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) { 2471 if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS)))
2473 spin_lock_irqsave(&nesadapter->phy_lock, flags);
2474 nes_reset_link(nesdev, mac_index); 2472 nes_reset_link(nesdev, mac_index);
2475 spin_unlock_irqrestore(&nesadapter->phy_lock, flags); 2473
2476 }
2477 /* read the PHY interrupt status register */ 2474 /* read the PHY interrupt status register */
2478 if ((nesadapter->OneG_Mode) && 2475 if ((nesadapter->OneG_Mode) &&
2479 (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) { 2476 (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
@@ -2587,6 +2584,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2587 break; 2584 break;
2588 } 2585 }
2589 } 2586 }
2587 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
2590 2588
2591 if (phy_data & 0x0004) { 2589 if (phy_data & 0x0004) {
2592 if (wide_ppm_offset && 2590 if (wide_ppm_offset &&
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index b7c813f4be43..9f4cadf9f851 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1461,11 +1461,14 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
1461 et_cmd->transceiver = XCVR_INTERNAL; 1461 et_cmd->transceiver = XCVR_INTERNAL;
1462 et_cmd->phy_address = mac_index; 1462 et_cmd->phy_address = mac_index;
1463 } else { 1463 } else {
1464 unsigned long flags;
1464 et_cmd->supported = SUPPORTED_1000baseT_Full 1465 et_cmd->supported = SUPPORTED_1000baseT_Full
1465 | SUPPORTED_Autoneg; 1466 | SUPPORTED_Autoneg;
1466 et_cmd->advertising = ADVERTISED_1000baseT_Full 1467 et_cmd->advertising = ADVERTISED_1000baseT_Full
1467 | ADVERTISED_Autoneg; 1468 | ADVERTISED_Autoneg;
1469 spin_lock_irqsave(&nesadapter->phy_lock, flags);
1468 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); 1470 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1471 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
1469 if (phy_data & 0x1000) 1472 if (phy_data & 0x1000)
1470 et_cmd->autoneg = AUTONEG_ENABLE; 1473 et_cmd->autoneg = AUTONEG_ENABLE;
1471 else 1474 else
@@ -1503,12 +1506,15 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
1503 struct nes_vnic *nesvnic = netdev_priv(netdev); 1506 struct nes_vnic *nesvnic = netdev_priv(netdev);
1504 struct nes_device *nesdev = nesvnic->nesdev; 1507 struct nes_device *nesdev = nesvnic->nesdev;
1505 struct nes_adapter *nesadapter = nesdev->nesadapter; 1508 struct nes_adapter *nesadapter = nesdev->nesadapter;
1506 u16 phy_data;
1507 1509
1508 if ((nesadapter->OneG_Mode) && 1510 if ((nesadapter->OneG_Mode) &&
1509 (nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) { 1511 (nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) {
1510 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], 1512 unsigned long flags;
1511 &phy_data); 1513 u16 phy_data;
1514 u8 phy_index = nesadapter->phy_index[nesdev->mac_index];
1515
1516 spin_lock_irqsave(&nesadapter->phy_lock, flags);
1517 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1512 if (et_cmd->autoneg) { 1518 if (et_cmd->autoneg) {
1513 /* Turn on Full duplex, Autoneg, and restart autonegotiation */ 1519 /* Turn on Full duplex, Autoneg, and restart autonegotiation */
1514 phy_data |= 0x1300; 1520 phy_data |= 0x1300;
@@ -1516,8 +1522,8 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
1516 /* Turn off autoneg */ 1522 /* Turn off autoneg */
1517 phy_data &= ~0x1000; 1523 phy_data &= ~0x1000;
1518 } 1524 }
1519 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], 1525 nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
1520 phy_data); 1526 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
1521 } 1527 }
1522 1528
1523 return 0; 1529 return 0;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 186623d86959..a9f5dd272f1a 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -381,12 +381,8 @@ static u16 nes_read16_eeprom(void __iomem *addr, u16 offset)
381 */ 381 */
382void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data) 382void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data)
383{ 383{
384 struct nes_adapter *nesadapter = nesdev->nesadapter;
385 u32 u32temp; 384 u32 u32temp;
386 u32 counter; 385 u32 counter;
387 unsigned long flags;
388
389 spin_lock_irqsave(&nesadapter->phy_lock, flags);
390 386
391 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 387 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
392 0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23)); 388 0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -402,8 +398,6 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
402 if (!(u32temp & 1)) 398 if (!(u32temp & 1))
403 nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n", 399 nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
404 u32temp); 400 u32temp);
405
406 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
407} 401}
408 402
409 403
@@ -414,14 +408,11 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
414 */ 408 */
415void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data) 409void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data)
416{ 410{
417 struct nes_adapter *nesadapter = nesdev->nesadapter;
418 u32 u32temp; 411 u32 u32temp;
419 u32 counter; 412 u32 counter;
420 unsigned long flags;
421 413
422 /* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n", 414 /* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n",
423 phy_addr, nesdev->mac_index); */ 415 phy_addr, nesdev->mac_index); */
424 spin_lock_irqsave(&nesadapter->phy_lock, flags);
425 416
426 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 417 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
427 0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23)); 418 0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -441,7 +432,6 @@ void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16
441 } else { 432 } else {
442 *data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 433 *data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
443 } 434 }
444 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
445} 435}
446 436
447 437
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index e54f312e4bdc..925e1f2d1d55 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -374,7 +374,7 @@ static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
374/* 374/*
375 * nes_alloc_fast_reg_mr 375 * nes_alloc_fast_reg_mr
376 */ 376 */
377struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len) 377static struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
378{ 378{
379 struct nes_pd *nespd = to_nespd(ibpd); 379 struct nes_pd *nespd = to_nespd(ibpd);
380 struct nes_vnic *nesvnic = to_nesvnic(ibpd->device); 380 struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index d10b4ec68d28..40e858492f90 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -49,6 +49,25 @@ static u32 ipoib_get_rx_csum(struct net_device *dev)
49 !test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 49 !test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
50} 50}
51 51
52static int ipoib_set_tso(struct net_device *dev, u32 data)
53{
54 struct ipoib_dev_priv *priv = netdev_priv(dev);
55
56 if (data) {
57 if (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
58 (dev->features & NETIF_F_SG) &&
59 (priv->hca_caps & IB_DEVICE_UD_TSO)) {
60 dev->features |= NETIF_F_TSO;
61 } else {
62 ipoib_warn(priv, "can't set TSO on\n");
63 return -EOPNOTSUPP;
64 }
65 } else
66 dev->features &= ~NETIF_F_TSO;
67
68 return 0;
69}
70
52static int ipoib_get_coalesce(struct net_device *dev, 71static int ipoib_get_coalesce(struct net_device *dev,
53 struct ethtool_coalesce *coal) 72 struct ethtool_coalesce *coal)
54{ 73{
@@ -131,6 +150,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev,
131static const struct ethtool_ops ipoib_ethtool_ops = { 150static const struct ethtool_ops ipoib_ethtool_ops = {
132 .get_drvinfo = ipoib_get_drvinfo, 151 .get_drvinfo = ipoib_get_drvinfo,
133 .get_rx_csum = ipoib_get_rx_csum, 152 .get_rx_csum = ipoib_get_rx_csum,
153 .set_tso = ipoib_set_tso,
134 .get_coalesce = ipoib_get_coalesce, 154 .get_coalesce = ipoib_get_coalesce,
135 .set_coalesce = ipoib_set_coalesce, 155 .set_coalesce = ipoib_set_coalesce,
136 .get_flags = ethtool_op_get_flags, 156 .get_flags = ethtool_op_get_flags,
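The new ipoib_set_tso hook only allows TSO in datagram mode, and only when scatter/gather and the HCA's UD TSO capability are both present. A standalone sketch of just that gating logic (the flag constants are stand-ins, not the kernel's NETIF_F_*/IB_DEVICE_* values):

    #include <errno.h>
    #include <stdbool.h>

    #define CAP_ADMIN_CM (1u << 0) /* connected mode requested */
    #define CAP_SG       (1u << 1) /* scatter/gather available */
    #define CAP_UD_TSO   (1u << 2) /* HCA supports UD TSO */
    #define FEAT_TSO     (1u << 0) /* the feature bit being toggled */

    static int set_tso(unsigned int *features, unsigned int caps, bool on)
    {
        if (!on) {
            *features &= ~FEAT_TSO; /* turning TSO off is always allowed */
            return 0;
        }
        if ((caps & CAP_ADMIN_CM) || !(caps & CAP_SG) || !(caps & CAP_UD_TSO))
            return -EOPNOTSUPP;     /* same refusal as ipoib_set_tso */
        *features |= FEAT_TSO;
        return 0;
    }

From userspace this path is reached by a feature toggle such as "ethtool -K ib0 tso on".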
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 93399dff0c6f..7b2fc98e2f2b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -325,7 +325,7 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
325 */ 325 */
326 if (ib_conn) { 326 if (ib_conn) {
327 ib_conn->iser_conn = NULL; 327 ib_conn->iser_conn = NULL;
328 iser_conn_put(ib_conn); 328 iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
329 } 329 }
330} 330}
331 331
@@ -357,11 +357,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
357 /* binds the iSER connection retrieved from the previously 357 /* binds the iSER connection retrieved from the previously
358 * connected ep_handle to the iSCSI layer connection. exchanges 358 * connected ep_handle to the iSCSI layer connection. exchanges
359 * connection pointers */ 359 * connection pointers */
360 iser_err("binding iscsi conn %p to iser_conn %p\n",conn,ib_conn); 360 iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
361 conn, conn->dd_data, ib_conn);
361 iser_conn = conn->dd_data; 362 iser_conn = conn->dd_data;
362 ib_conn->iser_conn = iser_conn; 363 ib_conn->iser_conn = iser_conn;
363 iser_conn->ib_conn = ib_conn; 364 iser_conn->ib_conn = ib_conn;
364 iser_conn_get(ib_conn); 365 iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */
365 return 0; 366 return 0;
366} 367}
367 368
@@ -382,7 +383,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
382 * There is no unbind event so the stop callback 383 * There is no unbind event so the stop callback
383 * must release the ref from the bind. 384 * must release the ref from the bind.
384 */ 385 */
385 iser_conn_put(ib_conn); 386 iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
386 } 387 }
387 iser_conn->ib_conn = NULL; 388 iser_conn->ib_conn = NULL;
388} 389}
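The comments added above pair each iser_conn_get with the iser_conn_put that balances it, and the put now takes a flag saying whether this caller is allowed to destroy the CMA id. A userspace sketch of that shape, assuming nothing beyond what the diff shows:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct conn {
        atomic_int refcount;
        void *cma_id;
    };

    static void conn_get(struct conn *c)
    {
        atomic_fetch_add(&c->refcount, 1);
    }

    /* Returns 1 when this put released the object, mirroring the new
     * int return type of iser_conn_put. */
    static int conn_put(struct conn *c, int can_destroy_id)
    {
        if (atomic_fetch_sub(&c->refcount, 1) != 1)
            return 0;
        if (c->cma_id && can_destroy_id)
            ; /* rdma_destroy_id() would go here */
        free(c);
        return 1;
    }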
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 036934cdcb92..f1df01567bb6 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -232,6 +232,7 @@ struct iser_device {
232 struct ib_cq *tx_cq; 232 struct ib_cq *tx_cq;
233 struct ib_mr *mr; 233 struct ib_mr *mr;
234 struct tasklet_struct cq_tasklet; 234 struct tasklet_struct cq_tasklet;
235 struct ib_event_handler event_handler;
235 struct list_head ig_list; /* entry in ig devices list */ 236 struct list_head ig_list; /* entry in ig devices list */
236 int refcount; 237 int refcount;
237}; 238};
@@ -246,7 +247,6 @@ struct iser_conn {
246 struct rdma_cm_id *cma_id; /* CMA ID */ 247 struct rdma_cm_id *cma_id; /* CMA ID */
247 struct ib_qp *qp; /* QP */ 248 struct ib_qp *qp; /* QP */
248 struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */ 249 struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
249 int disc_evt_flag; /* disconn event delivered */
250 wait_queue_head_t wait; /* waitq for conn/disconn */ 250 wait_queue_head_t wait; /* waitq for conn/disconn */
251 int post_recv_buf_count; /* posted rx count */ 251 int post_recv_buf_count; /* posted rx count */
252 atomic_t post_send_buf_count; /* posted tx count */ 252 atomic_t post_send_buf_count; /* posted tx count */
@@ -320,7 +320,7 @@ void iser_conn_init(struct iser_conn *ib_conn);
320 320
321void iser_conn_get(struct iser_conn *ib_conn); 321void iser_conn_get(struct iser_conn *ib_conn);
322 322
323void iser_conn_put(struct iser_conn *ib_conn); 323int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed);
324 324
325void iser_conn_terminate(struct iser_conn *ib_conn); 325void iser_conn_terminate(struct iser_conn *ib_conn);
326 326
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b89d76b39a13..9876865732f7 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -54,6 +54,13 @@ static void iser_qp_event_callback(struct ib_event *cause, void *context)
54 iser_err("got qp event %d\n",cause->event); 54 iser_err("got qp event %d\n",cause->event);
55} 55}
56 56
57static void iser_event_handler(struct ib_event_handler *handler,
58 struct ib_event *event)
59{
60 iser_err("async event %d on device %s port %d\n", event->event,
61 event->device->name, event->element.port_num);
62}
63
57/** 64/**
58 * iser_create_device_ib_res - creates Protection Domain (PD), Completion 65 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
59 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with 66 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
@@ -96,8 +103,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
96 if (IS_ERR(device->mr)) 103 if (IS_ERR(device->mr))
97 goto dma_mr_err; 104 goto dma_mr_err;
98 105
106 INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
107 iser_event_handler);
108 if (ib_register_event_handler(&device->event_handler))
109 goto handler_err;
110
99 return 0; 111 return 0;
100 112
113handler_err:
114 ib_dereg_mr(device->mr);
101dma_mr_err: 115dma_mr_err:
102 tasklet_kill(&device->cq_tasklet); 116 tasklet_kill(&device->cq_tasklet);
103cq_arm_err: 117cq_arm_err:
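The hunk above slots a new ib_register_event_handler step into iser_create_device_ib_res and a matching handler_err label into the unwind chain. The pattern in miniature, with placeholder resources rather than the iser ones:

    #include <stdlib.h>

    struct res { void *mr, *handler; };

    static int setup(struct res *r)
    {
        r->mr = malloc(1);            /* earlier step, e.g. the DMA MR */
        if (!r->mr)
            goto mr_err;

        r->handler = malloc(1);       /* new step: register event handler */
        if (!r->handler)
            goto handler_err;

        return 0;

    handler_err:
        free(r->mr);                  /* new label undoes only the prior step */
    mr_err:
        return -1;
    }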
@@ -120,7 +134,7 @@ static void iser_free_device_ib_res(struct iser_device *device)
120 BUG_ON(device->mr == NULL); 134 BUG_ON(device->mr == NULL);
121 135
122 tasklet_kill(&device->cq_tasklet); 136 tasklet_kill(&device->cq_tasklet);
123 137 (void)ib_unregister_event_handler(&device->event_handler);
124 (void)ib_dereg_mr(device->mr); 138 (void)ib_dereg_mr(device->mr);
125 (void)ib_destroy_cq(device->tx_cq); 139 (void)ib_destroy_cq(device->tx_cq);
126 (void)ib_destroy_cq(device->rx_cq); 140 (void)ib_destroy_cq(device->rx_cq);
@@ -149,10 +163,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
149 device = ib_conn->device; 163 device = ib_conn->device;
150 164
151 ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL); 165 ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
152 if (!ib_conn->login_buf) { 166 if (!ib_conn->login_buf)
153 goto alloc_err; 167 goto out_err;
154 ret = -ENOMEM;
155 }
156 168
157 ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device, 169 ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
158 (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE, 170 (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
@@ -161,10 +173,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
161 ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) + 173 ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
162 (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)), 174 (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
163 GFP_KERNEL); 175 GFP_KERNEL);
164 if (!ib_conn->page_vec) { 176 if (!ib_conn->page_vec)
165 ret = -ENOMEM; 177 goto out_err;
166 goto alloc_err; 178
167 }
168 ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1); 179 ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
169 180
170 params.page_shift = SHIFT_4K; 181 params.page_shift = SHIFT_4K;
@@ -184,7 +195,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
184 ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params); 195 ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
185 if (IS_ERR(ib_conn->fmr_pool)) { 196 if (IS_ERR(ib_conn->fmr_pool)) {
186 ret = PTR_ERR(ib_conn->fmr_pool); 197 ret = PTR_ERR(ib_conn->fmr_pool);
187 goto fmr_pool_err; 198 ib_conn->fmr_pool = NULL;
199 goto out_err;
188 } 200 }
189 201
190 memset(&init_attr, 0, sizeof init_attr); 202 memset(&init_attr, 0, sizeof init_attr);
@@ -202,7 +214,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
202 214
203 ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); 215 ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
204 if (ret) 216 if (ret)
205 goto qp_err; 217 goto out_err;
206 218
207 ib_conn->qp = ib_conn->cma_id->qp; 219 ib_conn->qp = ib_conn->cma_id->qp;
208 iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n", 220 iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
@@ -210,12 +222,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
210 ib_conn->fmr_pool, ib_conn->cma_id->qp); 222 ib_conn->fmr_pool, ib_conn->cma_id->qp);
211 return ret; 223 return ret;
212 224
213qp_err: 225out_err:
214 (void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
215fmr_pool_err:
216 kfree(ib_conn->page_vec);
217 kfree(ib_conn->login_buf);
218alloc_err:
219 iser_err("unable to alloc mem or create resource, err %d\n", ret); 226 iser_err("unable to alloc mem or create resource, err %d\n", ret);
220 return ret; 227 return ret;
221} 228}
@@ -224,7 +231,7 @@ alloc_err:
224 * releases the FMR pool, QP and CMA ID objects, returns 0 on success, 231 * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
225 * -1 on failure 232 * -1 on failure
226 */ 233 */
227static int iser_free_ib_conn_res(struct iser_conn *ib_conn) 234static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
228{ 235{
229 BUG_ON(ib_conn == NULL); 236 BUG_ON(ib_conn == NULL);
230 237
@@ -239,7 +246,8 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
239 if (ib_conn->qp != NULL) 246 if (ib_conn->qp != NULL)
240 rdma_destroy_qp(ib_conn->cma_id); 247 rdma_destroy_qp(ib_conn->cma_id);
241 248
 242 if (ib_conn->cma_id != NULL) 249 /* in cma handler context, the caller arranges for the cma to destroy the id */
250 if (ib_conn->cma_id != NULL && can_destroy_id)
243 rdma_destroy_id(ib_conn->cma_id); 251 rdma_destroy_id(ib_conn->cma_id);
244 252
245 ib_conn->fmr_pool = NULL; 253 ib_conn->fmr_pool = NULL;
@@ -317,7 +325,7 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
317/** 325/**
318 * Frees all conn objects and deallocs conn descriptor 326 * Frees all conn objects and deallocs conn descriptor
319 */ 327 */
320static void iser_conn_release(struct iser_conn *ib_conn) 328static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
321{ 329{
322 struct iser_device *device = ib_conn->device; 330 struct iser_device *device = ib_conn->device;
323 331
@@ -327,13 +335,11 @@ static void iser_conn_release(struct iser_conn *ib_conn)
327 list_del(&ib_conn->conn_list); 335 list_del(&ib_conn->conn_list);
328 mutex_unlock(&ig.connlist_mutex); 336 mutex_unlock(&ig.connlist_mutex);
329 iser_free_rx_descriptors(ib_conn); 337 iser_free_rx_descriptors(ib_conn);
330 iser_free_ib_conn_res(ib_conn); 338 iser_free_ib_conn_res(ib_conn, can_destroy_id);
331 ib_conn->device = NULL; 339 ib_conn->device = NULL;
332 /* on EVENT_ADDR_ERROR there's no device yet for this conn */ 340 /* on EVENT_ADDR_ERROR there's no device yet for this conn */
333 if (device != NULL) 341 if (device != NULL)
334 iser_device_try_release(device); 342 iser_device_try_release(device);
335 if (ib_conn->iser_conn)
336 ib_conn->iser_conn->ib_conn = NULL;
337 iscsi_destroy_endpoint(ib_conn->ep); 343 iscsi_destroy_endpoint(ib_conn->ep);
338} 344}
339 345
@@ -342,10 +348,13 @@ void iser_conn_get(struct iser_conn *ib_conn)
342 atomic_inc(&ib_conn->refcount); 348 atomic_inc(&ib_conn->refcount);
343} 349}
344 350
345void iser_conn_put(struct iser_conn *ib_conn) 351int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id)
346{ 352{
347 if (atomic_dec_and_test(&ib_conn->refcount)) 353 if (atomic_dec_and_test(&ib_conn->refcount)) {
348 iser_conn_release(ib_conn); 354 iser_conn_release(ib_conn, can_destroy_id);
355 return 1;
356 }
357 return 0;
349} 358}
350 359
351/** 360/**
@@ -369,19 +378,20 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
369 wait_event_interruptible(ib_conn->wait, 378 wait_event_interruptible(ib_conn->wait,
370 ib_conn->state == ISER_CONN_DOWN); 379 ib_conn->state == ISER_CONN_DOWN);
371 380
372 iser_conn_put(ib_conn); 381 iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
373} 382}
374 383
375static void iser_connect_error(struct rdma_cm_id *cma_id) 384static int iser_connect_error(struct rdma_cm_id *cma_id)
376{ 385{
377 struct iser_conn *ib_conn; 386 struct iser_conn *ib_conn;
378 ib_conn = (struct iser_conn *)cma_id->context; 387 ib_conn = (struct iser_conn *)cma_id->context;
379 388
380 ib_conn->state = ISER_CONN_DOWN; 389 ib_conn->state = ISER_CONN_DOWN;
381 wake_up_interruptible(&ib_conn->wait); 390 wake_up_interruptible(&ib_conn->wait);
391 return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
382} 392}
383 393
384static void iser_addr_handler(struct rdma_cm_id *cma_id) 394static int iser_addr_handler(struct rdma_cm_id *cma_id)
385{ 395{
386 struct iser_device *device; 396 struct iser_device *device;
387 struct iser_conn *ib_conn; 397 struct iser_conn *ib_conn;
@@ -390,8 +400,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
390 device = iser_device_find_by_ib_device(cma_id); 400 device = iser_device_find_by_ib_device(cma_id);
391 if (!device) { 401 if (!device) {
392 iser_err("device lookup/creation failed\n"); 402 iser_err("device lookup/creation failed\n");
393 iser_connect_error(cma_id); 403 return iser_connect_error(cma_id);
394 return;
395 } 404 }
396 405
397 ib_conn = (struct iser_conn *)cma_id->context; 406 ib_conn = (struct iser_conn *)cma_id->context;
@@ -400,11 +409,13 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
400 ret = rdma_resolve_route(cma_id, 1000); 409 ret = rdma_resolve_route(cma_id, 1000);
401 if (ret) { 410 if (ret) {
402 iser_err("resolve route failed: %d\n", ret); 411 iser_err("resolve route failed: %d\n", ret);
403 iser_connect_error(cma_id); 412 return iser_connect_error(cma_id);
404 } 413 }
414
415 return 0;
405} 416}
406 417
407static void iser_route_handler(struct rdma_cm_id *cma_id) 418static int iser_route_handler(struct rdma_cm_id *cma_id)
408{ 419{
409 struct rdma_conn_param conn_param; 420 struct rdma_conn_param conn_param;
410 int ret; 421 int ret;
@@ -425,9 +436,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
425 goto failure; 436 goto failure;
426 } 437 }
427 438
428 return; 439 return 0;
429failure: 440failure:
430 iser_connect_error(cma_id); 441 return iser_connect_error(cma_id);
431} 442}
432 443
433static void iser_connected_handler(struct rdma_cm_id *cma_id) 444static void iser_connected_handler(struct rdma_cm_id *cma_id)
@@ -439,12 +450,12 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id)
439 wake_up_interruptible(&ib_conn->wait); 450 wake_up_interruptible(&ib_conn->wait);
440} 451}
441 452
442static void iser_disconnected_handler(struct rdma_cm_id *cma_id) 453static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
443{ 454{
444 struct iser_conn *ib_conn; 455 struct iser_conn *ib_conn;
456 int ret;
445 457
446 ib_conn = (struct iser_conn *)cma_id->context; 458 ib_conn = (struct iser_conn *)cma_id->context;
447 ib_conn->disc_evt_flag = 1;
448 459
449 /* getting here when the state is UP means that the conn is being * 460 /* getting here when the state is UP means that the conn is being *
450 * terminated asynchronously from the iSCSI layer's perspective. */ 461 * terminated asynchronously from the iSCSI layer's perspective. */
@@ -459,20 +470,24 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
459 ib_conn->state = ISER_CONN_DOWN; 470 ib_conn->state = ISER_CONN_DOWN;
460 wake_up_interruptible(&ib_conn->wait); 471 wake_up_interruptible(&ib_conn->wait);
461 } 472 }
473
474 ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
475 return ret;
462} 476}
463 477
464static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 478static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
465{ 479{
466 int ret = 0; 480 int ret = 0;
467 481
468 iser_err("event %d conn %p id %p\n",event->event,cma_id->context,cma_id); 482 iser_err("event %d status %d conn %p id %p\n",
483 event->event, event->status, cma_id->context, cma_id);
469 484
470 switch (event->event) { 485 switch (event->event) {
471 case RDMA_CM_EVENT_ADDR_RESOLVED: 486 case RDMA_CM_EVENT_ADDR_RESOLVED:
472 iser_addr_handler(cma_id); 487 ret = iser_addr_handler(cma_id);
473 break; 488 break;
474 case RDMA_CM_EVENT_ROUTE_RESOLVED: 489 case RDMA_CM_EVENT_ROUTE_RESOLVED:
475 iser_route_handler(cma_id); 490 ret = iser_route_handler(cma_id);
476 break; 491 break;
477 case RDMA_CM_EVENT_ESTABLISHED: 492 case RDMA_CM_EVENT_ESTABLISHED:
478 iser_connected_handler(cma_id); 493 iser_connected_handler(cma_id);
@@ -482,13 +497,12 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
482 case RDMA_CM_EVENT_CONNECT_ERROR: 497 case RDMA_CM_EVENT_CONNECT_ERROR:
483 case RDMA_CM_EVENT_UNREACHABLE: 498 case RDMA_CM_EVENT_UNREACHABLE:
484 case RDMA_CM_EVENT_REJECTED: 499 case RDMA_CM_EVENT_REJECTED:
485 iser_err("event: %d, error: %d\n", event->event, event->status); 500 ret = iser_connect_error(cma_id);
486 iser_connect_error(cma_id);
487 break; 501 break;
488 case RDMA_CM_EVENT_DISCONNECTED: 502 case RDMA_CM_EVENT_DISCONNECTED:
489 case RDMA_CM_EVENT_DEVICE_REMOVAL: 503 case RDMA_CM_EVENT_DEVICE_REMOVAL:
490 case RDMA_CM_EVENT_ADDR_CHANGE: 504 case RDMA_CM_EVENT_ADDR_CHANGE:
491 iser_disconnected_handler(cma_id); 505 ret = iser_disconnected_handler(cma_id);
492 break; 506 break;
493 default: 507 default:
494 iser_err("Unexpected RDMA CM event (%d)\n", event->event); 508 iser_err("Unexpected RDMA CM event (%d)\n", event->event);
@@ -503,7 +517,7 @@ void iser_conn_init(struct iser_conn *ib_conn)
503 init_waitqueue_head(&ib_conn->wait); 517 init_waitqueue_head(&ib_conn->wait);
504 ib_conn->post_recv_buf_count = 0; 518 ib_conn->post_recv_buf_count = 0;
505 atomic_set(&ib_conn->post_send_buf_count, 0); 519 atomic_set(&ib_conn->post_send_buf_count, 0);
506 atomic_set(&ib_conn->refcount, 1); 520 atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */
507 INIT_LIST_HEAD(&ib_conn->conn_list); 521 INIT_LIST_HEAD(&ib_conn->conn_list);
508 spin_lock_init(&ib_conn->lock); 522 spin_lock_init(&ib_conn->lock);
509} 523}
@@ -531,6 +545,7 @@ int iser_connect(struct iser_conn *ib_conn,
531 545
532 ib_conn->state = ISER_CONN_PENDING; 546 ib_conn->state = ISER_CONN_PENDING;
533 547
548 iser_conn_get(ib_conn); /* ref ib conn's cma id */
534 ib_conn->cma_id = rdma_create_id(iser_cma_handler, 549 ib_conn->cma_id = rdma_create_id(iser_cma_handler,
535 (void *)ib_conn, 550 (void *)ib_conn,
536 RDMA_PS_TCP); 551 RDMA_PS_TCP);
@@ -568,7 +583,7 @@ id_failure:
568addr_failure: 583addr_failure:
569 ib_conn->state = ISER_CONN_DOWN; 584 ib_conn->state = ISER_CONN_DOWN;
570connect_failure: 585connect_failure:
571 iser_conn_release(ib_conn); 586 iser_conn_release(ib_conn, 1);
572 return err; 587 return err;
573} 588}
574 589
@@ -737,12 +752,10 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
737 iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, 752 iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
738 ISCSI_ERR_CONN_FAILED); 753 ISCSI_ERR_CONN_FAILED);
739 754
 740 /* complete the termination process if disconnect event was delivered * 755 /* no outstanding posts remain on the QP; complete the
 741 * note there are no more non completed posts to the QP */ 756 * termination process without worrying about the disconnect event */
742 if (ib_conn->disc_evt_flag) { 757 ib_conn->state = ISER_CONN_DOWN;
743 ib_conn->state = ISER_CONN_DOWN; 758 wake_up_interruptible(&ib_conn->wait);
744 wake_up_interruptible(&ib_conn->wait);
745 }
746 } 759 }
747} 760}
748 761
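All the event handlers above now return int, and iser_cma_handler propagates the value. The rdma_cm convention (per the rdma_create_id callback documentation) is that a nonzero return from the event handler makes the CM core destroy the rdma_cm_id itself, which is why the error and disconnect paths call iser_conn_put(..., 0): they drop the id's reference but leave the actual rdma_destroy_id to the core. A schematic of that contract:

    /* Schematic only: a nonzero return tells the caller (the CM core)
     * to destroy the id, so the handler must not destroy it itself. */
    enum ev { EV_ESTABLISHED, EV_ERROR, EV_DISCONNECTED };

    static int handle_event(enum ev e)
    {
        switch (e) {
        case EV_ERROR:
        case EV_DISCONNECTED:
            return 1;   /* like iser_conn_put(ib_conn, 0) returning 1 */
        case EV_ESTABLISHED:
        default:
            return 0;   /* id stays alive */
        }
    }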
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 7e18bcf05a66..46239e47a260 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -59,11 +59,11 @@ static unsigned int get_time_pit(void)
59 unsigned long flags; 59 unsigned long flags;
60 unsigned int count; 60 unsigned int count;
61 61
62 spin_lock_irqsave(&i8253_lock, flags); 62 raw_spin_lock_irqsave(&i8253_lock, flags);
63 outb_p(0x00, 0x43); 63 outb_p(0x00, 0x43);
64 count = inb_p(0x40); 64 count = inb_p(0x40);
65 count |= inb_p(0x40) << 8; 65 count |= inb_p(0x40) << 8;
66 spin_unlock_irqrestore(&i8253_lock, flags); 66 raw_spin_unlock_irqrestore(&i8253_lock, flags);
67 67
68 return count; 68 return count;
69} 69}
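Converting i8253_lock to the raw_spin_lock_* API matters on PREEMPT_RT, where ordinary spinlocks can sleep; the back-to-back port reads of the PIT latch must remain one genuinely atomic critical section. A userspace sketch of the shape of that section (a pthread spinlock stands in for the raw kernel lock, and pit_port for the 0x40 data port):

    #include <pthread.h>
    #include <stdint.h>

    static pthread_spinlock_t pit_lock;
    static volatile uint8_t pit_port[2]; /* stands in for two inb_p(0x40) reads */

    static unsigned int get_time_pit_sketch(void)
    {
        unsigned int count;

        pthread_spin_lock(&pit_lock);   /* kernel: raw_spin_lock_irqsave */
        count  = pit_port[0];           /* low byte of the latched counter */
        count |= pit_port[1] << 8;      /* high byte; must pair with the low read */
        pthread_spin_unlock(&pit_lock); /* kernel: raw_spin_unlock_irqrestore */

        return count;
    }

    int main(void)
    {
        pthread_spin_init(&pit_lock, PTHREAD_PROCESS_PRIVATE);
        return (int)get_time_pit_sketch();
    }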
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 1c0b529c06aa..4afe0a3b4884 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -146,11 +146,11 @@ static unsigned int get_time_pit(void)
146 unsigned long flags; 146 unsigned long flags;
147 unsigned int count; 147 unsigned int count;
148 148
149 spin_lock_irqsave(&i8253_lock, flags); 149 raw_spin_lock_irqsave(&i8253_lock, flags);
150 outb_p(0x00, 0x43); 150 outb_p(0x00, 0x43);
151 count = inb_p(0x40); 151 count = inb_p(0x40);
152 count |= inb_p(0x40) << 8; 152 count |= inb_p(0x40) << 8;
153 spin_unlock_irqrestore(&i8253_lock, flags); 153 raw_spin_unlock_irqrestore(&i8253_lock, flags);
154 154
155 return count; 155 return count;
156} 156}
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index b1edd778639c..405febd94f24 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -54,6 +54,9 @@ static signed short btn_avb_wheel[] =
54static signed short abs_joystick[] = 54static signed short abs_joystick[] =
55{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_HAT0X, ABS_HAT0Y, -1 }; 55{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_HAT0X, ABS_HAT0Y, -1 };
56 56
57static signed short abs_joystick_rudder[] =
58{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y, -1 };
59
57static signed short abs_avb_pegasus[] = 60static signed short abs_avb_pegasus[] =
58{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y, 61{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y,
59 ABS_HAT1X, ABS_HAT1Y, -1 }; 62 ABS_HAT1X, ABS_HAT1Y, -1 };
@@ -76,8 +79,9 @@ static struct iforce_device iforce_device[] = {
76 { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //? 79 { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
77 { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, 80 { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
78 { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //? 81 { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //?
82 { 0x06f8, 0x0001, "Guillemot Jet Leader Force Feedback", btn_joystick, abs_joystick_rudder, ff_iforce },
79 { 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? 83 { 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
80 { 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //? 84 { 0x06f8, 0xa302, "Guillemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //?
81 { 0x06d6, 0x29bc, "Trust Force Feedback Race Master", btn_wheel, abs_wheel, ff_iforce }, 85 { 0x06d6, 0x29bc, "Trust Force Feedback Race Master", btn_wheel, abs_wheel, ff_iforce },
82 { 0x0000, 0x0000, "Unknown I-Force Device [%04x:%04x]", btn_joystick, abs_joystick, ff_iforce } 86 { 0x0000, 0x0000, "Unknown I-Force Device [%04x:%04x]", btn_joystick, abs_joystick, ff_iforce }
83}; 87};
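The iforce_device table above ends with a 0x0000/0x0000 entry whose name string is a printf template, so a scan always terminates with a match. A runnable sketch of that lookup idiom (struct layout and names are illustrative, not the driver's):

    #include <stdio.h>

    struct dev_id { unsigned short vendor, product; const char *name; };

    static const struct dev_id table[] = {
        { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback" },
        { 0x0000, 0x0000, "Unknown I-Force Device [%04x:%04x]" }, /* catch-all */
    };

    static const struct dev_id *lookup(unsigned short v, unsigned short p)
    {
        const struct dev_id *d = table;

        while (d->vendor && (d->vendor != v || d->product != p))
            d++;
        return d; /* the terminating entry matches anything */
    }

    int main(void)
    {
        printf(lookup(0x1234, 0x5678)->name, 0x1234, 0x5678);
        putchar('\n');
        return 0;
    }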
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index b41303d3ec54..6c96631ae5d9 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -212,6 +212,7 @@ static struct usb_device_id iforce_usb_ids [] = {
212 { USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */ 212 { USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */
213 { USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */ 213 { USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */
214 { USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */ 214 { USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */
215 { USB_DEVICE(0x06f8, 0x0003) }, /* Guillemot Jet Leader Force Feedback */
215 { USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */ 216 { USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */
216 { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */ 217 { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */
217 { } /* Terminating entry */ 218 { } /* Terminating entry */
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 64c102355f53..a8293388d019 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -143,19 +143,6 @@ config KEYBOARD_BFIN
143 To compile this driver as a module, choose M here: the 143 To compile this driver as a module, choose M here: the
144 module will be called bf54x-keys. 144 module will be called bf54x-keys.
145 145
146config KEYBOARD_CORGI
147 tristate "Corgi keyboard (deprecated)"
148 depends on PXA_SHARPSL
149 help
150 Say Y here to enable the keyboard on the Sharp Zaurus SL-C7xx
151 series of PDAs.
152
153 This driver is now deprecated, use generic GPIO based matrix
154 keyboard driver instead.
155
156 To compile this driver as a module, choose M here: the
157 module will be called corgikbd.
158
159config KEYBOARD_LKKBD 146config KEYBOARD_LKKBD
160 tristate "DECstation/VAXstation LK201/LK401 keyboard" 147 tristate "DECstation/VAXstation LK201/LK401 keyboard"
161 select SERIO 148 select SERIO
@@ -339,19 +326,6 @@ config KEYBOARD_PXA930_ROTARY
339 To compile this driver as a module, choose M here: the 326 To compile this driver as a module, choose M here: the
340 module will be called pxa930_rotary. 327 module will be called pxa930_rotary.
341 328
342config KEYBOARD_SPITZ
343 tristate "Spitz keyboard (deprecated)"
344 depends on PXA_SHARPSL
345 help
346 Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000,
347 SL-C3000 and Sl-C3100 series of PDAs.
348
349 This driver is now deprecated, use generic GPIO based matrix
350 keyboard driver instead.
351
352 To compile this driver as a module, choose M here: the
353 module will be called spitzkbd.
354
355config KEYBOARD_STOWAWAY 329config KEYBOARD_STOWAWAY
356 tristate "Stowaway keyboard" 330 tristate "Stowaway keyboard"
357 select SERIO 331 select SERIO
@@ -414,28 +388,6 @@ config KEYBOARD_TWL4030
414 To compile this driver as a module, choose M here: the 388 To compile this driver as a module, choose M here: the
415 module will be called twl4030_keypad. 389 module will be called twl4030_keypad.
416 390
417config KEYBOARD_TOSA
418 tristate "Tosa keyboard (deprecated)"
419 depends on MACH_TOSA
420 help
421 Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa)
422
423 This driver is now deprecated, use generic GPIO based matrix
424 keyboard driver instead.
425
426 To compile this driver as a module, choose M here: the
427 module will be called tosakbd.
428
429config KEYBOARD_TOSA_USE_EXT_KEYCODES
430 bool "Tosa keyboard: use extended keycodes"
431 depends on KEYBOARD_TOSA
432 help
433 Say Y here to enable the tosa keyboard driver to generate extended
434 (>= 127) keycodes. Be aware, that they can't be correctly interpreted
435 by either console keyboard driver or by Kdrive keybd driver.
436
437 Say Y only if you know, what you are doing!
438
439config KEYBOARD_XTKBD 391config KEYBOARD_XTKBD
440 tristate "XT keyboard" 392 tristate "XT keyboard"
441 select SERIO 393 select SERIO
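The removed Kconfig entries all point at the generic GPIO matrix keyboard driver as the replacement. A minimal board-file sketch of registering it (the GPIO numbers, keymap, and timings here are made up for illustration; real values come from the machine's board support code):

    #include <linux/kernel.h>
    #include <linux/input.h>
    #include <linux/input/matrix_keypad.h>
    #include <linux/platform_device.h>

    static const uint32_t example_keymap[] = {
        KEY(0, 0, KEY_A),     /* row 0, column 0 */
        KEY(0, 1, KEY_B),
        KEY(1, 0, KEY_ENTER),
    };

    static const struct matrix_keymap_data example_keymap_data = {
        .keymap       = example_keymap,
        .keymap_size  = ARRAY_SIZE(example_keymap),
    };

    static const unsigned int example_row_gpios[] = { 10, 11 };
    static const unsigned int example_col_gpios[] = { 20, 21 };

    static struct matrix_keypad_platform_data example_pdata = {
        .keymap_data       = &example_keymap_data,
        .row_gpios         = example_row_gpios,
        .col_gpios         = example_col_gpios,
        .num_row_gpios     = ARRAY_SIZE(example_row_gpios),
        .num_col_gpios     = ARRAY_SIZE(example_col_gpios),
        .active_low        = 1,
        .debounce_ms       = 10,
        .col_scan_delay_us = 10,
    };

    static struct platform_device example_keypad = {
        .name = "matrix-keypad",
        .id   = -1,
        .dev  = { .platform_data = &example_pdata },
    };
    /* board init would call platform_device_register(&example_keypad); */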
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 706c6b5ed5f4..9a74127e4d17 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
11obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o 11obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
12obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o 12obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
13obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o 13obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
14obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
15obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o 14obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
16obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o 15obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
17obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o 16obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
@@ -33,10 +32,8 @@ obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
33obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o 32obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
34obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o 33obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o
35obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o 34obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
36obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
37obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o 35obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
38obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o 36obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
39obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
40obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o 37obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
41obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o 38obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
42obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o 39obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o
diff --git a/drivers/input/keyboard/corgikbd.c b/drivers/input/keyboard/corgikbd.c
deleted file mode 100644
index 634af6a8e6b3..000000000000
--- a/drivers/input/keyboard/corgikbd.c
+++ /dev/null
@@ -1,414 +0,0 @@
1/*
2 * Keyboard driver for Sharp Corgi models (SL-C7xx)
3 *
4 * Copyright (c) 2004-2005 Richard Purdie
5 *
6 * Based on xtkbd.c/locomkbd.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/delay.h>
15#include <linux/platform_device.h>
16#include <linux/init.h>
17#include <linux/input.h>
18#include <linux/interrupt.h>
19#include <linux/jiffies.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22
23#include <mach/corgi.h>
24#include <mach/pxa2xx-gpio.h>
25#include <asm/hardware/scoop.h>
26
27#define KB_ROWS 8
28#define KB_COLS 12
29#define KB_ROWMASK(r) (1 << (r))
30#define SCANCODE(r,c) ( ((r)<<4) + (c) + 1 )
31/* zero code, 124 scancodes */
32#define NR_SCANCODES ( SCANCODE(KB_ROWS-1,KB_COLS-1) +1 +1 )
33
34#define SCAN_INTERVAL (50) /* ms */
35#define HINGE_SCAN_INTERVAL (250) /* ms */
36
37#define CORGI_KEY_CALENDER KEY_F1
38#define CORGI_KEY_ADDRESS KEY_F2
39#define CORGI_KEY_FN KEY_F3
40#define CORGI_KEY_CANCEL KEY_F4
41#define CORGI_KEY_OFF KEY_SUSPEND
42#define CORGI_KEY_EXOK KEY_F5
43#define CORGI_KEY_EXCANCEL KEY_F6
44#define CORGI_KEY_EXJOGDOWN KEY_F7
45#define CORGI_KEY_EXJOGUP KEY_F8
46#define CORGI_KEY_JAP1 KEY_LEFTCTRL
47#define CORGI_KEY_JAP2 KEY_LEFTALT
48#define CORGI_KEY_MAIL KEY_F10
49#define CORGI_KEY_OK KEY_F11
50#define CORGI_KEY_MENU KEY_F12
51
52static unsigned char corgikbd_keycode[NR_SCANCODES] = {
53 0, /* 0 */
54 0, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, 0, 0, 0, 0, 0, 0, 0, /* 1-16 */
55 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, 0, 0, 0, 0, 0, 0, 0, /* 17-32 */
56 KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
57 CORGI_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
58 CORGI_KEY_ADDRESS, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, 0, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, /* 65-80 */
59 CORGI_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, 0, CORGI_KEY_FN, 0, 0, 0, 0, /* 81-96 */
60 KEY_SYSRQ, CORGI_KEY_JAP1, CORGI_KEY_JAP2, CORGI_KEY_CANCEL, CORGI_KEY_OK, CORGI_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0, /* 97-112 */
61 CORGI_KEY_OFF, CORGI_KEY_EXOK, CORGI_KEY_EXCANCEL, CORGI_KEY_EXJOGDOWN, CORGI_KEY_EXJOGUP, 0, 0, 0, 0, 0, 0, 0, /* 113-124 */
62};
63
64
65struct corgikbd {
66 unsigned char keycode[ARRAY_SIZE(corgikbd_keycode)];
67 struct input_dev *input;
68
69 spinlock_t lock;
70 struct timer_list timer;
71 struct timer_list htimer;
72
73 unsigned int suspended;
74 unsigned long suspend_jiffies;
75};
76
77#define KB_DISCHARGE_DELAY 10
78#define KB_ACTIVATE_DELAY 10
79
80/* Helper functions for reading the keyboard matrix
81 * Note: We should really be using the generic gpio functions to alter
82 * GPDR but it requires a function call per GPIO bit which is
83 * excessive when we need to access 12 bits at once, multiple times.
84 * These functions must be called within local_irq_save()/local_irq_restore()
85 * or similar.
86 */
87static inline void corgikbd_discharge_all(void)
88{
89 /* STROBE All HiZ */
90 GPCR2 = CORGI_GPIO_ALL_STROBE_BIT;
91 GPDR2 &= ~CORGI_GPIO_ALL_STROBE_BIT;
92}
93
94static inline void corgikbd_activate_all(void)
95{
96 /* STROBE ALL -> High */
97 GPSR2 = CORGI_GPIO_ALL_STROBE_BIT;
98 GPDR2 |= CORGI_GPIO_ALL_STROBE_BIT;
99
100 udelay(KB_DISCHARGE_DELAY);
101
102 /* Clear any interrupts we may have triggered when altering the GPIO lines */
103 GEDR1 = CORGI_GPIO_HIGH_SENSE_BIT;
104 GEDR2 = CORGI_GPIO_LOW_SENSE_BIT;
105}
106
107static inline void corgikbd_activate_col(int col)
108{
109 /* STROBE col -> High, not col -> HiZ */
110 GPSR2 = CORGI_GPIO_STROBE_BIT(col);
111 GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
112}
113
114static inline void corgikbd_reset_col(int col)
115{
116 /* STROBE col -> Low */
117 GPCR2 = CORGI_GPIO_STROBE_BIT(col);
118 /* STROBE col -> out, not col -> HiZ */
119 GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
120}
121
122#define GET_ROWS_STATUS(c) (((GPLR1 & CORGI_GPIO_HIGH_SENSE_BIT) >> CORGI_GPIO_HIGH_SENSE_RSHIFT) | ((GPLR2 & CORGI_GPIO_LOW_SENSE_BIT) << CORGI_GPIO_LOW_SENSE_LSHIFT))
123
124/*
125 * The corgi keyboard only generates interrupts when a key is pressed.
126 * When a key is pressed, we enable a timer which then scans the
127 * keyboard to detect when the key is released.
128 */
129
130/* Scan the hardware keyboard and push any changes up through the input layer */
131static void corgikbd_scankeyboard(struct corgikbd *corgikbd_data)
132{
133 unsigned int row, col, rowd;
134 unsigned long flags;
135 unsigned int num_pressed;
136
137 if (corgikbd_data->suspended)
138 return;
139
140 spin_lock_irqsave(&corgikbd_data->lock, flags);
141
142 num_pressed = 0;
143 for (col = 0; col < KB_COLS; col++) {
144 /*
 145 * Discharge the output driver capacitance
146 * in the keyboard matrix. (Yes it is significant..)
147 */
148
149 corgikbd_discharge_all();
150 udelay(KB_DISCHARGE_DELAY);
151
152 corgikbd_activate_col(col);
153 udelay(KB_ACTIVATE_DELAY);
154
155 rowd = GET_ROWS_STATUS(col);
156 for (row = 0; row < KB_ROWS; row++) {
157 unsigned int scancode, pressed;
158
159 scancode = SCANCODE(row, col);
160 pressed = rowd & KB_ROWMASK(row);
161
162 input_report_key(corgikbd_data->input, corgikbd_data->keycode[scancode], pressed);
163
164 if (pressed)
165 num_pressed++;
166
167 if (pressed && (corgikbd_data->keycode[scancode] == CORGI_KEY_OFF)
168 && time_after(jiffies, corgikbd_data->suspend_jiffies + HZ)) {
169 input_event(corgikbd_data->input, EV_PWR, CORGI_KEY_OFF, 1);
170 corgikbd_data->suspend_jiffies=jiffies;
171 }
172 }
173 corgikbd_reset_col(col);
174 }
175
176 corgikbd_activate_all();
177
178 input_sync(corgikbd_data->input);
179
180 /* if any keys are pressed, enable the timer */
181 if (num_pressed)
182 mod_timer(&corgikbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
183
184 spin_unlock_irqrestore(&corgikbd_data->lock, flags);
185}
186
187/*
188 * corgi keyboard interrupt handler.
189 */
190static irqreturn_t corgikbd_interrupt(int irq, void *dev_id)
191{
192 struct corgikbd *corgikbd_data = dev_id;
193
194 if (!timer_pending(&corgikbd_data->timer)) {
195 /** wait chattering delay **/
196 udelay(20);
197 corgikbd_scankeyboard(corgikbd_data);
198 }
199
200 return IRQ_HANDLED;
201}
202
203/*
204 * corgi timer checking for released keys
205 */
206static void corgikbd_timer_callback(unsigned long data)
207{
208 struct corgikbd *corgikbd_data = (struct corgikbd *) data;
209 corgikbd_scankeyboard(corgikbd_data);
210}
211
212/*
213 * The hinge switches generate no interrupt so they need to be
214 * monitored by a timer.
215 *
216 * We debounce the switches and pass them to the input system.
217 *
218 * gprr == 0x00 - Keyboard with Landscape Screen
219 * 0x08 - No Keyboard with Portrait Screen
220 * 0x0c - Keyboard and Screen Closed
221 */
222
223#define READ_GPIO_BIT(x) (GPLR(x) & GPIO_bit(x))
224#define HINGE_STABLE_COUNT 2
225static int sharpsl_hinge_state;
226static int hinge_count;
227
228static void corgikbd_hinge_timer(unsigned long data)
229{
230 struct corgikbd *corgikbd_data = (struct corgikbd *) data;
231 unsigned long gprr;
232 unsigned long flags;
233
234 gprr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_GPRR) & (CORGI_SCP_SWA | CORGI_SCP_SWB);
235 gprr |= (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0);
236 if (gprr != sharpsl_hinge_state) {
237 hinge_count = 0;
238 sharpsl_hinge_state = gprr;
239 } else if (hinge_count < HINGE_STABLE_COUNT) {
240 hinge_count++;
241 if (hinge_count >= HINGE_STABLE_COUNT) {
242 spin_lock_irqsave(&corgikbd_data->lock, flags);
243
244 input_report_switch(corgikbd_data->input, SW_LID, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0));
245 input_report_switch(corgikbd_data->input, SW_TABLET_MODE, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0));
246 input_report_switch(corgikbd_data->input, SW_HEADPHONE_INSERT, (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0));
247 input_sync(corgikbd_data->input);
248
249 spin_unlock_irqrestore(&corgikbd_data->lock, flags);
250 }
251 }
252 mod_timer(&corgikbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
253}
254
255#ifdef CONFIG_PM
256static int corgikbd_suspend(struct platform_device *dev, pm_message_t state)
257{
258 int i;
259 struct corgikbd *corgikbd = platform_get_drvdata(dev);
260
261 corgikbd->suspended = 1;
262 /* strobe 0 is the power key so this can't be made an input for
263 powersaving therefore i = 1 */
264 for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
265 pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_IN);
266
267 return 0;
268}
269
270static int corgikbd_resume(struct platform_device *dev)
271{
272 int i;
273 struct corgikbd *corgikbd = platform_get_drvdata(dev);
274
275 for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
276 pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
277
278 /* Upon resume, ignore the suspend key for a short while */
279 corgikbd->suspend_jiffies=jiffies;
280 corgikbd->suspended = 0;
281
282 return 0;
283}
284#else
285#define corgikbd_suspend NULL
286#define corgikbd_resume NULL
287#endif
288
289static int __devinit corgikbd_probe(struct platform_device *pdev)
290{
291 struct corgikbd *corgikbd;
292 struct input_dev *input_dev;
293 int i, err = -ENOMEM;
294
295 corgikbd = kzalloc(sizeof(struct corgikbd), GFP_KERNEL);
296 input_dev = input_allocate_device();
297 if (!corgikbd || !input_dev)
298 goto fail;
299
300 platform_set_drvdata(pdev, corgikbd);
301
302 corgikbd->input = input_dev;
303 spin_lock_init(&corgikbd->lock);
304
305 /* Init Keyboard rescan timer */
306 init_timer(&corgikbd->timer);
307 corgikbd->timer.function = corgikbd_timer_callback;
308 corgikbd->timer.data = (unsigned long) corgikbd;
309
310 /* Init Hinge Timer */
311 init_timer(&corgikbd->htimer);
312 corgikbd->htimer.function = corgikbd_hinge_timer;
313 corgikbd->htimer.data = (unsigned long) corgikbd;
314
315 corgikbd->suspend_jiffies=jiffies;
316
317 memcpy(corgikbd->keycode, corgikbd_keycode, sizeof(corgikbd->keycode));
318
319 input_dev->name = "Corgi Keyboard";
320 input_dev->phys = "corgikbd/input0";
321 input_dev->id.bustype = BUS_HOST;
322 input_dev->id.vendor = 0x0001;
323 input_dev->id.product = 0x0001;
324 input_dev->id.version = 0x0100;
325 input_dev->dev.parent = &pdev->dev;
326
327 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
328 BIT_MASK(EV_PWR) | BIT_MASK(EV_SW);
329 input_dev->keycode = corgikbd->keycode;
330 input_dev->keycodesize = sizeof(unsigned char);
331 input_dev->keycodemax = ARRAY_SIZE(corgikbd_keycode);
332
333 for (i = 0; i < ARRAY_SIZE(corgikbd_keycode); i++)
334 set_bit(corgikbd->keycode[i], input_dev->keybit);
335 clear_bit(0, input_dev->keybit);
336 set_bit(SW_LID, input_dev->swbit);
337 set_bit(SW_TABLET_MODE, input_dev->swbit);
338 set_bit(SW_HEADPHONE_INSERT, input_dev->swbit);
339
340 err = input_register_device(corgikbd->input);
341 if (err)
342 goto fail;
343
344 mod_timer(&corgikbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
345
346 /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
347 for (i = 0; i < CORGI_KEY_SENSE_NUM; i++) {
348 pxa_gpio_mode(CORGI_GPIO_KEY_SENSE(i) | GPIO_IN);
349 if (request_irq(CORGI_IRQ_GPIO_KEY_SENSE(i), corgikbd_interrupt,
350 IRQF_DISABLED | IRQF_TRIGGER_RISING,
351 "corgikbd", corgikbd))
352 printk(KERN_WARNING "corgikbd: Can't get IRQ: %d!\n", i);
353 }
354
355 /* Set Strobe lines as outputs - set high */
356 for (i = 0; i < CORGI_KEY_STROBE_NUM; i++)
357 pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
358
359 /* Setup the headphone jack as an input */
360 pxa_gpio_mode(CORGI_GPIO_AK_INT | GPIO_IN);
361
362 return 0;
363
364 fail: input_free_device(input_dev);
365 kfree(corgikbd);
366 return err;
367}
368
369static int __devexit corgikbd_remove(struct platform_device *pdev)
370{
371 int i;
372 struct corgikbd *corgikbd = platform_get_drvdata(pdev);
373
374 for (i = 0; i < CORGI_KEY_SENSE_NUM; i++)
375 free_irq(CORGI_IRQ_GPIO_KEY_SENSE(i), corgikbd);
376
377 del_timer_sync(&corgikbd->htimer);
378 del_timer_sync(&corgikbd->timer);
379
380 input_unregister_device(corgikbd->input);
381
382 kfree(corgikbd);
383
384 return 0;
385}
386
387static struct platform_driver corgikbd_driver = {
388 .probe = corgikbd_probe,
389 .remove = __devexit_p(corgikbd_remove),
390 .suspend = corgikbd_suspend,
391 .resume = corgikbd_resume,
392 .driver = {
393 .name = "corgi-keyboard",
394 .owner = THIS_MODULE,
395 },
396};
397
398static int __init corgikbd_init(void)
399{
400 return platform_driver_register(&corgikbd_driver);
401}
402
403static void __exit corgikbd_exit(void)
404{
405 platform_driver_unregister(&corgikbd_driver);
406}
407
408module_init(corgikbd_init);
409module_exit(corgikbd_exit);
410
411MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
412MODULE_DESCRIPTION("Corgi Keyboard Driver");
413MODULE_LICENSE("GPL v2");
414MODULE_ALIAS("platform:corgi-keyboard");
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
deleted file mode 100644
index 13967422658c..000000000000
--- a/drivers/input/keyboard/spitzkbd.c
+++ /dev/null
@@ -1,496 +0,0 @@
1/*
2 * Keyboard driver for Sharp Spitz, Borzoi and Akita (SL-Cxx00 series)
3 *
4 * Copyright (c) 2005 Richard Purdie
5 *
6 * Based on corgikbd.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/delay.h>
15#include <linux/platform_device.h>
16#include <linux/init.h>
17#include <linux/input.h>
18#include <linux/interrupt.h>
19#include <linux/jiffies.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22
23#include <mach/spitz.h>
24#include <mach/pxa2xx-gpio.h>
25
26#define KB_ROWS 7
27#define KB_COLS 11
28#define KB_ROWMASK(r) (1 << (r))
29#define SCANCODE(r,c) (((r)<<4) + (c) + 1)
30#define NR_SCANCODES ((KB_ROWS<<4) + 1)
31
32#define SCAN_INTERVAL (50) /* ms */
33#define HINGE_SCAN_INTERVAL (150) /* ms */
34
35#define SPITZ_KEY_CALENDER KEY_F1
36#define SPITZ_KEY_ADDRESS KEY_F2
37#define SPITZ_KEY_FN KEY_F3
38#define SPITZ_KEY_CANCEL KEY_F4
39#define SPITZ_KEY_EXOK KEY_F5
40#define SPITZ_KEY_EXCANCEL KEY_F6
41#define SPITZ_KEY_EXJOGDOWN KEY_F7
42#define SPITZ_KEY_EXJOGUP KEY_F8
43#define SPITZ_KEY_JAP1 KEY_LEFTALT
44#define SPITZ_KEY_JAP2 KEY_RIGHTCTRL
45#define SPITZ_KEY_SYNC KEY_F9
46#define SPITZ_KEY_MAIL KEY_F10
47#define SPITZ_KEY_OK KEY_F11
48#define SPITZ_KEY_MENU KEY_F12
49
50static unsigned char spitzkbd_keycode[NR_SCANCODES] = {
51 0, /* 0 */
52 KEY_LEFTCTRL, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, SPITZ_KEY_EXOK, SPITZ_KEY_EXCANCEL, 0, 0, 0, 0, 0, /* 1-16 */
53 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, SPITZ_KEY_EXJOGDOWN, SPITZ_KEY_EXJOGUP, 0, 0, 0, 0, 0, /* 17-32 */
54 KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
55 SPITZ_KEY_ADDRESS, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
56 SPITZ_KEY_CALENDER, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, /* 65-80 */
57 SPITZ_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, SPITZ_KEY_FN, 0, 0, 0, 0, 0, /* 81-96 */
58 KEY_SYSRQ, SPITZ_KEY_JAP1, SPITZ_KEY_JAP2, SPITZ_KEY_CANCEL, SPITZ_KEY_OK, SPITZ_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0 /* 97-112 */
59};
60
61static int spitz_strobes[] = {
62 SPITZ_GPIO_KEY_STROBE0,
63 SPITZ_GPIO_KEY_STROBE1,
64 SPITZ_GPIO_KEY_STROBE2,
65 SPITZ_GPIO_KEY_STROBE3,
66 SPITZ_GPIO_KEY_STROBE4,
67 SPITZ_GPIO_KEY_STROBE5,
68 SPITZ_GPIO_KEY_STROBE6,
69 SPITZ_GPIO_KEY_STROBE7,
70 SPITZ_GPIO_KEY_STROBE8,
71 SPITZ_GPIO_KEY_STROBE9,
72 SPITZ_GPIO_KEY_STROBE10,
73};
74
75static int spitz_senses[] = {
76 SPITZ_GPIO_KEY_SENSE0,
77 SPITZ_GPIO_KEY_SENSE1,
78 SPITZ_GPIO_KEY_SENSE2,
79 SPITZ_GPIO_KEY_SENSE3,
80 SPITZ_GPIO_KEY_SENSE4,
81 SPITZ_GPIO_KEY_SENSE5,
82 SPITZ_GPIO_KEY_SENSE6,
83};
84
85struct spitzkbd {
86 unsigned char keycode[ARRAY_SIZE(spitzkbd_keycode)];
87 struct input_dev *input;
88 char phys[32];
89
90 spinlock_t lock;
91 struct timer_list timer;
92 struct timer_list htimer;
93
94 unsigned int suspended;
95 unsigned long suspend_jiffies;
96};
97
98#define KB_DISCHARGE_DELAY 10
99#define KB_ACTIVATE_DELAY 10
100
101/* Helper functions for reading the keyboard matrix
102 * Note: We should really be using the generic gpio functions to alter
103 * GPDR but it requires a function call per GPIO bit which is
104 * excessive when we need to access 11 bits at once, multiple times.
105 * These functions must be called within local_irq_save()/local_irq_restore()
106 * or similar.
107 */
108static inline void spitzkbd_discharge_all(void)
109{
110 /* STROBE All HiZ */
111 GPCR0 = SPITZ_GPIO_G0_STROBE_BIT;
112 GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
113 GPCR1 = SPITZ_GPIO_G1_STROBE_BIT;
114 GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
115 GPCR2 = SPITZ_GPIO_G2_STROBE_BIT;
116 GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
117 GPCR3 = SPITZ_GPIO_G3_STROBE_BIT;
118 GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
119}
120
121static inline void spitzkbd_activate_all(void)
122{
123 /* STROBE ALL -> High */
124 GPSR0 = SPITZ_GPIO_G0_STROBE_BIT;
125 GPDR0 |= SPITZ_GPIO_G0_STROBE_BIT;
126 GPSR1 = SPITZ_GPIO_G1_STROBE_BIT;
127 GPDR1 |= SPITZ_GPIO_G1_STROBE_BIT;
128 GPSR2 = SPITZ_GPIO_G2_STROBE_BIT;
129 GPDR2 |= SPITZ_GPIO_G2_STROBE_BIT;
130 GPSR3 = SPITZ_GPIO_G3_STROBE_BIT;
131 GPDR3 |= SPITZ_GPIO_G3_STROBE_BIT;
132
133 udelay(KB_DISCHARGE_DELAY);
134
135 /* Clear any interrupts we may have triggered when altering the GPIO lines */
136 GEDR0 = SPITZ_GPIO_G0_SENSE_BIT;
137 GEDR1 = SPITZ_GPIO_G1_SENSE_BIT;
138 GEDR2 = SPITZ_GPIO_G2_SENSE_BIT;
139 GEDR3 = SPITZ_GPIO_G3_SENSE_BIT;
140}
141
142static inline void spitzkbd_activate_col(int col)
143{
144 int gpio = spitz_strobes[col];
145 GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
146 GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
147 GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
148 GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
149 GPSR(gpio) = GPIO_bit(gpio);
150 GPDR(gpio) |= GPIO_bit(gpio);
151}
152
153static inline void spitzkbd_reset_col(int col)
154{
155 int gpio = spitz_strobes[col];
156 GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
157 GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
158 GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
159 GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
160 GPCR(gpio) = GPIO_bit(gpio);
161 GPDR(gpio) |= GPIO_bit(gpio);
162}
163
164static inline int spitzkbd_get_row_status(int col)
165{
166 return ((GPLR0 >> 12) & 0x01) | ((GPLR0 >> 16) & 0x02)
167 | ((GPLR2 >> 25) & 0x04) | ((GPLR1 << 1) & 0x08)
168 | ((GPLR1 >> 0) & 0x10) | ((GPLR1 >> 1) & 0x60);
169}
170
171/*
172 * The spitz keyboard only generates interrupts when a key is pressed.
173 * When a key is pressed, we enable a timer which then scans the
174 * keyboard to detect when the key is released.
175 */
176
177/* Scan the hardware keyboard and push any changes up through the input layer */
178static void spitzkbd_scankeyboard(struct spitzkbd *spitzkbd_data)
179{
180 unsigned int row, col, rowd;
181 unsigned long flags;
182 unsigned int num_pressed, pwrkey = ((GPLR(SPITZ_GPIO_ON_KEY) & GPIO_bit(SPITZ_GPIO_ON_KEY)) != 0);
183
184 if (spitzkbd_data->suspended)
185 return;
186
187 spin_lock_irqsave(&spitzkbd_data->lock, flags);
188
189 num_pressed = 0;
190 for (col = 0; col < KB_COLS; col++) {
191 /*
 192 * Discharge the output driver capacitance
193 * in the keyboard matrix. (Yes it is significant..)
194 */
195
196 spitzkbd_discharge_all();
197 udelay(KB_DISCHARGE_DELAY);
198
199 spitzkbd_activate_col(col);
200 udelay(KB_ACTIVATE_DELAY);
201
202 rowd = spitzkbd_get_row_status(col);
203 for (row = 0; row < KB_ROWS; row++) {
204 unsigned int scancode, pressed;
205
206 scancode = SCANCODE(row, col);
207 pressed = rowd & KB_ROWMASK(row);
208
209 input_report_key(spitzkbd_data->input, spitzkbd_data->keycode[scancode], pressed);
210
211 if (pressed)
212 num_pressed++;
213 }
214 spitzkbd_reset_col(col);
215 }
216
217 spitzkbd_activate_all();
218
219 input_report_key(spitzkbd_data->input, SPITZ_KEY_SYNC, (GPLR(SPITZ_GPIO_SYNC) & GPIO_bit(SPITZ_GPIO_SYNC)) != 0 );
220 input_report_key(spitzkbd_data->input, KEY_SUSPEND, pwrkey);
221
222 if (pwrkey && time_after(jiffies, spitzkbd_data->suspend_jiffies + msecs_to_jiffies(1000))) {
223 input_event(spitzkbd_data->input, EV_PWR, KEY_SUSPEND, 1);
224 spitzkbd_data->suspend_jiffies = jiffies;
225 }
226
227 input_sync(spitzkbd_data->input);
228
229 /* if any keys are pressed, enable the timer */
230 if (num_pressed)
231 mod_timer(&spitzkbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
232
233 spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
234}
235
236/*
237 * spitz keyboard interrupt handler.
238 */
239static irqreturn_t spitzkbd_interrupt(int irq, void *dev_id)
240{
241 struct spitzkbd *spitzkbd_data = dev_id;
242
243 if (!timer_pending(&spitzkbd_data->timer)) {
244 /** wait chattering delay **/
245 udelay(20);
246 spitzkbd_scankeyboard(spitzkbd_data);
247 }
248
249 return IRQ_HANDLED;
250}
251
252/*
253 * spitz timer checking for released keys
254 */
255static void spitzkbd_timer_callback(unsigned long data)
256{
257 struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
258
259 spitzkbd_scankeyboard(spitzkbd_data);
260}
261
262/*
263 * The hinge switches generate an interrupt.
264 * We debounce the switches and pass them to the input system.
265 */
266
267static irqreturn_t spitzkbd_hinge_isr(int irq, void *dev_id)
268{
269 struct spitzkbd *spitzkbd_data = dev_id;
270
271 if (!timer_pending(&spitzkbd_data->htimer))
272 mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
273
274 return IRQ_HANDLED;
275}
276
277#define HINGE_STABLE_COUNT 2
278static int sharpsl_hinge_state;
279static int hinge_count;
280
281static void spitzkbd_hinge_timer(unsigned long data)
282{
283 struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
284 unsigned long state;
285 unsigned long flags;
286
287 state = GPLR(SPITZ_GPIO_SWA) & (GPIO_bit(SPITZ_GPIO_SWA)|GPIO_bit(SPITZ_GPIO_SWB));
288 state |= (GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT));
289 if (state != sharpsl_hinge_state) {
290 hinge_count = 0;
291 sharpsl_hinge_state = state;
292 } else if (hinge_count < HINGE_STABLE_COUNT) {
293 hinge_count++;
294 }
295
296 if (hinge_count >= HINGE_STABLE_COUNT) {
297 spin_lock_irqsave(&spitzkbd_data->lock, flags);
298
299 input_report_switch(spitzkbd_data->input, SW_LID, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0));
300 input_report_switch(spitzkbd_data->input, SW_TABLET_MODE, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0));
301 input_report_switch(spitzkbd_data->input, SW_HEADPHONE_INSERT, ((GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT)) != 0));
302 input_sync(spitzkbd_data->input);
303
304 spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
305 } else {
306 mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
307 }
308}
309
310#ifdef CONFIG_PM
311static int spitzkbd_suspend(struct platform_device *dev, pm_message_t state)
312{
313 int i;
314 struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
315 spitzkbd->suspended = 1;
316
317 /* Set Strobe lines as inputs - *except* strobe line 0 leave this
318 enabled so we can detect a power button press for resume */
319 for (i = 1; i < SPITZ_KEY_STROBE_NUM; i++)
320 pxa_gpio_mode(spitz_strobes[i] | GPIO_IN);
321
322 return 0;
323}
324
325static int spitzkbd_resume(struct platform_device *dev)
326{
327 int i;
328 struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
329
330 for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
331 pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
332
333 /* Upon resume, ignore the suspend key for a short while */
334 spitzkbd->suspend_jiffies = jiffies;
335 spitzkbd->suspended = 0;
336
337 return 0;
338}
339#else
340#define spitzkbd_suspend NULL
341#define spitzkbd_resume NULL
342#endif
343
344static int __devinit spitzkbd_probe(struct platform_device *dev)
345{
346 struct spitzkbd *spitzkbd;
347 struct input_dev *input_dev;
348 int i, err = -ENOMEM;
349
350 spitzkbd = kzalloc(sizeof(struct spitzkbd), GFP_KERNEL);
351 input_dev = input_allocate_device();
352 if (!spitzkbd || !input_dev)
353 goto fail;
354
355 platform_set_drvdata(dev, spitzkbd);
356 strcpy(spitzkbd->phys, "spitzkbd/input0");
357
358 spin_lock_init(&spitzkbd->lock);
359
360 /* Init Keyboard rescan timer */
361 init_timer(&spitzkbd->timer);
362 spitzkbd->timer.function = spitzkbd_timer_callback;
363 spitzkbd->timer.data = (unsigned long) spitzkbd;
364
365 /* Init Hinge Timer */
366 init_timer(&spitzkbd->htimer);
367 spitzkbd->htimer.function = spitzkbd_hinge_timer;
368 spitzkbd->htimer.data = (unsigned long) spitzkbd;
369
370 spitzkbd->suspend_jiffies = jiffies;
371
372 spitzkbd->input = input_dev;
373
374 input_dev->name = "Spitz Keyboard";
375 input_dev->phys = spitzkbd->phys;
376 input_dev->dev.parent = &dev->dev;
377
378 input_dev->id.bustype = BUS_HOST;
379 input_dev->id.vendor = 0x0001;
380 input_dev->id.product = 0x0001;
381 input_dev->id.version = 0x0100;
382
383 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
384 BIT_MASK(EV_PWR) | BIT_MASK(EV_SW);
385 input_dev->keycode = spitzkbd->keycode;
386 input_dev->keycodesize = sizeof(unsigned char);
387 input_dev->keycodemax = ARRAY_SIZE(spitzkbd_keycode);
388
389 memcpy(spitzkbd->keycode, spitzkbd_keycode, sizeof(spitzkbd->keycode));
390 for (i = 0; i < ARRAY_SIZE(spitzkbd_keycode); i++)
391 set_bit(spitzkbd->keycode[i], input_dev->keybit);
392 clear_bit(0, input_dev->keybit);
393 set_bit(KEY_SUSPEND, input_dev->keybit);
394 set_bit(SW_LID, input_dev->swbit);
395 set_bit(SW_TABLET_MODE, input_dev->swbit);
396 set_bit(SW_HEADPHONE_INSERT, input_dev->swbit);
397
398 err = input_register_device(input_dev);
399 if (err)
400 goto fail;
401
402 mod_timer(&spitzkbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
403
404 /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
405 for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++) {
406 pxa_gpio_mode(spitz_senses[i] | GPIO_IN);
407 if (request_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd_interrupt,
408 IRQF_DISABLED|IRQF_TRIGGER_RISING,
409 "Spitzkbd Sense", spitzkbd))
410 printk(KERN_WARNING "spitzkbd: Can't get Sense IRQ: %d!\n", i);
411 }
412
413 /* Set Strobe lines as outputs - set high */
414 for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
415 pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
416
417 pxa_gpio_mode(SPITZ_GPIO_SYNC | GPIO_IN);
418 pxa_gpio_mode(SPITZ_GPIO_ON_KEY | GPIO_IN);
419 pxa_gpio_mode(SPITZ_GPIO_SWA | GPIO_IN);
420 pxa_gpio_mode(SPITZ_GPIO_SWB | GPIO_IN);
421
422 request_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd_interrupt,
423 IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
424 "Spitzkbd Sync", spitzkbd);
425 request_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd_interrupt,
426 IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
427 "Spitzkbd PwrOn", spitzkbd);
428 request_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd_hinge_isr,
429 IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
430 "Spitzkbd SWA", spitzkbd);
431 request_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd_hinge_isr,
432 IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
433 "Spitzkbd SWB", spitzkbd);
434 request_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd_hinge_isr,
435 IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
436 "Spitzkbd HP", spitzkbd);
437
438 return 0;
439
440 fail: input_free_device(input_dev);
441 kfree(spitzkbd);
442 return err;
443}
444
445static int __devexit spitzkbd_remove(struct platform_device *dev)
446{
447 int i;
448 struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
449
450 for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++)
451 free_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd);
452
453 free_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd);
454 free_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd);
455 free_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd);
456 free_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd);
457 free_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd);
458
459 del_timer_sync(&spitzkbd->htimer);
460 del_timer_sync(&spitzkbd->timer);
461
462 input_unregister_device(spitzkbd->input);
463
464 kfree(spitzkbd);
465
466 return 0;
467}
468
469static struct platform_driver spitzkbd_driver = {
470 .probe = spitzkbd_probe,
471 .remove = __devexit_p(spitzkbd_remove),
472 .suspend = spitzkbd_suspend,
473 .resume = spitzkbd_resume,
474 .driver = {
475 .name = "spitz-keyboard",
476 .owner = THIS_MODULE,
477 },
478};
479
480static int __init spitzkbd_init(void)
481{
482 return platform_driver_register(&spitzkbd_driver);
483}
484
485static void __exit spitzkbd_exit(void)
486{
487 platform_driver_unregister(&spitzkbd_driver);
488}
489
490module_init(spitzkbd_init);
491module_exit(spitzkbd_exit);
492
493MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
494MODULE_DESCRIPTION("Spitz Keyboard Driver");
495MODULE_LICENSE("GPL v2");
496MODULE_ALIAS("platform:spitz-keyboard");
diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c
deleted file mode 100644
index 3910f269cfc8..000000000000
--- a/drivers/input/keyboard/tosakbd.c
+++ /dev/null
@@ -1,431 +0,0 @@
1/*
2 * Keyboard driver for Sharp Tosa models (SL-6000x)
3 *
4 * Copyright (c) 2005 Dirk Opfer
5 * Copyright (c) 2007 Dmitry Baryshkov
6 *
7 * Based on xtkbd.c/locomkbd.c/corgikbd.c
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/input.h>
19#include <linux/delay.h>
20#include <linux/interrupt.h>
21#include <linux/slab.h>
22
23#include <mach/gpio.h>
24#include <mach/tosa.h>
25
26#define KB_ROWMASK(r) (1 << (r))
27#define SCANCODE(r, c) (((r)<<4) + (c) + 1)
28#define NR_SCANCODES (SCANCODE(TOSA_KEY_SENSE_NUM - 1, TOSA_KEY_STROBE_NUM - 1) + 1)
29
30#define SCAN_INTERVAL (HZ/10)
31
32#define KB_DISCHARGE_DELAY 10
33#define KB_ACTIVATE_DELAY 10
34
35static unsigned short tosakbd_keycode[NR_SCANCODES] = {
360,
370, KEY_W, 0, 0, 0, KEY_K, KEY_BACKSPACE, KEY_P,
380, 0, 0, 0, 0, 0, 0, 0,
39KEY_Q, KEY_E, KEY_T, KEY_Y, 0, KEY_O, KEY_I, KEY_COMMA,
400, 0, 0, 0, 0, 0, 0, 0,
41KEY_A, KEY_D, KEY_G, KEY_U, 0, KEY_L, KEY_ENTER, KEY_DOT,
420, 0, 0, 0, 0, 0, 0, 0,
43KEY_Z, KEY_C, KEY_V, KEY_J, TOSA_KEY_ADDRESSBOOK, TOSA_KEY_CANCEL, TOSA_KEY_CENTER, TOSA_KEY_OK,
44KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, 0,
45KEY_S, KEY_R, KEY_B, KEY_N, TOSA_KEY_CALENDAR, TOSA_KEY_HOMEPAGE, KEY_LEFTCTRL, TOSA_KEY_LIGHT,
460, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0,
47KEY_TAB, KEY_SLASH, KEY_H, KEY_M, TOSA_KEY_MENU, 0, KEY_UP, 0,
480, 0, TOSA_KEY_FN, 0, 0, 0, 0, 0,
49KEY_X, KEY_F, KEY_SPACE, KEY_APOSTROPHE, TOSA_KEY_MAIL, KEY_LEFT, KEY_DOWN, KEY_RIGHT,
500, 0, 0,
51};
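The SCANCODE() packing above puts the row in the high nibble and the column in
the low nibble, offset by one so that scancode 0 stays KEY_RESERVED (hence the
leading 0 entry in the table). A small userspace-style sanity check, assuming
nothing beyond the two macros themselves:

#include <assert.h>

#define KB_ROWMASK(r)  (1 << (r))
#define SCANCODE(r, c) (((r) << 4) + (c) + 1)

int main(void)
{
	/* row 2, column 3 lands at (2 << 4) + 3 + 1 = 0x24, index 36 */
	assert(SCANCODE(2, 3) == 0x24);
	/* no (row, column) pair maps to scancode 0 (KEY_RESERVED) */
	assert(SCANCODE(0, 0) == 1);
	return 0;
}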
52
53struct tosakbd {
54 unsigned short keycode[ARRAY_SIZE(tosakbd_keycode)];
55 struct input_dev *input;
56 bool suspended;
57 spinlock_t lock; /* protect kbd scanning */
58 struct timer_list timer;
59};
60
61
62/* Helper functions for reading the keyboard matrix
63 * Note: We should really be using the generic gpio functions to alter
64 * GPDR but it requires a function call per GPIO bit which is
65 * excessive when we need to access 12 bits at once, multiple times.
66 * These functions must be called within local_irq_save()/local_irq_restore()
67 * or similar.
68 */
69#define GET_ROWS_STATUS(c) ((GPLR2 & TOSA_GPIO_ALL_SENSE_BIT) >> TOSA_GPIO_ALL_SENSE_RSHIFT)
70
71static inline void tosakbd_discharge_all(void)
72{
73 /* STROBE All HiZ */
74 GPCR1 = TOSA_GPIO_HIGH_STROBE_BIT;
75 GPDR1 &= ~TOSA_GPIO_HIGH_STROBE_BIT;
76 GPCR2 = TOSA_GPIO_LOW_STROBE_BIT;
77 GPDR2 &= ~TOSA_GPIO_LOW_STROBE_BIT;
78}
79
80static inline void tosakbd_activate_all(void)
81{
82 /* STROBE ALL -> High */
83 GPSR1 = TOSA_GPIO_HIGH_STROBE_BIT;
84 GPDR1 |= TOSA_GPIO_HIGH_STROBE_BIT;
85 GPSR2 = TOSA_GPIO_LOW_STROBE_BIT;
86 GPDR2 |= TOSA_GPIO_LOW_STROBE_BIT;
87
88 udelay(KB_DISCHARGE_DELAY);
89
90 /* STATE CLEAR */
91 GEDR2 |= TOSA_GPIO_ALL_SENSE_BIT;
92}
93
94static inline void tosakbd_activate_col(int col)
95{
96 if (col <= 5) {
97 /* STROBE col -> High, not col -> HiZ */
98 GPSR1 = TOSA_GPIO_STROBE_BIT(col);
99 GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
100 } else {
101 /* STROBE col -> High, not col -> HiZ */
102 GPSR2 = TOSA_GPIO_STROBE_BIT(col);
103 GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
104 }
105}
106
107static inline void tosakbd_reset_col(int col)
108{
109 if (col <= 5) {
110 /* STROBE col -> Low */
111 GPCR1 = TOSA_GPIO_STROBE_BIT(col);
112 /* STROBE col -> out, not col -> HiZ */
113 GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
114 } else {
115 /* STROBE col -> Low */
116 GPCR2 = TOSA_GPIO_STROBE_BIT(col);
117 /* STROBE col -> out, not col -> HiZ */
118 GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
119 }
120}
121/*
122 * The tosa keyboard only generates interrupts when a key is pressed.
123 * So when a key is pressed, we enable a timer. This timer scans the
124 * keyboard, and this is how we detect when the key is released.
125 */
126
127/* Scan the hardware keyboard and push any changes up through the input layer */
128static void tosakbd_scankeyboard(struct platform_device *dev)
129{
130 struct tosakbd *tosakbd = platform_get_drvdata(dev);
131 unsigned int row, col, rowd;
132 unsigned long flags;
133 unsigned int num_pressed = 0;
134
135 spin_lock_irqsave(&tosakbd->lock, flags);
136
137 if (tosakbd->suspended)
138 goto out;
139
140 for (col = 0; col < TOSA_KEY_STROBE_NUM; col++) {
141 /*
142		 * Discharge the output driver capacitance
143		 * in the keyboard matrix. (Yes, it is significant.)
144 */
145 tosakbd_discharge_all();
146 udelay(KB_DISCHARGE_DELAY);
147
148 tosakbd_activate_col(col);
149 udelay(KB_ACTIVATE_DELAY);
150
151 rowd = GET_ROWS_STATUS(col);
152
153 for (row = 0; row < TOSA_KEY_SENSE_NUM; row++) {
154 unsigned int scancode, pressed;
155 scancode = SCANCODE(row, col);
156 pressed = rowd & KB_ROWMASK(row);
157
158 if (pressed && !tosakbd->keycode[scancode])
159 dev_warn(&dev->dev,
160 "unhandled scancode: 0x%02x\n",
161 scancode);
162
163 input_report_key(tosakbd->input,
164 tosakbd->keycode[scancode],
165 pressed);
166 if (pressed)
167 num_pressed++;
168 }
169
170 tosakbd_reset_col(col);
171 }
172
173 tosakbd_activate_all();
174
175 input_sync(tosakbd->input);
176
177 /* if any keys are pressed, enable the timer */
178 if (num_pressed)
179 mod_timer(&tosakbd->timer, jiffies + SCAN_INTERVAL);
180
181 out:
182 spin_unlock_irqrestore(&tosakbd->lock, flags);
183}
184
185/*
186 * tosa keyboard interrupt handler.
187 */
188static irqreturn_t tosakbd_interrupt(int irq, void *__dev)
189{
190 struct platform_device *dev = __dev;
191 struct tosakbd *tosakbd = platform_get_drvdata(dev);
192
193 if (!timer_pending(&tosakbd->timer)) {
194		/* wait out the key-chatter (debounce) delay */
195 udelay(20);
196 tosakbd_scankeyboard(dev);
197 }
198
199 return IRQ_HANDLED;
200}
201
202/*
203 * tosa timer checking for released keys
204 */
205static void tosakbd_timer_callback(unsigned long __dev)
206{
207 struct platform_device *dev = (struct platform_device *)__dev;
208
209 tosakbd_scankeyboard(dev);
210}
211
212#ifdef CONFIG_PM
213static int tosakbd_suspend(struct platform_device *dev, pm_message_t state)
214{
215 struct tosakbd *tosakbd = platform_get_drvdata(dev);
216 unsigned long flags;
217
218 spin_lock_irqsave(&tosakbd->lock, flags);
219 tosakbd->suspended = true;
220 spin_unlock_irqrestore(&tosakbd->lock, flags);
221
222 del_timer_sync(&tosakbd->timer);
223
224 return 0;
225}
226
227static int tosakbd_resume(struct platform_device *dev)
228{
229 struct tosakbd *tosakbd = platform_get_drvdata(dev);
230
231 tosakbd->suspended = false;
232 tosakbd_scankeyboard(dev);
233
234 return 0;
235}
236#else
237#define tosakbd_suspend NULL
238#define tosakbd_resume NULL
239#endif
240
241static int __devinit tosakbd_probe(struct platform_device *pdev)
242{
243 int i;
244 struct tosakbd *tosakbd;
245 struct input_dev *input_dev;
246 int error;
247
248 tosakbd = kzalloc(sizeof(struct tosakbd), GFP_KERNEL);
249 if (!tosakbd)
250 return -ENOMEM;
251
252 input_dev = input_allocate_device();
253 if (!input_dev) {
254 kfree(tosakbd);
255 return -ENOMEM;
256 }
257
258 platform_set_drvdata(pdev, tosakbd);
259
260 spin_lock_init(&tosakbd->lock);
261
262 /* Init Keyboard rescan timer */
263 init_timer(&tosakbd->timer);
264 tosakbd->timer.function = tosakbd_timer_callback;
265 tosakbd->timer.data = (unsigned long) pdev;
266
267 tosakbd->input = input_dev;
268
269 input_set_drvdata(input_dev, tosakbd);
270 input_dev->name = "Tosa Keyboard";
271 input_dev->phys = "tosakbd/input0";
272 input_dev->dev.parent = &pdev->dev;
273
274 input_dev->id.bustype = BUS_HOST;
275 input_dev->id.vendor = 0x0001;
276 input_dev->id.product = 0x0001;
277 input_dev->id.version = 0x0100;
278
279 input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
280 input_dev->keycode = tosakbd->keycode;
281 input_dev->keycodesize = sizeof(tosakbd->keycode[0]);
282 input_dev->keycodemax = ARRAY_SIZE(tosakbd_keycode);
283
284 memcpy(tosakbd->keycode, tosakbd_keycode, sizeof(tosakbd_keycode));
285
286 for (i = 0; i < ARRAY_SIZE(tosakbd_keycode); i++)
287 __set_bit(tosakbd->keycode[i], input_dev->keybit);
288 __clear_bit(KEY_RESERVED, input_dev->keybit);
289
290	/* Set up sense interrupts - rising-edge detect, sense lines as inputs */
291 for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
292 int gpio = TOSA_GPIO_KEY_SENSE(i);
293 int irq;
294 error = gpio_request(gpio, "tosakbd");
295 if (error < 0) {
296			printk(KERN_ERR "tosakbd: failed to request GPIO %d, "
297				"error %d\n", gpio, error);
298 goto fail;
299 }
300
301 error = gpio_direction_input(TOSA_GPIO_KEY_SENSE(i));
302 if (error < 0) {
303 printk(KERN_ERR "tosakbd: failed to configure input"
304 " direction for GPIO %d, error %d\n",
305 gpio, error);
306 gpio_free(gpio);
307 goto fail;
308 }
309
310 irq = gpio_to_irq(gpio);
311 if (irq < 0) {
312 error = irq;
313 printk(KERN_ERR "gpio-keys: Unable to get irq number"
314 " for GPIO %d, error %d\n",
315 gpio, error);
316 gpio_free(gpio);
317 goto fail;
318 }
319
320 error = request_irq(irq, tosakbd_interrupt,
321 IRQF_DISABLED | IRQF_TRIGGER_RISING,
322 "tosakbd", pdev);
323
324 if (error) {
325 printk("tosakbd: Can't get IRQ: %d: error %d!\n",
326 irq, error);
327 gpio_free(gpio);
328 goto fail;
329 }
330 }
331
332 /* Set Strobe lines as outputs - set high */
333 for (i = 0; i < TOSA_KEY_STROBE_NUM; i++) {
334 int gpio = TOSA_GPIO_KEY_STROBE(i);
335 error = gpio_request(gpio, "tosakbd");
336 if (error < 0) {
337			printk(KERN_ERR "tosakbd: failed to request GPIO %d, "
338				"error %d\n", gpio, error);
339 goto fail2;
340 }
341
342 error = gpio_direction_output(gpio, 1);
343 if (error < 0) {
344			printk(KERN_ERR "tosakbd: failed to configure output"
345 " direction for GPIO %d, error %d\n",
346 gpio, error);
347 gpio_free(gpio);
348 goto fail2;
349 }
350
351 }
352
353 error = input_register_device(input_dev);
354 if (error) {
355 printk(KERN_ERR "tosakbd: Unable to register input device, "
356 "error: %d\n", error);
357 goto fail2;
358 }
359
360 printk(KERN_INFO "input: Tosa Keyboard Registered\n");
361
362 return 0;
363
364fail2:
365 while (--i >= 0)
366 gpio_free(TOSA_GPIO_KEY_STROBE(i));
367
368 i = TOSA_KEY_SENSE_NUM;
369fail:
370 while (--i >= 0) {
371 free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), pdev);
372 gpio_free(TOSA_GPIO_KEY_SENSE(i));
373 }
374
375 platform_set_drvdata(pdev, NULL);
376 input_free_device(input_dev);
377 kfree(tosakbd);
378
379 return error;
380}
381
382static int __devexit tosakbd_remove(struct platform_device *dev)
383{
384 int i;
385 struct tosakbd *tosakbd = platform_get_drvdata(dev);
386
387 for (i = 0; i < TOSA_KEY_STROBE_NUM; i++)
388 gpio_free(TOSA_GPIO_KEY_STROBE(i));
389
390 for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
391 free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), dev);
392 gpio_free(TOSA_GPIO_KEY_SENSE(i));
393 }
394
395 del_timer_sync(&tosakbd->timer);
396
397 input_unregister_device(tosakbd->input);
398
399 kfree(tosakbd);
400
401 return 0;
402}
403
404static struct platform_driver tosakbd_driver = {
405 .probe = tosakbd_probe,
406 .remove = __devexit_p(tosakbd_remove),
407 .suspend = tosakbd_suspend,
408 .resume = tosakbd_resume,
409 .driver = {
410 .name = "tosa-keyboard",
411 .owner = THIS_MODULE,
412 },
413};
414
415static int __init tosakbd_init(void)
416{
417 return platform_driver_register(&tosakbd_driver);
418}
419
420static void __exit tosakbd_exit(void)
421{
422 platform_driver_unregister(&tosakbd_driver);
423}
424
425module_init(tosakbd_init);
426module_exit(tosakbd_exit);
427
428MODULE_AUTHOR("Dirk Opfer <Dirk@Opfer-Online.de>");
429MODULE_DESCRIPTION("Tosa Keyboard Driver");
430MODULE_LICENSE("GPL v2");
431MODULE_ALIAS("platform:tosa-keyboard");
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index ea4e1fd12651..f080dd31499b 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -30,7 +30,7 @@ MODULE_ALIAS("platform:pcspkr");
30#include <asm/i8253.h> 30#include <asm/i8253.h>
31#else 31#else
32#include <asm/8253pit.h> 32#include <asm/8253pit.h>
33static DEFINE_SPINLOCK(i8253_lock); 33static DEFINE_RAW_SPINLOCK(i8253_lock);
34#endif 34#endif
35 35
36static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) 36static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
@@ -50,7 +50,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
50 if (value > 20 && value < 32767) 50 if (value > 20 && value < 32767)
51 count = PIT_TICK_RATE / value; 51 count = PIT_TICK_RATE / value;
52 52
53 spin_lock_irqsave(&i8253_lock, flags); 53 raw_spin_lock_irqsave(&i8253_lock, flags);
54 54
55 if (count) { 55 if (count) {
56 /* set command for counter 2, 2 byte write */ 56 /* set command for counter 2, 2 byte write */
@@ -65,7 +65,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
65 outb(inb_p(0x61) & 0xFC, 0x61); 65 outb(inb_p(0x61) & 0xFC, 0x61);
66 } 66 }
67 67
68 spin_unlock_irqrestore(&i8253_lock, flags); 68 raw_spin_unlock_irqrestore(&i8253_lock, flags);
69 69
70 return 0; 70 return 0;
71} 71}
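The pcspkr hunk above swaps the fallback i8253 lock over to a raw spinlock:
on PREEMPT_RT, spinlock_t may become a sleeping lock, which is not allowed
around the PIT's inb()/outb() programming, while raw_spinlock_t always spins.
A minimal sketch of the resulting pattern, with the port I/O elided:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(hw_lock);

static void program_pit(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&hw_lock, flags);
	/* non-sleeping port I/O (outb/inb sequences) only in here */
	raw_spin_unlock_irqrestore(&hw_lock, flags);
}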
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 0520c2e19927..112b4ee52ff2 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -185,7 +185,7 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
185 int fingers; 185 int fingers;
186 static int old_fingers; 186 static int old_fingers;
187 187
188 if (etd->fw_version_maj == 0x01) { 188 if (etd->fw_version < 0x020000) {
189 /* 189 /*
190 * byte 0: D U p1 p2 1 p3 R L 190 * byte 0: D U p1 p2 1 p3 R L
191 * byte 1: f 0 th tw x9 x8 y9 y8 191 * byte 1: f 0 th tw x9 x8 y9 y8
@@ -227,7 +227,7 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
227 input_report_key(dev, BTN_LEFT, packet[0] & 0x01); 227 input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
228 input_report_key(dev, BTN_RIGHT, packet[0] & 0x02); 228 input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
229 229
230 if ((etd->fw_version_maj == 0x01) && 230 if (etd->fw_version < 0x020000 &&
231 (etd->capabilities & ETP_CAP_HAS_ROCKER)) { 231 (etd->capabilities & ETP_CAP_HAS_ROCKER)) {
232 /* rocker up */ 232 /* rocker up */
233 input_report_key(dev, BTN_FORWARD, packet[0] & 0x40); 233 input_report_key(dev, BTN_FORWARD, packet[0] & 0x40);
@@ -321,7 +321,7 @@ static int elantech_check_parity_v1(struct psmouse *psmouse)
321 unsigned char p1, p2, p3; 321 unsigned char p1, p2, p3;
322 322
323 /* Parity bits are placed differently */ 323 /* Parity bits are placed differently */
324 if (etd->fw_version_maj == 0x01) { 324 if (etd->fw_version < 0x020000) {
325 /* byte 0: D U p1 p2 1 p3 R L */ 325 /* byte 0: D U p1 p2 1 p3 R L */
326 p1 = (packet[0] & 0x20) >> 5; 326 p1 = (packet[0] & 0x20) >> 5;
327 p2 = (packet[0] & 0x10) >> 4; 327 p2 = (packet[0] & 0x10) >> 4;
@@ -457,7 +457,7 @@ static void elantech_set_input_params(struct psmouse *psmouse)
457 switch (etd->hw_version) { 457 switch (etd->hw_version) {
458 case 1: 458 case 1:
459 /* Rocker button */ 459 /* Rocker button */
460 if ((etd->fw_version_maj == 0x01) && 460 if (etd->fw_version < 0x020000 &&
461 (etd->capabilities & ETP_CAP_HAS_ROCKER)) { 461 (etd->capabilities & ETP_CAP_HAS_ROCKER)) {
462 __set_bit(BTN_FORWARD, dev->keybit); 462 __set_bit(BTN_FORWARD, dev->keybit);
463 __set_bit(BTN_BACK, dev->keybit); 463 __set_bit(BTN_BACK, dev->keybit);
@@ -686,15 +686,14 @@ int elantech_init(struct psmouse *psmouse)
686 pr_err("elantech.c: failed to query firmware version.\n"); 686 pr_err("elantech.c: failed to query firmware version.\n");
687 goto init_fail; 687 goto init_fail;
688 } 688 }
689 etd->fw_version_maj = param[0]; 689
690 etd->fw_version_min = param[2]; 690 etd->fw_version = (param[0] << 16) | (param[1] << 8) | param[2];
691 691
692 /* 692 /*
693 * Assume every version greater than this is new EeePC style 693 * Assume every version greater than this is new EeePC style
694 * hardware with 6 byte packets 694 * hardware with 6 byte packets
695 */ 695 */
696 if ((etd->fw_version_maj == 0x02 && etd->fw_version_min >= 0x30) || 696 if (etd->fw_version >= 0x020030) {
697 etd->fw_version_maj > 0x02) {
698 etd->hw_version = 2; 697 etd->hw_version = 2;
699 /* For now show extra debug information */ 698 /* For now show extra debug information */
700 etd->debug = 1; 699 etd->debug = 1;
@@ -704,8 +703,9 @@ int elantech_init(struct psmouse *psmouse)
704 etd->hw_version = 1; 703 etd->hw_version = 1;
705 etd->paritycheck = 1; 704 etd->paritycheck = 1;
706 } 705 }
707 pr_info("elantech.c: assuming hardware version %d, firmware version %d.%d\n", 706
708 etd->hw_version, etd->fw_version_maj, etd->fw_version_min); 707 pr_info("elantech.c: assuming hardware version %d, firmware version %d.%d.%d\n",
708 etd->hw_version, param[0], param[1], param[2]);
709 709
710 if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY, param)) { 710 if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY, param)) {
711 pr_err("elantech.c: failed to query capabilities.\n"); 711 pr_err("elantech.c: failed to query capabilities.\n");
@@ -720,8 +720,8 @@ int elantech_init(struct psmouse *psmouse)
720 * a touch action starts causing the mouse cursor or scrolled page 720 * a touch action starts causing the mouse cursor or scrolled page
721 * to jump. Enable a workaround. 721 * to jump. Enable a workaround.
722 */ 722 */
723 if (etd->fw_version_maj == 0x02 && etd->fw_version_min == 0x22) { 723 if (etd->fw_version == 0x020022) {
724 pr_info("elantech.c: firmware version 2.34 detected, " 724 pr_info("elantech.c: firmware version 2.0.34 detected, "
725 "enabling jumpy cursor workaround\n"); 725 "enabling jumpy cursor workaround\n");
726 etd->jumpy_cursor = 1; 726 etd->jumpy_cursor = 1;
727 } 727 }
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index feac5f7af966..ac57bde1bb9f 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -100,11 +100,10 @@ struct elantech_data {
100 unsigned char reg_26; 100 unsigned char reg_26;
101 unsigned char debug; 101 unsigned char debug;
102 unsigned char capabilities; 102 unsigned char capabilities;
103 unsigned char fw_version_maj;
104 unsigned char fw_version_min;
105 unsigned char hw_version;
106 unsigned char paritycheck; 103 unsigned char paritycheck;
107 unsigned char jumpy_cursor; 104 unsigned char jumpy_cursor;
105 unsigned char hw_version;
106 unsigned int fw_version;
108 unsigned char parity[256]; 107 unsigned char parity[256];
109}; 108};
110 109
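The elantech refactor above collapses the separate major/minor firmware bytes
into one packed integer, so every version gate becomes a single comparison.
A standalone sketch of the packing, reusing the 0x020022 (2.0.34) example
from the hunk:

#include <stdio.h>

static unsigned int pack_fw_version(unsigned char b0, unsigned char b1,
				    unsigned char b2)
{
	return (b0 << 16) | (b1 << 8) | b2;
}

int main(void)
{
	unsigned int fw = pack_fw_version(0x02, 0x00, 0x22); /* 2.0.34 */

	printf("fw_version = 0x%06x\n", fw);
	if (fw < 0x020000)
		puts("pre-2.0 firmware: v1 packet format");
	else if (fw >= 0x020030)
		puts("EeePC-style hardware: 6-byte packets");
	return 0;
}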
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index cbc807264940..a3c97315a473 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1394,6 +1394,7 @@ static int psmouse_reconnect(struct serio *serio)
1394 struct psmouse *psmouse = serio_get_drvdata(serio); 1394 struct psmouse *psmouse = serio_get_drvdata(serio);
1395 struct psmouse *parent = NULL; 1395 struct psmouse *parent = NULL;
1396 struct serio_driver *drv = serio->drv; 1396 struct serio_driver *drv = serio->drv;
1397 unsigned char type;
1397 int rc = -1; 1398 int rc = -1;
1398 1399
1399 if (!drv || !psmouse) { 1400 if (!drv || !psmouse) {
@@ -1413,10 +1414,15 @@ static int psmouse_reconnect(struct serio *serio)
1413 if (psmouse->reconnect) { 1414 if (psmouse->reconnect) {
1414 if (psmouse->reconnect(psmouse)) 1415 if (psmouse->reconnect(psmouse))
1415 goto out; 1416 goto out;
1416 } else if (psmouse_probe(psmouse) < 0 || 1417 } else {
1417 psmouse->type != psmouse_extensions(psmouse, 1418 psmouse_reset(psmouse);
1418 psmouse_max_proto, false)) { 1419
1419 goto out; 1420 if (psmouse_probe(psmouse) < 0)
1421 goto out;
1422
1423 type = psmouse_extensions(psmouse, psmouse_max_proto, false);
1424 if (psmouse->type != type)
1425 goto out;
1420 } 1426 }
1421 1427
1422 /* ok, the device type (and capabilities) match the old one, 1428 /* ok, the device type (and capabilities) match the old one,
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 8a8fa4d2d6a8..6c0f1712f55b 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -99,22 +99,6 @@ config TOUCHSCREEN_BITSY
99 To compile this driver as a module, choose M here: the 99 To compile this driver as a module, choose M here: the
100 module will be called h3600_ts_input. 100 module will be called h3600_ts_input.
101 101
102config TOUCHSCREEN_CORGI
103 tristate "SharpSL (Corgi and Spitz series) touchscreen driver (DEPRECATED)"
104 depends on PXA_SHARPSL
105 select CORGI_SSP_DEPRECATED
106 help
107 Say Y here to enable the driver for the touchscreen on the
108 Sharp SL-C7xx and SL-Cxx00 series of PDAs.
109
110 If unsure, say N.
111
112 To compile this driver as a module, choose M here: the
113 module will be called corgi_ts.
114
115	  NOTE: this driver is deprecated; try enabling the SPI and the
116	  generic ADS7846-based touchscreen driver instead.
117
118config TOUCHSCREEN_DA9034 102config TOUCHSCREEN_DA9034
119 tristate "Touchscreen support for Dialog Semiconductor DA9034" 103 tristate "Touchscreen support for Dialog Semiconductor DA9034"
120 depends on PMIC_DA903X 104 depends on PMIC_DA903X
@@ -158,8 +142,8 @@ config TOUCHSCREEN_FUJITSU
158 module will be called fujitsu-ts. 142 module will be called fujitsu-ts.
159 143
160config TOUCHSCREEN_S3C2410 144config TOUCHSCREEN_S3C2410
161 tristate "Samsung S3C2410 touchscreen input driver" 145 tristate "Samsung S3C2410/generic touchscreen input driver"
162 depends on ARCH_S3C2410 146 depends on ARCH_S3C2410 || SAMSUNG_DEV_TS
163 select S3C24XX_ADC 147 select S3C24XX_ADC
164 help 148 help
165 Say Y here if you have the s3c2410 touchscreen. 149 Say Y here if you have the s3c2410 touchscreen.
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 7fef7d5cca23..41145d074dec 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
12obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o 12obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
13obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o 13obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
14obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o 14obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
15obj-$(CONFIG_TOUCHSCREEN_CORGI) += corgi_ts.o
16obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o 15obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
17obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o 16obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
18obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o 17obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index e019d53d1ab4..0d2d7e54b465 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -156,9 +156,14 @@ struct ser_req {
156 u16 reset; 156 u16 reset;
157 u16 ref_on; 157 u16 ref_on;
158 u16 command; 158 u16 command;
159 u16 sample;
160 struct spi_message msg; 159 struct spi_message msg;
161 struct spi_transfer xfer[6]; 160 struct spi_transfer xfer[6];
161
162 /*
163 * DMA (thus cache coherency maintenance) requires the
164 * transfer buffers to live in their own cache lines.
165 */
166 u16 sample ____cacheline_aligned;
162}; 167};
163 168
164struct ad7877 { 169struct ad7877 {
@@ -182,8 +187,6 @@ struct ad7877 {
182 u8 averaging; 187 u8 averaging;
183 u8 pen_down_acc_interval; 188 u8 pen_down_acc_interval;
184 189
185 u16 conversion_data[AD7877_NR_SENSE];
186
187 struct spi_transfer xfer[AD7877_NR_SENSE + 2]; 190 struct spi_transfer xfer[AD7877_NR_SENSE + 2];
188 struct spi_message msg; 191 struct spi_message msg;
189 192
@@ -195,6 +198,12 @@ struct ad7877 {
195 spinlock_t lock; 198 spinlock_t lock;
196 struct timer_list timer; /* P: lock */ 199 struct timer_list timer; /* P: lock */
197 unsigned pending:1; /* P: lock */ 200 unsigned pending:1; /* P: lock */
201
202 /*
203 * DMA (thus cache coherency maintenance) requires the
204 * transfer buffers to live in their own cache lines.
205 */
206 u16 conversion_data[AD7877_NR_SENSE] ____cacheline_aligned;
198}; 207};
199 208
200static int gpio3; 209static int gpio3;
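Both ad7877 hunks above apply the same rule: a buffer the SPI core may DMA
into must not share a cache line with fields the CPU touches concurrently,
or the cache maintenance done around the transfer can corrupt the neighbours.
Moving the buffer to the end of the struct and marking it ____cacheline_aligned
gives it its own cache line(s). A schematic sketch (abbreviated fields, not
the driver's full struct):

#include <linux/cache.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

struct dma_safe_example {
	u16			control;	/* CPU-owned bookkeeping */
	struct spi_message	msg;
	struct spi_transfer	xfer[6];

	/*
	 * DMA-visible buffer: last member, own cache line, so a flush or
	 * invalidate of this line never touches the fields above.
	 */
	u16			sample ____cacheline_aligned;
};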
diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
deleted file mode 100644
index 94a1919d439d..000000000000
--- a/drivers/input/touchscreen/corgi_ts.c
+++ /dev/null
@@ -1,385 +0,0 @@
1/*
2 * Touchscreen driver for Sharp SL-C7xx and SL-Cxx00 models
3 *
4 * Copyright (c) 2004-2005 Richard Purdie
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12
13#include <linux/delay.h>
14#include <linux/platform_device.h>
15#include <linux/init.h>
16#include <linux/input.h>
17#include <linux/interrupt.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/irq.h>
21
22#include <mach/sharpsl.h>
23#include <mach/hardware.h>
24#include <mach/pxa2xx-gpio.h>
25
26
27#define PWR_MODE_ACTIVE 0
28#define PWR_MODE_SUSPEND 1
29
30#define X_AXIS_MAX 3830
31#define X_AXIS_MIN 150
32#define Y_AXIS_MAX 3830
33#define Y_AXIS_MIN 190
34#define PRESSURE_MIN 0
35#define PRESSURE_MAX 15000
36
37struct ts_event {
38 short pressure;
39 short x;
40 short y;
41};
42
43struct corgi_ts {
44 struct input_dev *input;
45 struct timer_list timer;
46 struct ts_event tc;
47 int pendown;
48 int power_mode;
49 int irq_gpio;
50 struct corgits_machinfo *machinfo;
51};
52
53#ifdef CONFIG_PXA25x
54#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C0, 0" : "=r"(a))
55#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C0, 0" : "=r"(x))
56#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(x))
57#endif
58#ifdef CONFIG_PXA27x
59#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C1, 0" : "=r"(a))
60#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C1, 0" : "=r"(x))
61#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C1, 0" : : "r"(x))
62#endif
63
64/* ADS7846 Touch Screen Controller bit definitions */
65#define ADSCTRL_PD0 (1u << 0) /* PD0 */
66#define ADSCTRL_PD1 (1u << 1) /* PD1 */
67#define ADSCTRL_DFR (1u << 2) /* SER/DFR */
68#define ADSCTRL_MOD (1u << 3) /* Mode */
69#define ADSCTRL_ADR_SH 4 /* Address setting */
70#define ADSCTRL_STS (1u << 7) /* Start Bit */
71
72/* External Functions */
73extern unsigned int get_clk_frequency_khz(int info);
74
75static unsigned long calc_waittime(struct corgi_ts *corgi_ts)
76{
77 unsigned long hsync_invperiod = corgi_ts->machinfo->get_hsync_invperiod();
78
79 if (hsync_invperiod)
80 return get_clk_frequency_khz(0)*1000/hsync_invperiod;
81 else
82 return 0;
83}
84
85static int sync_receive_data_send_cmd(struct corgi_ts *corgi_ts, int doReceive, int doSend,
86 unsigned int address, unsigned long wait_time)
87{
88 unsigned long timer1 = 0, timer2, pmnc = 0;
89 int pos = 0;
90
91 if (wait_time && doSend) {
92 PMNC_GET(pmnc);
93 if (!(pmnc & 0x01))
94 PMNC_SET(0x01);
95
96 /* polling HSync */
97 corgi_ts->machinfo->wait_hsync();
98 /* get CCNT */
99 CCNT(timer1);
100 }
101
102	if (doReceive)
103 pos = corgi_ssp_ads7846_get();
104
105 if (doSend) {
106 int cmd = ADSCTRL_PD0 | ADSCTRL_PD1 | (address << ADSCTRL_ADR_SH) | ADSCTRL_STS;
107 /* dummy command */
108 corgi_ssp_ads7846_put(cmd);
109 corgi_ssp_ads7846_get();
110
111 if (wait_time) {
112 /* Wait after HSync */
113 CCNT(timer2);
114			if (timer2 - timer1 > wait_time) {
115 /* too slow - timeout, try again */
116 corgi_ts->machinfo->wait_hsync();
117 /* get CCNT */
118 CCNT(timer1);
119 /* Wait after HSync */
120 CCNT(timer2);
121 }
122 while (timer2 - timer1 < wait_time)
123 CCNT(timer2);
124 }
125 corgi_ssp_ads7846_put(cmd);
126 if (wait_time && !(pmnc & 0x01))
127 PMNC_SET(pmnc);
128 }
129 return pos;
130}
131
132static int read_xydata(struct corgi_ts *corgi_ts)
133{
134 unsigned int x, y, z1, z2;
135 unsigned long flags, wait_time;
136
137 /* critical section */
138 local_irq_save(flags);
139 corgi_ssp_ads7846_lock();
140 wait_time = calc_waittime(corgi_ts);
141
142	/* issue the Y-axis command (dummy; nothing to read back yet) */
143	sync_receive_data_send_cmd(corgi_ts, 0, 1, 1u, wait_time);
144
145	/* issue the Y-axis command again, discarding the read back */
146	sync_receive_data_send_cmd(corgi_ts, 1, 1, 1u, wait_time);
147
148	/* issue the X-axis command; the read back is the Y-axis result */
149	y = sync_receive_data_send_cmd(corgi_ts, 1, 1, 5u, wait_time);
150
151	/* issue the Z1 command; the read back is the X-axis result */
152	x = sync_receive_data_send_cmd(corgi_ts, 1, 1, 3u, wait_time);
153
154	/* issue the Z2 command, reading back Z1, then read back Z2 */
155	z1 = sync_receive_data_send_cmd(corgi_ts, 1, 1, 4u, wait_time);
156	z2 = sync_receive_data_send_cmd(corgi_ts, 1, 0, 4u, wait_time);
157
158 /* Power-Down Enable */
159 corgi_ssp_ads7846_put((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
160 corgi_ssp_ads7846_get();
161
162 corgi_ssp_ads7846_unlock();
163 local_irq_restore(flags);
164
165	if (x == 0 || y == 0 || z1 == 0 || (x * (z2 - z1) / z1) >= 15000) {
166 corgi_ts->tc.pressure = 0;
167 return 0;
168 }
169
170 corgi_ts->tc.x = x;
171 corgi_ts->tc.y = y;
172 corgi_ts->tc.pressure = (x * (z2 - z1)) / z1;
173 return 1;
174}
175
176static void new_data(struct corgi_ts *corgi_ts)
177{
178 struct input_dev *dev = corgi_ts->input;
179
180 if (corgi_ts->power_mode != PWR_MODE_ACTIVE)
181 return;
182
183 if (!corgi_ts->tc.pressure && corgi_ts->pendown == 0)
184 return;
185
186 input_report_abs(dev, ABS_X, corgi_ts->tc.x);
187 input_report_abs(dev, ABS_Y, corgi_ts->tc.y);
188 input_report_abs(dev, ABS_PRESSURE, corgi_ts->tc.pressure);
189 input_report_key(dev, BTN_TOUCH, corgi_ts->pendown);
190 input_sync(dev);
191}
192
193static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer)
194{
195 if ((GPLR(IRQ_TO_GPIO(corgi_ts->irq_gpio)) & GPIO_bit(IRQ_TO_GPIO(corgi_ts->irq_gpio))) == 0) {
196 /* Disable Interrupt */
197 set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_NONE);
198 if (read_xydata(corgi_ts)) {
199 corgi_ts->pendown = 1;
200 new_data(corgi_ts);
201 }
202 mod_timer(&corgi_ts->timer, jiffies + HZ / 100);
203 } else {
204 if (corgi_ts->pendown == 1 || corgi_ts->pendown == 2) {
205 mod_timer(&corgi_ts->timer, jiffies + HZ / 100);
206 corgi_ts->pendown++;
207 return;
208 }
209
210 if (corgi_ts->pendown) {
211 corgi_ts->tc.pressure = 0;
212 new_data(corgi_ts);
213 }
214
215 /* Enable Falling Edge */
216 set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
217 corgi_ts->pendown = 0;
218 }
219}
220
221static void corgi_ts_timer(unsigned long data)
222{
223 struct corgi_ts *corgits_data = (struct corgi_ts *) data;
224
225 ts_interrupt_main(corgits_data, 1);
226}
227
228static irqreturn_t ts_interrupt(int irq, void *dev_id)
229{
230 struct corgi_ts *corgits_data = dev_id;
231
232 ts_interrupt_main(corgits_data, 0);
233 return IRQ_HANDLED;
234}
235
236#ifdef CONFIG_PM
237static int corgits_suspend(struct platform_device *dev, pm_message_t state)
238{
239 struct corgi_ts *corgi_ts = platform_get_drvdata(dev);
240
241 if (corgi_ts->pendown) {
242 del_timer_sync(&corgi_ts->timer);
243 corgi_ts->tc.pressure = 0;
244 new_data(corgi_ts);
245 corgi_ts->pendown = 0;
246 }
247 corgi_ts->power_mode = PWR_MODE_SUSPEND;
248
249 corgi_ssp_ads7846_putget((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
250
251 return 0;
252}
253
254static int corgits_resume(struct platform_device *dev)
255{
256 struct corgi_ts *corgi_ts = platform_get_drvdata(dev);
257
258 corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
259 /* Enable Falling Edge */
260 set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
261 corgi_ts->power_mode = PWR_MODE_ACTIVE;
262
263 return 0;
264}
265#else
266#define corgits_suspend NULL
267#define corgits_resume NULL
268#endif
269
270static int __devinit corgits_probe(struct platform_device *pdev)
271{
272 struct corgi_ts *corgi_ts;
273 struct input_dev *input_dev;
274 int err = -ENOMEM;
275
276 corgi_ts = kzalloc(sizeof(struct corgi_ts), GFP_KERNEL);
277 input_dev = input_allocate_device();
278 if (!corgi_ts || !input_dev)
279 goto fail1;
280
281 platform_set_drvdata(pdev, corgi_ts);
282
283 corgi_ts->machinfo = pdev->dev.platform_data;
284 corgi_ts->irq_gpio = platform_get_irq(pdev, 0);
285
286 if (corgi_ts->irq_gpio < 0) {
287 err = -ENODEV;
288 goto fail1;
289 }
290
291 corgi_ts->input = input_dev;
292
293 init_timer(&corgi_ts->timer);
294 corgi_ts->timer.data = (unsigned long) corgi_ts;
295 corgi_ts->timer.function = corgi_ts_timer;
296
297 input_dev->name = "Corgi Touchscreen";
298 input_dev->phys = "corgits/input0";
299 input_dev->id.bustype = BUS_HOST;
300 input_dev->id.vendor = 0x0001;
301 input_dev->id.product = 0x0002;
302 input_dev->id.version = 0x0100;
303 input_dev->dev.parent = &pdev->dev;
304
305 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
306 input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
307 input_set_abs_params(input_dev, ABS_X, X_AXIS_MIN, X_AXIS_MAX, 0, 0);
308 input_set_abs_params(input_dev, ABS_Y, Y_AXIS_MIN, Y_AXIS_MAX, 0, 0);
309 input_set_abs_params(input_dev, ABS_PRESSURE, PRESSURE_MIN, PRESSURE_MAX, 0, 0);
310
311 pxa_gpio_mode(IRQ_TO_GPIO(corgi_ts->irq_gpio) | GPIO_IN);
312
313	/* Initialize ADS7846 differential reference mode */
314 corgi_ssp_ads7846_putget((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
315 mdelay(5);
316 corgi_ssp_ads7846_putget((3u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
317 mdelay(5);
318 corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
319 mdelay(5);
320 corgi_ssp_ads7846_putget((5u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
321 mdelay(5);
322
323 if (request_irq(corgi_ts->irq_gpio, ts_interrupt, IRQF_DISABLED, "ts", corgi_ts)) {
324 err = -EBUSY;
325 goto fail1;
326 }
327
328 err = input_register_device(corgi_ts->input);
329 if (err)
330 goto fail2;
331
332 corgi_ts->power_mode = PWR_MODE_ACTIVE;
333
334 /* Enable Falling Edge */
335 set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
336
337 return 0;
338
339 fail2: free_irq(corgi_ts->irq_gpio, corgi_ts);
340 fail1: input_free_device(input_dev);
341 kfree(corgi_ts);
342 return err;
343}
344
345static int __devexit corgits_remove(struct platform_device *pdev)
346{
347 struct corgi_ts *corgi_ts = platform_get_drvdata(pdev);
348
349 free_irq(corgi_ts->irq_gpio, corgi_ts);
350 del_timer_sync(&corgi_ts->timer);
351 corgi_ts->machinfo->put_hsync();
352 input_unregister_device(corgi_ts->input);
353 kfree(corgi_ts);
354
355 return 0;
356}
357
358static struct platform_driver corgits_driver = {
359 .probe = corgits_probe,
360 .remove = __devexit_p(corgits_remove),
361 .suspend = corgits_suspend,
362 .resume = corgits_resume,
363 .driver = {
364 .name = "corgi-ts",
365 .owner = THIS_MODULE,
366 },
367};
368
369static int __init corgits_init(void)
370{
371 return platform_driver_register(&corgits_driver);
372}
373
374static void __exit corgits_exit(void)
375{
376 platform_driver_unregister(&corgits_driver);
377}
378
379module_init(corgits_init);
380module_exit(corgits_exit);
381
382MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
383MODULE_DESCRIPTION("Corgi TouchScreen Driver");
384MODULE_LICENSE("GPL");
385MODULE_ALIAS("platform:corgi-ts");
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 98a7d1279486..e0b7c834111d 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -37,9 +37,7 @@
37 37
38#include <plat/adc.h> 38#include <plat/adc.h>
39#include <plat/regs-adc.h> 39#include <plat/regs-adc.h>
40 40#include <plat/ts.h>
41#include <mach/regs-gpio.h>
42#include <mach/ts.h>
43 41
44#define TSC_SLEEP (S3C2410_ADCTSC_PULL_UP_DISABLE | S3C2410_ADCTSC_XY_PST(0)) 42#define TSC_SLEEP (S3C2410_ADCTSC_PULL_UP_DISABLE | S3C2410_ADCTSC_XY_PST(0))
45 43
@@ -57,6 +55,8 @@
57 S3C2410_ADCTSC_AUTO_PST | \ 55 S3C2410_ADCTSC_AUTO_PST | \
58 S3C2410_ADCTSC_XY_PST(0)) 56 S3C2410_ADCTSC_XY_PST(0))
59 57
58#define FEAT_PEN_IRQ (1 << 0) /* HAS ADCCLRINTPNDNUP */
59
60/* Per-touchscreen data. */ 60/* Per-touchscreen data. */
61 61
62/** 62/**
@@ -71,6 +71,7 @@
71 * @irq_tc: The interrupt number for pen up/down interrupt 71 * @irq_tc: The interrupt number for pen up/down interrupt
72 * @count: The number of samples collected. 72 * @count: The number of samples collected.
73 * @shift: The log2 of the maximum count to read in one go. 73 * @shift: The log2 of the maximum count to read in one go.
74 * @features: The features supported by the TSADC module.
74 */ 75 */
75struct s3c2410ts { 76struct s3c2410ts {
76 struct s3c_adc_client *client; 77 struct s3c_adc_client *client;
@@ -83,26 +84,12 @@ struct s3c2410ts {
83 int irq_tc; 84 int irq_tc;
84 int count; 85 int count;
85 int shift; 86 int shift;
87 int features;
86}; 88};
87 89
88static struct s3c2410ts ts; 90static struct s3c2410ts ts;
89 91
90/** 92/**
91 * s3c2410_ts_connect - configure gpio for s3c2410 systems
92 *
93 * Configure the GPIO for the S3C2410 system, where we have external FETs
94 * connected to the device (later systems such as the S3C2440 integrate
95 * these into the device).
96*/
97static inline void s3c2410_ts_connect(void)
98{
99 s3c2410_gpio_cfgpin(S3C2410_GPG(12), S3C2410_GPG12_XMON);
100 s3c2410_gpio_cfgpin(S3C2410_GPG(13), S3C2410_GPG13_nXPON);
101 s3c2410_gpio_cfgpin(S3C2410_GPG(14), S3C2410_GPG14_YMON);
102 s3c2410_gpio_cfgpin(S3C2410_GPG(15), S3C2410_GPG15_nYPON);
103}
104
105/**
106 * get_down - return the down state of the pen 93 * get_down - return the down state of the pen
107 * @data0: The data read from ADCDAT0 register. 94 * @data0: The data read from ADCDAT0 register.
108 * @data1: The data read from ADCDAT1 register. 95 * @data1: The data read from ADCDAT1 register.
@@ -188,6 +175,11 @@ static irqreturn_t stylus_irq(int irq, void *dev_id)
188 else 175 else
189 dev_info(ts.dev, "%s: count=%d\n", __func__, ts.count); 176 dev_info(ts.dev, "%s: count=%d\n", __func__, ts.count);
190 177
178 if (ts.features & FEAT_PEN_IRQ) {
179 /* Clear pen down/up interrupt */
180 writel(0x0, ts.io + S3C64XX_ADCCLRINTPNDNUP);
181 }
182
191 return IRQ_HANDLED; 183 return IRQ_HANDLED;
192} 184}
193 185
@@ -296,9 +288,9 @@ static int __devinit s3c2410ts_probe(struct platform_device *pdev)
296 goto err_clk; 288 goto err_clk;
297 } 289 }
298 290
299 /* Configure the touchscreen external FETs on the S3C2410 */ 291 /* initialise the gpio */
300 if (!platform_get_device_id(pdev)->driver_data) 292 if (info->cfg_gpio)
301 s3c2410_ts_connect(); 293 info->cfg_gpio(to_platform_device(ts.dev));
302 294
303 ts.client = s3c_adc_register(pdev, s3c24xx_ts_select, 295 ts.client = s3c_adc_register(pdev, s3c24xx_ts_select,
304 s3c24xx_ts_conversion, 1); 296 s3c24xx_ts_conversion, 1);
@@ -334,6 +326,7 @@ static int __devinit s3c2410ts_probe(struct platform_device *pdev)
334 ts.input->id.version = 0x0102; 326 ts.input->id.version = 0x0102;
335 327
336 ts.shift = info->oversampling_shift; 328 ts.shift = info->oversampling_shift;
329 ts.features = platform_get_device_id(pdev)->driver_data;
337 330
338 ret = request_irq(ts.irq_tc, stylus_irq, IRQF_DISABLED, 331 ret = request_irq(ts.irq_tc, stylus_irq, IRQF_DISABLED,
339 "s3c2410_ts_pen", ts.input); 332 "s3c2410_ts_pen", ts.input);
@@ -420,15 +413,14 @@ static struct dev_pm_ops s3c_ts_pmops = {
420#endif 413#endif
421 414
422static struct platform_device_id s3cts_driver_ids[] = { 415static struct platform_device_id s3cts_driver_ids[] = {
423 { "s3c2410-ts", 0 }, 416 { "s3c64xx-ts", FEAT_PEN_IRQ },
424 { "s3c2440-ts", 1 },
425 { } 417 { }
426}; 418};
427MODULE_DEVICE_TABLE(platform, s3cts_driver_ids); 419MODULE_DEVICE_TABLE(platform, s3cts_driver_ids);
428 420
429static struct platform_driver s3c_ts_driver = { 421static struct platform_driver s3c_ts_driver = {
430 .driver = { 422 .driver = {
431 .name = "s3c24xx-ts", 423 .name = "samsung-ts",
432 .owner = THIS_MODULE, 424 .owner = THIS_MODULE,
433#ifdef CONFIG_PM 425#ifdef CONFIG_PM
434 .pm = &s3c_ts_pmops, 426 .pm = &s3c_ts_pmops,
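The s3c2410_ts variant split above rides on the platform id table: per-device
feature bits live in platform_device_id.driver_data and come back out in
probe via platform_get_device_id(). A minimal sketch of just that mechanism,
with hypothetical names:

#include <linux/module.h>
#include <linux/platform_device.h>

#define FEAT_PEN_IRQ	(1 << 0)	/* has ADCCLRINTPNDNUP */

static struct platform_device_id ts_ids[] = {
	{ "s3c64xx-ts", FEAT_PEN_IRQ },
	{ }
};
MODULE_DEVICE_TABLE(platform, ts_ids);

static int ts_probe(struct platform_device *pdev)
{
	unsigned long features = platform_get_device_id(pdev)->driver_data;

	if (features & FEAT_PEN_IRQ) {
		/* clear the pen up/down interrupt latch on this variant */
	}
	return 0;
}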
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 9f49d9065791..c53e2417e7d4 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -20,6 +20,7 @@
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/isdnif.h> 21#include <linux/isdnif.h>
22#include <net/net_namespace.h> 22#include <net/net_namespace.h>
23#include <linux/smp_lock.h>
23#include "isdn_divert.h" 24#include "isdn_divert.h"
24 25
25 26
@@ -177,9 +178,7 @@ isdn_divert_close(struct inode *ino, struct file *filep)
177/*********/ 178/*********/
178/* IOCTL */ 179/* IOCTL */
179/*********/ 180/*********/
180static int 181static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
181isdn_divert_ioctl(struct inode *inode, struct file *file,
182 uint cmd, ulong arg)
183{ 182{
184 divert_ioctl dioctl; 183 divert_ioctl dioctl;
185 int i; 184 int i;
@@ -258,6 +257,17 @@ isdn_divert_ioctl(struct inode *inode, struct file *file,
258 return copy_to_user((void __user *)arg, &dioctl, sizeof(dioctl)) ? -EFAULT : 0; 257 return copy_to_user((void __user *)arg, &dioctl, sizeof(dioctl)) ? -EFAULT : 0;
259} /* isdn_divert_ioctl */ 258} /* isdn_divert_ioctl */
260 259
260static long isdn_divert_ioctl(struct file *file, uint cmd, ulong arg)
261{
262 long ret;
263
264 lock_kernel();
265 ret = isdn_divert_ioctl_unlocked(file, cmd, arg);
266 unlock_kernel();
267
268 return ret;
269}
270
261static const struct file_operations isdn_fops = 271static const struct file_operations isdn_fops =
262{ 272{
263 .owner = THIS_MODULE, 273 .owner = THIS_MODULE,
@@ -265,7 +275,7 @@ static const struct file_operations isdn_fops =
265 .read = isdn_divert_read, 275 .read = isdn_divert_read,
266 .write = isdn_divert_write, 276 .write = isdn_divert_write,
267 .poll = isdn_divert_poll, 277 .poll = isdn_divert_poll,
268 .ioctl = isdn_divert_ioctl, 278 .unlocked_ioctl = isdn_divert_ioctl,
269 .open = isdn_divert_open, 279 .open = isdn_divert_open,
270 .release = isdn_divert_close, 280 .release = isdn_divert_close,
271}; 281};
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index a3d5728b6449..f2ab025ad97a 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -349,6 +349,9 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
349 goto disable; 349 goto disable;
350 } 350 }
351 351
352 /* If an interrupt arrived late clean up after it */
353 try_wait_for_completion(&wm831x->auxadc_done);
354
352 /* Ignore the result to allow us to soldier on without IRQ hookup */ 355 /* Ignore the result to allow us to soldier on without IRQ hookup */
353 wait_for_completion_timeout(&wm831x->auxadc_done, msecs_to_jiffies(5)); 356 wait_for_completion_timeout(&wm831x->auxadc_done, msecs_to_jiffies(5));
354 357
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index e400a3bed063..b5807484b4c9 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -363,6 +363,10 @@ int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, int vref)
363 reg |= 1 << channel | WM8350_AUXADC_POLL; 363 reg |= 1 << channel | WM8350_AUXADC_POLL;
364 wm8350_reg_write(wm8350, WM8350_DIGITISER_CONTROL_1, reg); 364 wm8350_reg_write(wm8350, WM8350_DIGITISER_CONTROL_1, reg);
365 365
366 /* If a late IRQ left the completion signalled then consume
367 * the completion. */
368 try_wait_for_completion(&wm8350->auxadc_done);
369
366 /* We ignore the result of the completion and just check for a 370 /* We ignore the result of the completion and just check for a
367 * conversion result, allowing us to soldier on if the IRQ 371 * conversion result, allowing us to soldier on if the IRQ
368 * infrastructure is not set up for the chip. */ 372 * infrastructure is not set up for the chip. */
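Both MFD hunks close the same race: if a previous conversion timed out and its
IRQ arrived late, the completion is left signalled, so the next wait would
return immediately with stale data. Draining it non-blockingly before waiting
restores the pairing. A sketch of the fixed sequence; the chip type and the
start/read helpers are hypothetical stand-ins:

#include <linux/completion.h>
#include <linux/jiffies.h>

struct my_chip {			/* hypothetical stand-in */
	struct completion done;
};

extern void start_conversion(struct my_chip *chip);	/* hypothetical */
extern int read_result(struct my_chip *chip);		/* hypothetical */

static int auxadc_convert(struct my_chip *chip)
{
	start_conversion(chip);

	/* eat a completion left over from a late IRQ, if any */
	try_wait_for_completion(&chip->done);

	/* ignore the outcome so we still work without the IRQ hooked up */
	wait_for_completion_timeout(&chip->done, msecs_to_jiffies(5));

	return read_result(chip);
}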
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c
index e7161c4e3798..db9cd0240c6f 100644
--- a/drivers/misc/vmware_balloon.c
+++ b/drivers/misc/vmware_balloon.c
@@ -41,7 +41,7 @@
41#include <linux/workqueue.h> 41#include <linux/workqueue.h>
42#include <linux/debugfs.h> 42#include <linux/debugfs.h>
43#include <linux/seq_file.h> 43#include <linux/seq_file.h>
44#include <asm/vmware.h> 44#include <asm/hypervisor.h>
45 45
46MODULE_AUTHOR("VMware, Inc."); 46MODULE_AUTHOR("VMware, Inc.");
47MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver"); 47MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
@@ -767,7 +767,7 @@ static int __init vmballoon_init(void)
767 * Check if we are running on VMware's hypervisor and bail out 767 * Check if we are running on VMware's hypervisor and bail out
768 * if we are not. 768 * if we are not.
769 */ 769 */
770 if (!vmware_platform()) 770 if (x86_hyper != &x86_hyper_vmware)
771 return -ENODEV; 771 return -ENODEV;
772 772
773 vmballoon_wq = create_freezeable_workqueue("vmmemctl"); 773 vmballoon_wq = create_freezeable_workqueue("vmmemctl");
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index a6dd7da37357..336d9f553f3e 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -314,8 +314,8 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
314 dmabuf = (unsigned *)tmpv; 314 dmabuf = (unsigned *)tmpv;
315 } 315 }
316 316
317 flush_kernel_dcache_page(sg_page(sg));
317 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); 318 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
318 dmac_flush_range((void *)sgbuffer, ((void *)sgbuffer) + amount);
319 data->bytes_xfered += amount; 319 data->bytes_xfered += amount;
320 if (size == 0) 320 if (size == 0)
321 break; 321 break;
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 88be37d9e9a5..fb279f4ed8b3 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -266,7 +266,7 @@ static int atmci_req_show(struct seq_file *s, void *v)
266 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 266 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
267 cmd->opcode, cmd->arg, cmd->flags, 267 cmd->opcode, cmd->arg, cmd->flags,
268 cmd->resp[0], cmd->resp[1], cmd->resp[2], 268 cmd->resp[0], cmd->resp[1], cmd->resp[2],
269 cmd->resp[2], cmd->error); 269 cmd->resp[3], cmd->error);
270 if (data) 270 if (data)
271 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", 271 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
272 data->bytes_xfered, data->blocks, 272 data->bytes_xfered, data->blocks,
@@ -276,7 +276,7 @@ static int atmci_req_show(struct seq_file *s, void *v)
276 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 276 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
277 stop->opcode, stop->arg, stop->flags, 277 stop->opcode, stop->arg, stop->flags,
278 stop->resp[0], stop->resp[1], stop->resp[2], 278 stop->resp[0], stop->resp[1], stop->resp[2],
279 stop->resp[2], stop->error); 279 stop->resp[3], stop->error);
280 } 280 }
281 281
282 spin_unlock_bh(&slot->host->lock); 282 spin_unlock_bh(&slot->host->lock);
@@ -569,9 +569,10 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
569{ 569{
570 struct mmc_data *data = host->data; 570 struct mmc_data *data = host->data;
571 571
572 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, 572 if (data)
573 ((data->flags & MMC_DATA_WRITE) 573 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
574 ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); 574 ((data->flags & MMC_DATA_WRITE)
575 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
575} 576}
576 577
577static void atmci_stop_dma(struct atmel_mci *host) 578static void atmci_stop_dma(struct atmel_mci *host)
@@ -1099,8 +1100,8 @@ static void atmci_command_complete(struct atmel_mci *host,
1099 "command error: status=0x%08x\n", status); 1100 "command error: status=0x%08x\n", status);
1100 1101
1101 if (cmd->data) { 1102 if (cmd->data) {
1102 host->data = NULL;
1103 atmci_stop_dma(host); 1103 atmci_stop_dma(host);
1104 host->data = NULL;
1104 mci_writel(host, IDR, MCI_NOTBUSY 1105 mci_writel(host, IDR, MCI_NOTBUSY
1105 | MCI_TXRDY | MCI_RXRDY 1106 | MCI_TXRDY | MCI_RXRDY
1106 | ATMCI_DATA_ERROR_FLAGS); 1107 | ATMCI_DATA_ERROR_FLAGS);
@@ -1293,6 +1294,7 @@ static void atmci_tasklet_func(unsigned long priv)
1293 } else { 1294 } else {
1294 data->bytes_xfered = data->blocks * data->blksz; 1295 data->bytes_xfered = data->blocks * data->blksz;
1295 data->error = 0; 1296 data->error = 0;
1297 mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS);
1296 } 1298 }
1297 1299
1298 if (!data->stop) { 1300 if (!data->stop) {
@@ -1751,13 +1753,13 @@ static int __init atmci_probe(struct platform_device *pdev)
1751 ret = -ENODEV; 1753 ret = -ENODEV;
1752 if (pdata->slot[0].bus_width) { 1754 if (pdata->slot[0].bus_width) {
1753 ret = atmci_init_slot(host, &pdata->slot[0], 1755 ret = atmci_init_slot(host, &pdata->slot[0],
1754 MCI_SDCSEL_SLOT_A, 0); 1756 0, MCI_SDCSEL_SLOT_A);
1755 if (!ret) 1757 if (!ret)
1756 nr_slots++; 1758 nr_slots++;
1757 } 1759 }
1758 if (pdata->slot[1].bus_width) { 1760 if (pdata->slot[1].bus_width) {
1759 ret = atmci_init_slot(host, &pdata->slot[1], 1761 ret = atmci_init_slot(host, &pdata->slot[1],
1760 MCI_SDCSEL_SLOT_B, 1); 1762 1, MCI_SDCSEL_SLOT_B);
1761 if (!ret) 1763 if (!ret)
1762 nr_slots++; 1764 nr_slots++;
1763 } 1765 }
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 84c103a7ee13..ff115d920888 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -55,14 +55,16 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
55 host->cclk = host->mclk / (2 * (clk + 1)); 55 host->cclk = host->mclk / (2 * (clk + 1));
56 } 56 }
57 if (host->hw_designer == AMBA_VENDOR_ST) 57 if (host->hw_designer == AMBA_VENDOR_ST)
58 clk |= MCI_FCEN; /* Bug fix in ST IP block */ 58 clk |= MCI_ST_FCEN; /* Bug fix in ST IP block */
59 clk |= MCI_CLK_ENABLE; 59 clk |= MCI_CLK_ENABLE;
60 /* This hasn't proven to be worthwhile */ 60 /* This hasn't proven to be worthwhile */
61 /* clk |= MCI_CLK_PWRSAVE; */ 61 /* clk |= MCI_CLK_PWRSAVE; */
62 } 62 }
63 63
64 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) 64 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
65 clk |= MCI_WIDE_BUS; 65 clk |= MCI_4BIT_BUS;
66 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
67 clk |= MCI_ST_8BIT_BUS;
66 68
67 writel(clk, host->base + MMCICLOCK); 69 writel(clk, host->base + MMCICLOCK);
68} 70}
@@ -629,7 +631,18 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
629 631
630 mmc->ops = &mmci_ops; 632 mmc->ops = &mmci_ops;
631 mmc->f_min = (host->mclk + 511) / 512; 633 mmc->f_min = (host->mclk + 511) / 512;
632 mmc->f_max = min(host->mclk, fmax); 634 /*
635 * If the platform data supplies a maximum operating
636 * frequency, this takes precedence. Else, we fall back
637 * to using the module parameter, which has a (low)
638 * default value in case it is not specified. Either
639 * value must not exceed the clock rate into the block,
640 * of course.
641 */
642 if (plat->f_max)
643 mmc->f_max = min(host->mclk, plat->f_max);
644 else
645 mmc->f_max = min(host->mclk, fmax);
633 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); 646 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
634 647
635#ifdef CONFIG_REGULATOR 648#ifdef CONFIG_REGULATOR
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 1ceb9a90f59b..d77062e5e3af 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -25,9 +25,11 @@
25#define MCI_CLK_ENABLE (1 << 8) 25#define MCI_CLK_ENABLE (1 << 8)
26#define MCI_CLK_PWRSAVE (1 << 9) 26#define MCI_CLK_PWRSAVE (1 << 9)
27#define MCI_CLK_BYPASS (1 << 10) 27#define MCI_CLK_BYPASS (1 << 10)
28#define MCI_WIDE_BUS (1 << 11) 28#define MCI_4BIT_BUS (1 << 11)
29/* 8bit wide buses supported in ST Micro versions */
30#define MCI_ST_8BIT_BUS (1 << 12)
29/* HW flow control on the ST Micro version */ 31/* HW flow control on the ST Micro version */
30#define MCI_FCEN (1 << 13) 32#define MCI_ST_FCEN (1 << 13)
31 33
32#define MMCIARGUMENT 0x008 34#define MMCIARGUMENT 0x008
33#define MMCICOMMAND 0x00c 35#define MMCICOMMAND 0x00c
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 0ed48959b590..e4f00e70a749 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -544,7 +544,7 @@ static irqreturn_t pxamci_detect_irq(int irq, void *devid)
544{ 544{
545 struct pxamci_host *host = mmc_priv(devid); 545 struct pxamci_host *host = mmc_priv(devid);
546 546
547 mmc_detect_change(devid, host->pdata->detect_delay); 547 mmc_detect_change(devid, msecs_to_jiffies(host->pdata->detect_delay_ms));
548 return IRQ_HANDLED; 548 return IRQ_HANDLED;
549} 549}
550 550
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index ed5e9742be2c..a8f0512bad38 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -674,6 +674,7 @@ static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = {
674 { ZORRO_PROD_AMERISTAR_A2065 }, 674 { ZORRO_PROD_AMERISTAR_A2065 },
675 { 0 } 675 { 0 }
676}; 676};
677MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);
677 678
678static struct zorro_driver a2065_driver = { 679static struct zorro_driver a2065_driver = {
679 .name = "a2065", 680 .name = "a2065",
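The three zorro hunks in this section (a2065, ariadne, hydra) each add the
same one-liner. MODULE_DEVICE_TABLE(type, table) exports the id table as
module alias strings (visible via modinfo), which is what lets userspace
autoload the driver when a matching board is found; without it the table only
serves in-kernel matching. The pattern in isolation:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/zorro.h>

static struct zorro_device_id my_zorro_tbl[] __devinitdata = {
	{ ZORRO_PROD_AMERISTAR_A2065 },
	{ 0 }
};
/* emits "zorro:..." aliases so udev can modprobe on bus scan */
MODULE_DEVICE_TABLE(zorro, my_zorro_tbl);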
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index fa1a2354f5f9..4b30a46486e2 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -145,6 +145,7 @@ static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = {
145 { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE }, 145 { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
146 { 0 } 146 { 0 }
147}; 147};
148MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);
148 149
149static struct zorro_driver ariadne_driver = { 150static struct zorro_driver ariadne_driver = {
150 .name = "ariadne", 151 .name = "ariadne",
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index dbf81788bb40..d5d55c6a373f 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2524,12 +2524,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2524 * excessive C-state transition latencies result in 2524 * excessive C-state transition latencies result in
2525 * dropped transactions. 2525 * dropped transactions.
2526 */ 2526 */
2527 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, 2527 pm_qos_update_request(
2528 adapter->netdev->name, 55); 2528 adapter->netdev->pm_qos_req, 55);
2529 } else { 2529 } else {
2530 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, 2530 pm_qos_update_request(
2531 adapter->netdev->name, 2531 adapter->netdev->pm_qos_req,
2532 PM_QOS_DEFAULT_VALUE); 2532 PM_QOS_DEFAULT_VALUE);
2533 } 2533 }
2534 } 2534 }
2535 2535
@@ -2824,8 +2824,8 @@ int e1000e_up(struct e1000_adapter *adapter)
2824 2824
2825 /* DMA latency requirement to workaround early-receive/jumbo issue */ 2825 /* DMA latency requirement to workaround early-receive/jumbo issue */
2826 if (adapter->flags & FLAG_HAS_ERT) 2826 if (adapter->flags & FLAG_HAS_ERT)
2827 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, 2827 adapter->netdev->pm_qos_req =
2828 adapter->netdev->name, 2828 pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
2829 PM_QOS_DEFAULT_VALUE); 2829 PM_QOS_DEFAULT_VALUE);
2830 2830
2831 /* hardware has been reset, we need to reload some things */ 2831 /* hardware has been reset, we need to reload some things */
@@ -2887,9 +2887,11 @@ void e1000e_down(struct e1000_adapter *adapter)
2887 e1000_clean_tx_ring(adapter); 2887 e1000_clean_tx_ring(adapter);
2888 e1000_clean_rx_ring(adapter); 2888 e1000_clean_rx_ring(adapter);
2889 2889
2890 if (adapter->flags & FLAG_HAS_ERT) 2890 if (adapter->flags & FLAG_HAS_ERT) {
2891 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, 2891 pm_qos_remove_request(
2892 adapter->netdev->name); 2892 adapter->netdev->pm_qos_req);
2893 adapter->netdev->pm_qos_req = NULL;
2894 }
2893 2895
2894 /* 2896 /*
2895 * TODO: for power management, we could drop the link and 2897 * TODO: for power management, we could drop the link and
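The e1000e hunks above switch from the old name-keyed pm_qos_*_requirement() calls to the handle-based request API, storing the request pointer in the netdev. A minimal sketch of the full lifecycle, using only the calls, types, and the 55 us value visible in this diff (the header is this era's pm_qos_params.h; the helper names are illustrative):

#include <linux/pm_qos_params.h>

static struct pm_qos_request_list *demo_qos_req;

static int demo_up(void)
{
        /* Register with no constraint yet; keep the handle for updates. */
        demo_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
                                          PM_QOS_DEFAULT_VALUE);
        if (!demo_qos_req)
                return -ENOMEM;
        /* Tighten the constraint while the device is busy: 55 us. */
        pm_qos_update_request(demo_qos_req, 55);
        return 0;
}

static void demo_down(void)
{
        /* Drop the request; the stale handle must not be reused. */
        pm_qos_remove_request(demo_qos_req);
        demo_qos_req = NULL;
}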
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4e97ca182997..5d3763fb3472 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1649,6 +1649,7 @@ static void free_skb_resources(struct gfar_private *priv)
1649 sizeof(struct rxbd8) * priv->total_rx_ring_size, 1649 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1650 priv->tx_queue[0]->tx_bd_base, 1650 priv->tx_queue[0]->tx_bd_base,
1651 priv->tx_queue[0]->tx_bd_dma_base); 1651 priv->tx_queue[0]->tx_bd_dma_base);
1652 skb_queue_purge(&priv->rx_recycle);
1652} 1653}
1653 1654
1654void gfar_start(struct net_device *dev) 1655void gfar_start(struct net_device *dev)
@@ -2088,7 +2089,6 @@ static int gfar_close(struct net_device *dev)
2088 2089
2089 disable_napi(priv); 2090 disable_napi(priv);
2090 2091
2091 skb_queue_purge(&priv->rx_recycle);
2092 cancel_work_sync(&priv->reset_task); 2092 cancel_work_sync(&priv->reset_task);
2093 stop_gfar(dev); 2093 stop_gfar(dev);
2094 2094
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index 24724b4ad709..07d8e5b634f3 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -71,6 +71,7 @@ static struct zorro_device_id hydra_zorro_tbl[] __devinitdata = {
71 { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET }, 71 { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
72 { 0 } 72 { 0 }
73}; 73};
74MODULE_DEVICE_TABLE(zorro, hydra_zorro_tbl);
74 75
75static struct zorro_driver hydra_driver = { 76static struct zorro_driver hydra_driver = {
76 .name = "hydra", 77 .name = "hydra",
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 1b1edad1eb5e..f16e981812a9 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -48,6 +48,7 @@
48#define DRV_VERSION "1.0.0-k0" 48#define DRV_VERSION "1.0.0-k0"
49char igbvf_driver_name[] = "igbvf"; 49char igbvf_driver_name[] = "igbvf";
50const char igbvf_driver_version[] = DRV_VERSION; 50const char igbvf_driver_version[] = DRV_VERSION;
51struct pm_qos_request_list *igbvf_driver_pm_qos_req;
51static const char igbvf_driver_string[] = 52static const char igbvf_driver_string[] =
52 "Intel(R) Virtual Function Network Driver"; 53 "Intel(R) Virtual Function Network Driver";
53static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; 54static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
@@ -2899,7 +2900,7 @@ static int __init igbvf_init_module(void)
2899 printk(KERN_INFO "%s\n", igbvf_copyright); 2900 printk(KERN_INFO "%s\n", igbvf_copyright);
2900 2901
2901 ret = pci_register_driver(&igbvf_driver); 2902 ret = pci_register_driver(&igbvf_driver);
2902 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name, 2903 igbvf_driver_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
2903 PM_QOS_DEFAULT_VALUE); 2904 PM_QOS_DEFAULT_VALUE);
2904 2905
2905 return ret; 2906 return ret;
@@ -2915,7 +2916,8 @@ module_init(igbvf_init_module);
2915static void __exit igbvf_exit_module(void) 2916static void __exit igbvf_exit_module(void)
2916{ 2917{
2917 pci_unregister_driver(&igbvf_driver); 2918 pci_unregister_driver(&igbvf_driver);
2918 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name); 2919 pm_qos_remove_request(igbvf_driver_pm_qos_req);
2920 igbvf_driver_pm_qos_req = NULL;
2919} 2921}
2920module_exit(igbvf_exit_module); 2922module_exit(igbvf_exit_module);
2921 2923
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0cd80e4d71d9..e67691dca4ab 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -32,6 +32,7 @@ static int kszphy_config_init(struct phy_device *phydev)
32 32
33static struct phy_driver ks8001_driver = { 33static struct phy_driver ks8001_driver = {
34 .phy_id = PHY_ID_KS8001, 34 .phy_id = PHY_ID_KS8001,
35 .name = "Micrel KS8001",
35 .phy_id_mask = 0x00fffff0, 36 .phy_id_mask = 0x00fffff0,
36 .features = PHY_BASIC_FEATURES, 37 .features = PHY_BASIC_FEATURES,
37 .flags = PHY_POLL, 38 .flags = PHY_POLL,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index f9f0730b53d5..5ec542dd5b50 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -187,7 +187,6 @@ tx_drop:
187 return NETDEV_TX_OK; 187 return NETDEV_TX_OK;
188 188
189rx_drop: 189rx_drop:
190 kfree_skb(skb);
191 rcv_stats->rx_dropped++; 190 rcv_stats->rx_dropped++;
192 return NETDEV_TX_OK; 191 return NETDEV_TX_OK;
193} 192}
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index 99a6da464bd3..e1c2fcaa8bed 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -727,12 +727,16 @@ static void ar9170_usb_firmware_failed(struct ar9170_usb *aru)
727{ 727{
728 struct device *parent = aru->udev->dev.parent; 728 struct device *parent = aru->udev->dev.parent;
729 729
730 complete(&aru->firmware_loading_complete);
731
730 /* unbind anything failed */ 732 /* unbind anything failed */
731 if (parent) 733 if (parent)
732 down(&parent->sem); 734 down(&parent->sem);
733 device_release_driver(&aru->udev->dev); 735 device_release_driver(&aru->udev->dev);
734 if (parent) 736 if (parent)
735 up(&parent->sem); 737 up(&parent->sem);
738
739 usb_put_dev(aru->udev);
736} 740}
737 741
738static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context) 742static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
@@ -761,6 +765,8 @@ static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
761 if (err) 765 if (err)
762 goto err_unrx; 766 goto err_unrx;
763 767
768 complete(&aru->firmware_loading_complete);
769 usb_put_dev(aru->udev);
764 return; 770 return;
765 771
766 err_unrx: 772 err_unrx:
@@ -858,6 +864,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
858 init_usb_anchor(&aru->tx_pending); 864 init_usb_anchor(&aru->tx_pending);
859 init_usb_anchor(&aru->tx_submitted); 865 init_usb_anchor(&aru->tx_submitted);
860 init_completion(&aru->cmd_wait); 866 init_completion(&aru->cmd_wait);
867 init_completion(&aru->firmware_loading_complete);
861 spin_lock_init(&aru->tx_urb_lock); 868 spin_lock_init(&aru->tx_urb_lock);
862 869
863 aru->tx_pending_urbs = 0; 870 aru->tx_pending_urbs = 0;
@@ -877,6 +884,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
877 if (err) 884 if (err)
878 goto err_freehw; 885 goto err_freehw;
879 886
887 usb_get_dev(aru->udev);
880 return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw", 888 return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw",
881 &aru->udev->dev, GFP_KERNEL, aru, 889 &aru->udev->dev, GFP_KERNEL, aru,
882 ar9170_usb_firmware_step2); 890 ar9170_usb_firmware_step2);
@@ -896,6 +904,9 @@ static void ar9170_usb_disconnect(struct usb_interface *intf)
896 return; 904 return;
897 905
898 aru->common.state = AR9170_IDLE; 906 aru->common.state = AR9170_IDLE;
907
908 wait_for_completion(&aru->firmware_loading_complete);
909
899 ar9170_unregister(&aru->common); 910 ar9170_unregister(&aru->common);
900 ar9170_usb_cancel_urbs(aru); 911 ar9170_usb_cancel_urbs(aru);
901 912
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
index a2ce3b169ceb..919b06046eb3 100644
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ b/drivers/net/wireless/ath/ar9170/usb.h
@@ -71,6 +71,7 @@ struct ar9170_usb {
71 unsigned int tx_pending_urbs; 71 unsigned int tx_pending_urbs;
72 72
73 struct completion cmd_wait; 73 struct completion cmd_wait;
74 struct completion firmware_loading_complete;
74 int readlen; 75 int readlen;
75 u8 *readbuf; 76 u8 *readbuf;
76 77
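The ar9170 hunks above close a teardown race: probe takes an extra reference on the USB device and arms a completion before the asynchronous firmware request, the firmware callback signals the completion and drops the reference, and disconnect waits on the completion before unregistering. A simplified sketch of that pattern (names are illustrative, not the driver's, and the real driver chains several firmware stages):

#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/usb.h>

struct demo_usb {
        struct usb_device *udev;
        struct completion fw_done;
};

static void demo_fw_callback(const struct firmware *fw, void *context)
{
        struct demo_usb *aru = context;

        /* ... consume fw (may be NULL on load failure) ... */
        release_firmware(fw);
        complete(&aru->fw_done);        /* unblock a pending disconnect */
        usb_put_dev(aru->udev);         /* drop the probe-time reference */
}

static int demo_probe(struct demo_usb *aru)
{
        init_completion(&aru->fw_done);
        usb_get_dev(aru->udev);         /* keep udev alive for the callback */
        return request_firmware_nowait(THIS_MODULE, 1, "demo.fw",
                                       &aru->udev->dev, GFP_KERNEL, aru,
                                       demo_fw_callback);
}

static void demo_disconnect(struct demo_usb *aru)
{
        /* Never tear down while the firmware callback may still run. */
        wait_for_completion(&aru->fw_done);
}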
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 9b72c45a7748..2b05fe5e994c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -174,6 +174,8 @@ that only one external action is invoked at a time.
174#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" 174#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver"
175#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" 175#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
176 176
177struct pm_qos_request_list *ipw2100_pm_qos_req;
178
177/* Debugging stuff */ 179/* Debugging stuff */
178#ifdef CONFIG_IPW2100_DEBUG 180#ifdef CONFIG_IPW2100_DEBUG
179#define IPW2100_RX_DEBUG /* Reception debugging */ 181#define IPW2100_RX_DEBUG /* Reception debugging */
@@ -1739,7 +1741,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
1739 /* the ipw2100 hardware really doesn't want power management delays 1741 /* the ipw2100 hardware really doesn't want power management delays
1740 * longer than 175usec 1742 * longer than 175usec
1741 */ 1743 */
1742 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 175); 1744 pm_qos_update_request(ipw2100_pm_qos_req, 175);
1743 1745
1744 /* If the interrupt is enabled, turn it off... */ 1746 /* If the interrupt is enabled, turn it off... */
1745 spin_lock_irqsave(&priv->low_lock, flags); 1747 spin_lock_irqsave(&priv->low_lock, flags);
@@ -1887,8 +1889,7 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1887 ipw2100_disable_interrupts(priv); 1889 ipw2100_disable_interrupts(priv);
1888 spin_unlock_irqrestore(&priv->low_lock, flags); 1890 spin_unlock_irqrestore(&priv->low_lock, flags);
1889 1891
1890 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 1892 pm_qos_update_request(ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
1891 PM_QOS_DEFAULT_VALUE);
1892 1893
1893 /* We have to signal any supplicant if we are disassociating */ 1894 /* We have to signal any supplicant if we are disassociating */
1894 if (associated) 1895 if (associated)
@@ -6669,7 +6670,7 @@ static int __init ipw2100_init(void)
6669 if (ret) 6670 if (ret)
6670 goto out; 6671 goto out;
6671 6672
6672 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 6673 ipw2100_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
6673 PM_QOS_DEFAULT_VALUE); 6674 PM_QOS_DEFAULT_VALUE);
6674#ifdef CONFIG_IPW2100_DEBUG 6675#ifdef CONFIG_IPW2100_DEBUG
6675 ipw2100_debug_level = debug; 6676 ipw2100_debug_level = debug;
@@ -6692,7 +6693,7 @@ static void __exit ipw2100_exit(void)
6692 &driver_attr_debug_level); 6693 &driver_attr_debug_level);
6693#endif 6694#endif
6694 pci_unregister_driver(&ipw2100_pci_driver); 6695 pci_unregister_driver(&ipw2100_pci_driver);
6695 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100"); 6696 pm_qos_remove_request(ipw2100_pm_qos_req);
6696} 6697}
6697 6698
6698module_init(ipw2100_init); 6699module_init(ipw2100_init);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 6383d9f8c9b3..f4e59ae07f8e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -2621,7 +2621,9 @@ struct iwl_ssid_ie {
2621#define PROBE_OPTION_MAX_3945 4 2621#define PROBE_OPTION_MAX_3945 4
2622#define PROBE_OPTION_MAX 20 2622#define PROBE_OPTION_MAX 20
2623#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) 2623#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2624#define IWL_GOOD_CRC_TH cpu_to_le16(1) 2624#define IWL_GOOD_CRC_TH_DISABLED 0
2625#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
2626#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
2625#define IWL_MAX_SCAN_SIZE 1024 2627#define IWL_MAX_SCAN_SIZE 1024
2626#define IWL_MAX_CMD_SIZE 4096 2628#define IWL_MAX_CMD_SIZE 4096
2627#define IWL_MAX_PROBE_REQUEST 200 2629#define IWL_MAX_PROBE_REQUEST 200
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 12e455a4b90e..741e65ec8301 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -813,16 +813,29 @@ static void iwl_bg_request_scan(struct work_struct *data)
813 rate = IWL_RATE_1M_PLCP; 813 rate = IWL_RATE_1M_PLCP;
814 rate_flags = RATE_MCS_CCK_MSK; 814 rate_flags = RATE_MCS_CCK_MSK;
815 } 815 }
816 scan->good_CRC_th = 0; 816 scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
817 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) { 817 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
818 band = IEEE80211_BAND_5GHZ; 818 band = IEEE80211_BAND_5GHZ;
819 rate = IWL_RATE_6M_PLCP; 819 rate = IWL_RATE_6M_PLCP;
820 /* 820 /*
821 * If active scaning is requested but a certain channel 821 * If active scanning is requested but a certain channel is
822 * is marked passive, we can do active scanning if we 822 * marked passive, we can do active scanning if we detect
823 * detect transmissions. 823 * transmissions.
824 *
825 * There is an issue with some firmware versions that triggers
826 * a sysassert on a "good CRC threshold" of zero (== disabled),
827 * on a radar channel even though this means that we should NOT
828 * send probes.
829 *
830 * The "good CRC threshold" is the number of frames that we
831 * need to receive during our dwell time on a channel before
832 * sending out probes -- setting this to a huge value will
833 * mean we never reach it, but at the same time work around
834 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
835 * here instead of IWL_GOOD_CRC_TH_DISABLED.
824 */ 836 */
825 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; 837 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
838 IWL_GOOD_CRC_TH_NEVER;
826 839
827 /* Force use of chains B and C (0x6) for scan Rx for 4965 840 /* Force use of chains B and C (0x6) for scan Rx for 4965
828 * Avoid A (0x1) because of its off-channel reception on A-band. 841 * Avoid A (0x1) because of its off-channel reception on A-band.
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index b55e4f39a9e1..b74a56c48d26 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -2967,7 +2967,8 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2967 * is marked passive, we can do active scanning if we 2967 * is marked passive, we can do active scanning if we
2968 * detect transmissions. 2968 * detect transmissions.
2969 */ 2969 */
2970 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; 2970 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2971 IWL_GOOD_CRC_TH_DISABLED;
2971 band = IEEE80211_BAND_5GHZ; 2972 band = IEEE80211_BAND_5GHZ;
2972 } else { 2973 } else {
2973 IWL_WARN(priv, "Invalid scan band count\n"); 2974 IWL_WARN(priv, "Invalid scan band count\n");
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index 81c753a617ab..9548cbb5012a 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -102,6 +102,7 @@ static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = {
102 { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, }, 102 { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, },
103 { 0 } 103 { 0 }
104}; 104};
105MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl);
105 106
106static struct zorro_driver zorro8390_driver = { 107static struct zorro_driver zorro8390_driver = {
107 .name = "zorro8390", 108 .name = "zorro8390",
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 166b67ea622f..219f79e2210a 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -30,23 +30,7 @@
30 30
31#define OP_BUFFER_FLAGS 0 31#define OP_BUFFER_FLAGS 0
32 32
33/* 33static struct ring_buffer *op_ring_buffer;
34 * Read and write access is using spin locking. Thus, writing to the
35 * buffer by NMI handler (x86) could occur also during critical
36 * sections when reading the buffer. To avoid this, there are 2
37 * buffers for independent read and write access. Read access is in
38 * process context only, write access only in the NMI handler. If the
39 * read buffer runs empty, both buffers are swapped atomically. There
40 * is potentially a small window during swapping where the buffers are
41 * disabled and samples could be lost.
42 *
43 * Using 2 buffers is a little bit overhead, but the solution is clear
44 * and does not require changes in the ring buffer implementation. It
45 * can be changed to a single buffer solution when the ring buffer
46 * access is implemented as non-locking atomic code.
47 */
48static struct ring_buffer *op_ring_buffer_read;
49static struct ring_buffer *op_ring_buffer_write;
50DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer); 34DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
51 35
52static void wq_sync_buffer(struct work_struct *work); 36static void wq_sync_buffer(struct work_struct *work);
@@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)
68 52
69void free_cpu_buffers(void) 53void free_cpu_buffers(void)
70{ 54{
71 if (op_ring_buffer_read) 55 if (op_ring_buffer)
72 ring_buffer_free(op_ring_buffer_read); 56 ring_buffer_free(op_ring_buffer);
73 op_ring_buffer_read = NULL; 57 op_ring_buffer = NULL;
74 if (op_ring_buffer_write)
75 ring_buffer_free(op_ring_buffer_write);
76 op_ring_buffer_write = NULL;
77} 58}
78 59
79#define RB_EVENT_HDR_SIZE 4 60#define RB_EVENT_HDR_SIZE 4
@@ -86,11 +67,8 @@ int alloc_cpu_buffers(void)
86 unsigned long byte_size = buffer_size * (sizeof(struct op_sample) + 67 unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
87 RB_EVENT_HDR_SIZE); 68 RB_EVENT_HDR_SIZE);
88 69
89 op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); 70 op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
90 if (!op_ring_buffer_read) 71 if (!op_ring_buffer)
91 goto fail;
92 op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
93 if (!op_ring_buffer_write)
94 goto fail; 72 goto fail;
95 73
96 for_each_possible_cpu(i) { 74 for_each_possible_cpu(i) {
@@ -162,16 +140,11 @@ struct op_sample
162*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size) 140*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
163{ 141{
164 entry->event = ring_buffer_lock_reserve 142 entry->event = ring_buffer_lock_reserve
165 (op_ring_buffer_write, sizeof(struct op_sample) + 143 (op_ring_buffer, sizeof(struct op_sample) +
166 size * sizeof(entry->sample->data[0])); 144 size * sizeof(entry->sample->data[0]));
167 if (entry->event) 145 if (!entry->event)
168 entry->sample = ring_buffer_event_data(entry->event);
169 else
170 entry->sample = NULL;
171
172 if (!entry->sample)
173 return NULL; 146 return NULL;
174 147 entry->sample = ring_buffer_event_data(entry->event);
175 entry->size = size; 148 entry->size = size;
176 entry->data = entry->sample->data; 149 entry->data = entry->sample->data;
177 150
@@ -180,25 +153,16 @@ struct op_sample
180 153
181int op_cpu_buffer_write_commit(struct op_entry *entry) 154int op_cpu_buffer_write_commit(struct op_entry *entry)
182{ 155{
183 return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event); 156 return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
184} 157}
185 158
186struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu) 159struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
187{ 160{
188 struct ring_buffer_event *e; 161 struct ring_buffer_event *e;
189 e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); 162 e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
190 if (e) 163 if (!e)
191 goto event;
192 if (ring_buffer_swap_cpu(op_ring_buffer_read,
193 op_ring_buffer_write,
194 cpu))
195 return NULL; 164 return NULL;
196 e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
197 if (e)
198 goto event;
199 return NULL;
200 165
201event:
202 entry->event = e; 166 entry->event = e;
203 entry->sample = ring_buffer_event_data(e); 167 entry->sample = ring_buffer_event_data(e);
204 entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample)) 168 entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
@@ -209,8 +173,7 @@ event:
209 173
210unsigned long op_cpu_buffer_entries(int cpu) 174unsigned long op_cpu_buffer_entries(int cpu)
211{ 175{
212 return ring_buffer_entries_cpu(op_ring_buffer_read, cpu) 176 return ring_buffer_entries_cpu(op_ring_buffer, cpu);
213 + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
214} 177}
215 178
216static int 179static int
@@ -356,8 +319,16 @@ void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
356 319
357void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) 320void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
358{ 321{
359 int is_kernel = !user_mode(regs); 322 int is_kernel;
360 unsigned long pc = profile_pc(regs); 323 unsigned long pc;
324
325 if (likely(regs)) {
326 is_kernel = !user_mode(regs);
327 pc = profile_pc(regs);
328 } else {
329 is_kernel = 0; /* This value will not be used */
330 pc = ESCAPE_CODE; /* as this causes an early return. */
331 }
361 332
362 __oprofile_add_ext_sample(pc, regs, event, is_kernel); 333 __oprofile_add_ext_sample(pc, regs, event, is_kernel);
363} 334}
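With the read/write buffer pair collapsed into one ring buffer, the hunks above reduce to a plain reserve/commit/consume round trip; the extra NULL passed to ring_buffer_consume() is the new lost-events argument. A minimal sketch of that round trip, using the signatures visible above (the demo_* names are illustrative):

#include <linux/ring_buffer.h>

static struct ring_buffer *demo_rb;

static int demo_write(unsigned long datum)
{
        struct ring_buffer_event *event;
        unsigned long *p;

        event = ring_buffer_lock_reserve(demo_rb, sizeof(*p));
        if (!event)
                return -ENOMEM;
        p = ring_buffer_event_data(event);
        *p = datum;
        return ring_buffer_unlock_commit(demo_rb, event);
}

static int demo_read(int cpu, unsigned long *datum)
{
        struct ring_buffer_event *event;

        /* Third and fourth arguments (timestamp, lost events) unused. */
        event = ring_buffer_consume(demo_rb, cpu, NULL, NULL);
        if (!event)
                return -EAGAIN;
        *datum = *(unsigned long *)ring_buffer_event_data(event);
        return 0;
}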
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index dc8a0428260d..b336cd9ee7a1 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -253,22 +253,26 @@ static int __init oprofile_init(void)
253 int err; 253 int err;
254 254
255 err = oprofile_arch_init(&oprofile_ops); 255 err = oprofile_arch_init(&oprofile_ops);
256
257 if (err < 0 || timer) { 256 if (err < 0 || timer) {
258 printk(KERN_INFO "oprofile: using timer interrupt.\n"); 257 printk(KERN_INFO "oprofile: using timer interrupt.\n");
259 oprofile_timer_init(&oprofile_ops); 258 err = oprofile_timer_init(&oprofile_ops);
259 if (err)
260 goto out_arch;
260 } 261 }
261
262 err = oprofilefs_register(); 262 err = oprofilefs_register();
263 if (err) 263 if (err)
264 oprofile_arch_exit(); 264 goto out_arch;
265 return 0;
265 266
267out_arch:
268 oprofile_arch_exit();
266 return err; 269 return err;
267} 270}
268 271
269 272
270static void __exit oprofile_exit(void) 273static void __exit oprofile_exit(void)
271{ 274{
275 oprofile_timer_exit();
272 oprofilefs_unregister(); 276 oprofilefs_unregister();
273 oprofile_arch_exit(); 277 oprofile_arch_exit();
274} 278}
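The reworked init path above adopts the kernel's usual goto-unwind idiom: every failure after oprofile_arch_init() funnels through a label that tears down exactly what succeeded, and the timer fallback's return value is finally checked. The idiom in isolation, with hypothetical step helpers standing in for the arch/timer/fs setup:

static int step_a_init(void) { return 0; }      /* hypothetical setup steps */
static void step_a_exit(void) { }
static int step_b_init(void) { return 0; }

static int demo_init(void)
{
        int err;

        err = step_a_init();
        if (err)
                return err;             /* nothing to undo yet */
        err = step_b_init();
        if (err)
                goto out_a;             /* undo step A only */
        return 0;

out_a:
        step_a_exit();
        return err;
}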
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index cb92f5c98c1a..47e12cb4ee8b 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -34,7 +34,8 @@ struct super_block;
34struct dentry; 34struct dentry;
35 35
36void oprofile_create_files(struct super_block *sb, struct dentry *root); 36void oprofile_create_files(struct super_block *sb, struct dentry *root);
37void oprofile_timer_init(struct oprofile_operations *ops); 37int oprofile_timer_init(struct oprofile_operations *ops);
38void oprofile_timer_exit(void);
38 39
39int oprofile_set_backtrace(unsigned long depth); 40int oprofile_set_backtrace(unsigned long depth);
40int oprofile_set_timeout(unsigned long time); 41int oprofile_set_timeout(unsigned long time);
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 333f915568c7..dc0ae4d14dff 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -13,34 +13,94 @@
13#include <linux/oprofile.h> 13#include <linux/oprofile.h>
14#include <linux/profile.h> 14#include <linux/profile.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/cpu.h>
17#include <linux/hrtimer.h>
18#include <asm/irq_regs.h>
16#include <asm/ptrace.h> 19#include <asm/ptrace.h>
17 20
18#include "oprof.h" 21#include "oprof.h"
19 22
20static int timer_notify(struct pt_regs *regs) 23static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
24
25static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
26{
27 oprofile_add_sample(get_irq_regs(), 0);
28 hrtimer_forward_now(hrtimer, ns_to_ktime(TICK_NSEC));
29 return HRTIMER_RESTART;
30}
31
32static void __oprofile_hrtimer_start(void *unused)
33{
34 struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);
35
36 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
37 hrtimer->function = oprofile_hrtimer_notify;
38
39 hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC),
40 HRTIMER_MODE_REL_PINNED);
41}
42
43static int oprofile_hrtimer_start(void)
21{ 44{
22 oprofile_add_sample(regs, 0); 45 on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
23 return 0; 46 return 0;
24} 47}
25 48
26static int timer_start(void) 49static void __oprofile_hrtimer_stop(int cpu)
27{ 50{
28 return register_timer_hook(timer_notify); 51 struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
52
53 hrtimer_cancel(hrtimer);
29} 54}
30 55
56static void oprofile_hrtimer_stop(void)
57{
58 int cpu;
59
60 for_each_online_cpu(cpu)
61 __oprofile_hrtimer_stop(cpu);
62}
31 63
32static void timer_stop(void) 64static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
65 unsigned long action, void *hcpu)
33{ 66{
34 unregister_timer_hook(timer_notify); 67 long cpu = (long) hcpu;
68
69 switch (action) {
70 case CPU_ONLINE:
71 case CPU_ONLINE_FROZEN:
72 smp_call_function_single(cpu, __oprofile_hrtimer_start,
73 NULL, 1);
74 break;
75 case CPU_DEAD:
76 case CPU_DEAD_FROZEN:
77 __oprofile_hrtimer_stop(cpu);
78 break;
79 }
80 return NOTIFY_OK;
35} 81}
36 82
83static struct notifier_block __refdata oprofile_cpu_notifier = {
84 .notifier_call = oprofile_cpu_notify,
85};
37 86
38void __init oprofile_timer_init(struct oprofile_operations *ops) 87int __init oprofile_timer_init(struct oprofile_operations *ops)
39{ 88{
89 int rc;
90
91 rc = register_hotcpu_notifier(&oprofile_cpu_notifier);
92 if (rc)
93 return rc;
40 ops->create_files = NULL; 94 ops->create_files = NULL;
41 ops->setup = NULL; 95 ops->setup = NULL;
42 ops->shutdown = NULL; 96 ops->shutdown = NULL;
43 ops->start = timer_start; 97 ops->start = oprofile_hrtimer_start;
44 ops->stop = timer_stop; 98 ops->stop = oprofile_hrtimer_stop;
45 ops->cpu_type = "timer"; 99 ops->cpu_type = "timer";
100 return 0;
101}
102
103void __exit oprofile_timer_exit(void)
104{
105 unregister_hotcpu_notifier(&oprofile_cpu_notifier);
46} 106}
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 417312528ddf..371dc564e2e4 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3626,14 +3626,15 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
3626 domain_remove_one_dev_info(dmar_domain, pdev); 3626 domain_remove_one_dev_info(dmar_domain, pdev);
3627} 3627}
3628 3628
3629static int intel_iommu_map_range(struct iommu_domain *domain, 3629static int intel_iommu_map(struct iommu_domain *domain,
3630 unsigned long iova, phys_addr_t hpa, 3630 unsigned long iova, phys_addr_t hpa,
3631 size_t size, int iommu_prot) 3631 int gfp_order, int iommu_prot)
3632{ 3632{
3633 struct dmar_domain *dmar_domain = domain->priv; 3633 struct dmar_domain *dmar_domain = domain->priv;
3634 u64 max_addr; 3634 u64 max_addr;
3635 int addr_width; 3635 int addr_width;
3636 int prot = 0; 3636 int prot = 0;
3637 size_t size;
3637 int ret; 3638 int ret;
3638 3639
3639 if (iommu_prot & IOMMU_READ) 3640 if (iommu_prot & IOMMU_READ)
@@ -3643,6 +3644,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
3643 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) 3644 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3644 prot |= DMA_PTE_SNP; 3645 prot |= DMA_PTE_SNP;
3645 3646
3647 size = PAGE_SIZE << gfp_order;
3646 max_addr = iova + size; 3648 max_addr = iova + size;
3647 if (dmar_domain->max_addr < max_addr) { 3649 if (dmar_domain->max_addr < max_addr) {
3648 int min_agaw; 3650 int min_agaw;
@@ -3669,19 +3671,19 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
3669 return ret; 3671 return ret;
3670} 3672}
3671 3673
3672static void intel_iommu_unmap_range(struct iommu_domain *domain, 3674static int intel_iommu_unmap(struct iommu_domain *domain,
3673 unsigned long iova, size_t size) 3675 unsigned long iova, int gfp_order)
3674{ 3676{
3675 struct dmar_domain *dmar_domain = domain->priv; 3677 struct dmar_domain *dmar_domain = domain->priv;
3676 3678 size_t size = PAGE_SIZE << gfp_order;
3677 if (!size)
3678 return;
3679 3679
3680 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, 3680 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3681 (iova + size - 1) >> VTD_PAGE_SHIFT); 3681 (iova + size - 1) >> VTD_PAGE_SHIFT);
3682 3682
3683 if (dmar_domain->max_addr == iova + size) 3683 if (dmar_domain->max_addr == iova + size)
3684 dmar_domain->max_addr = iova; 3684 dmar_domain->max_addr = iova;
3685
3686 return gfp_order;
3685} 3687}
3686 3688
3687static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, 3689static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3714,8 +3716,8 @@ static struct iommu_ops intel_iommu_ops = {
3714 .domain_destroy = intel_iommu_domain_destroy, 3716 .domain_destroy = intel_iommu_domain_destroy,
3715 .attach_dev = intel_iommu_attach_device, 3717 .attach_dev = intel_iommu_attach_device,
3716 .detach_dev = intel_iommu_detach_device, 3718 .detach_dev = intel_iommu_detach_device,
3717 .map = intel_iommu_map_range, 3719 .map = intel_iommu_map,
3718 .unmap = intel_iommu_unmap_range, 3720 .unmap = intel_iommu_unmap,
3719 .iova_to_phys = intel_iommu_iova_to_phys, 3721 .iova_to_phys = intel_iommu_iova_to_phys,
3720 .domain_has_cap = intel_iommu_domain_has_cap, 3722 .domain_has_cap = intel_iommu_domain_has_cap,
3721}; 3723};
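The new iommu callbacks above trade a byte count for a page order: map() receives gfp_order and derives the size itself, and unmap() returns the order it actually unmapped. A short sketch of both directions of that convention (demo_* names are illustrative):

#include <linux/mm.h>

/* Order -> bytes, as intel_iommu_map() computes above. */
static size_t demo_order_to_bytes(int gfp_order)
{
        return (size_t)PAGE_SIZE << gfp_order;  /* order 2 -> 16 KiB on 4 KiB pages */
}

/* Bytes -> order: the smallest order whose size covers 'size'. */
static int demo_bytes_to_order(size_t size)
{
        return get_order(size);
}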
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 4fe36d2e1049..19b111383f62 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -838,65 +838,11 @@ static void pci_bus_dump_resources(struct pci_bus *bus)
838 } 838 }
839} 839}
840 840
841static int __init pci_bus_get_depth(struct pci_bus *bus)
842{
843 int depth = 0;
844 struct pci_dev *dev;
845
846 list_for_each_entry(dev, &bus->devices, bus_list) {
847 int ret;
848 struct pci_bus *b = dev->subordinate;
849 if (!b)
850 continue;
851
852 ret = pci_bus_get_depth(b);
853 if (ret + 1 > depth)
854 depth = ret + 1;
855 }
856
857 return depth;
858}
859static int __init pci_get_max_depth(void)
860{
861 int depth = 0;
862 struct pci_bus *bus;
863
864 list_for_each_entry(bus, &pci_root_buses, node) {
865 int ret;
866
867 ret = pci_bus_get_depth(bus);
868 if (ret > depth)
869 depth = ret;
870 }
871
872 return depth;
873}
874
875/*
876 * first try will not touch pci bridge res
877 * second and later try will clear small leaf bridge res
878 * will stop till to the max deepth if can not find good one
879 */
880void __init 841void __init
881pci_assign_unassigned_resources(void) 842pci_assign_unassigned_resources(void)
882{ 843{
883 struct pci_bus *bus; 844 struct pci_bus *bus;
884 int tried_times = 0;
885 enum release_type rel_type = leaf_only;
886 struct resource_list_x head, *list;
887 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
888 IORESOURCE_PREFETCH;
889 unsigned long failed_type;
890 int max_depth = pci_get_max_depth();
891 int pci_try_num;
892 845
893 head.next = NULL;
894
895 pci_try_num = max_depth + 1;
896 printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
897 max_depth, pci_try_num);
898
899again:
900 /* Depth first, calculate sizes and alignments of all 846 /* Depth first, calculate sizes and alignments of all
901 subordinate buses. */ 847 subordinate buses. */
902 list_for_each_entry(bus, &pci_root_buses, node) { 848 list_for_each_entry(bus, &pci_root_buses, node) {
@@ -904,65 +850,9 @@ again:
904 } 850 }
905 /* Depth last, allocate resources and update the hardware. */ 851 /* Depth last, allocate resources and update the hardware. */
906 list_for_each_entry(bus, &pci_root_buses, node) { 852 list_for_each_entry(bus, &pci_root_buses, node) {
907 __pci_bus_assign_resources(bus, &head); 853 pci_bus_assign_resources(bus);
908 }
909 tried_times++;
910
911 /* any device complain? */
912 if (!head.next)
913 goto enable_and_dump;
914 failed_type = 0;
915 for (list = head.next; list;) {
916 failed_type |= list->flags;
917 list = list->next;
918 }
919 /*
920 * io port are tight, don't try extra
921 * or if reach the limit, don't want to try more
922 */
923 failed_type &= type_mask;
924 if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
925 free_failed_list(&head);
926 goto enable_and_dump;
927 }
928
929 printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
930 tried_times + 1);
931
932 /* third times and later will not check if it is leaf */
933 if ((tried_times + 1) > 2)
934 rel_type = whole_subtree;
935
936 /*
937 * Try to release leaf bridge's resources that doesn't fit resource of
938 * child device under that bridge
939 */
940 for (list = head.next; list;) {
941 bus = list->dev->bus;
942 pci_bus_release_bridge_resources(bus, list->flags & type_mask,
943 rel_type);
944 list = list->next;
945 }
946 /* restore size and flags */
947 for (list = head.next; list;) {
948 struct resource *res = list->res;
949
950 res->start = list->start;
951 res->end = list->end;
952 res->flags = list->flags;
953 if (list->dev->subordinate)
954 res->flags = 0;
955
956 list = list->next;
957 }
958 free_failed_list(&head);
959
960 goto again;
961
962enable_and_dump:
963 /* Depth last, update the hardware. */
964 list_for_each_entry(bus, &pci_root_buses, node)
965 pci_enable_bridges(bus); 854 pci_enable_bridges(bus);
855 }
966 856
967 /* dump the resource on buses */ 857 /* dump the resource on buses */
968 list_for_each_entry(bus, &pci_root_buses, node) { 858 list_for_each_entry(bus, &pci_root_buses, node) {
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index caca50eaa0e7..d0f5ad306078 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -214,7 +214,8 @@ config PCMCIA_PXA2XX
214 depends on ARM && ARCH_PXA && PCMCIA 214 depends on ARM && ARCH_PXA && PCMCIA
215 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \ 215 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
216 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \ 216 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
217 || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2) 217 || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2 \
218 || MACH_VPAC270)
218 select PCMCIA_SOC_COMMON 219 select PCMCIA_SOC_COMMON
219 help 220 help
220 Say Y here to include support for the PXA2xx PCMCIA controller 221 Say Y here to include support for the PXA2xx PCMCIA controller
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 7031d0a4f4b0..d006e8beab9c 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -69,6 +69,7 @@ pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o
69pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o 69pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
70pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o 70pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
71pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o 71pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
72pxa2xx-obj-$(CONFIG_MACH_VPAC270) += pxa2xx_vpac270.o
72 73
73obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y) 74obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y)
74 75
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
new file mode 100644
index 000000000000..55627eccee8e
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -0,0 +1,229 @@
1/*
2 * linux/drivers/pcmcia/pxa2xx_vpac270.c
3 *
4 * Driver for Voipac PXA270 PCMCIA and CF sockets
5 *
6 * Copyright (C) 2010
7 * Marek Vasut <marek.vasut@gmail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/platform_device.h>
17
18#include <asm/mach-types.h>
19
20#include <mach/gpio.h>
21#include <mach/vpac270.h>
22
23#include "soc_common.h"
24
25static struct pcmcia_irqs cd_irqs[] = {
26 {
27 .sock = 0,
28 .irq = IRQ_GPIO(GPIO84_VPAC270_PCMCIA_CD),
29 .str = "PCMCIA CD"
30 },
31 {
32 .sock = 1,
33 .irq = IRQ_GPIO(GPIO17_VPAC270_CF_CD),
34 .str = "CF CD"
35 },
36};
37
38static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
39{
40 int ret;
41
42 if (skt->nr == 0) {
43 ret = gpio_request(GPIO84_VPAC270_PCMCIA_CD, "PCMCIA CD");
44 if (ret)
45 goto err1;
46 ret = gpio_direction_input(GPIO84_VPAC270_PCMCIA_CD);
47 if (ret)
48 goto err2;
49
50 ret = gpio_request(GPIO35_VPAC270_PCMCIA_RDY, "PCMCIA RDY");
51 if (ret)
52 goto err2;
53 ret = gpio_direction_input(GPIO35_VPAC270_PCMCIA_RDY);
54 if (ret)
55 goto err3;
56
57 ret = gpio_request(GPIO107_VPAC270_PCMCIA_PPEN, "PCMCIA PPEN");
58 if (ret)
59 goto err3;
60 ret = gpio_direction_output(GPIO107_VPAC270_PCMCIA_PPEN, 0);
61 if (ret)
62 goto err4;
63
64 ret = gpio_request(GPIO11_VPAC270_PCMCIA_RESET, "PCMCIA RESET");
65 if (ret)
66 goto err4;
67 ret = gpio_direction_output(GPIO11_VPAC270_PCMCIA_RESET, 0);
68 if (ret)
69 goto err5;
70
71 skt->socket.pci_irq = gpio_to_irq(GPIO35_VPAC270_PCMCIA_RDY);
72
73 return soc_pcmcia_request_irqs(skt, &cd_irqs[0], 1);
74
75err5:
76 gpio_free(GPIO11_VPAC270_PCMCIA_RESET);
77err4:
78 gpio_free(GPIO107_VPAC270_PCMCIA_PPEN);
79err3:
80 gpio_free(GPIO35_VPAC270_PCMCIA_RDY);
81err2:
82 gpio_free(GPIO84_VPAC270_PCMCIA_CD);
83err1:
84 return ret;
85
86 } else {
87 ret = gpio_request(GPIO17_VPAC270_CF_CD, "CF CD");
88 if (ret)
89 goto err6;
90 ret = gpio_direction_input(GPIO17_VPAC270_CF_CD);
91 if (ret)
92 goto err7;
93
94 ret = gpio_request(GPIO12_VPAC270_CF_RDY, "CF RDY");
95 if (ret)
96 goto err7;
97 ret = gpio_direction_input(GPIO12_VPAC270_CF_RDY);
98 if (ret)
99 goto err8;
100
101 ret = gpio_request(GPIO16_VPAC270_CF_RESET, "CF RESET");
102 if (ret)
103 goto err8;
104 ret = gpio_direction_output(GPIO16_VPAC270_CF_RESET, 0);
105 if (ret)
106 goto err9;
107
108 skt->socket.pci_irq = gpio_to_irq(GPIO12_VPAC270_CF_RDY);
109
110 return soc_pcmcia_request_irqs(skt, &cd_irqs[1], 1);
111
112err9:
113 gpio_free(GPIO16_VPAC270_CF_RESET);
114err8:
115 gpio_free(GPIO12_VPAC270_CF_RDY);
116err7:
117 gpio_free(GPIO17_VPAC270_CF_CD);
118err6:
119 return ret;
120
121 }
122}
123
124static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
125{
126 gpio_free(GPIO11_VPAC270_PCMCIA_RESET);
127 gpio_free(GPIO107_VPAC270_PCMCIA_PPEN);
128 gpio_free(GPIO35_VPAC270_PCMCIA_RDY);
129 gpio_free(GPIO84_VPAC270_PCMCIA_CD);
130 gpio_free(GPIO16_VPAC270_CF_RESET);
131 gpio_free(GPIO12_VPAC270_CF_RDY);
132 gpio_free(GPIO17_VPAC270_CF_CD);
133}
134
135static void vpac270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
136 struct pcmcia_state *state)
137{
138 if (skt->nr == 0) {
139 state->detect = !gpio_get_value(GPIO84_VPAC270_PCMCIA_CD);
140 state->ready = !!gpio_get_value(GPIO35_VPAC270_PCMCIA_RDY);
141 } else {
142 state->detect = !gpio_get_value(GPIO17_VPAC270_CF_CD);
143 state->ready = !!gpio_get_value(GPIO12_VPAC270_CF_RDY);
144 }
145 state->bvd1 = 1;
146 state->bvd2 = 1;
147 state->wrprot = 0;
148 state->vs_3v = 1;
149 state->vs_Xv = 0;
150}
151
152static int
153vpac270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
154 const socket_state_t *state)
155{
156 if (skt->nr == 0) {
157 gpio_set_value(GPIO11_VPAC270_PCMCIA_RESET,
158 (state->flags & SS_RESET));
159 gpio_set_value(GPIO107_VPAC270_PCMCIA_PPEN,
160 !(state->Vcc == 33 || state->Vcc == 50));
161 } else {
162 gpio_set_value(GPIO16_VPAC270_CF_RESET,
163 (state->flags & SS_RESET));
164 }
165
166 return 0;
167}
168
169static void vpac270_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
170{
171}
172
173static void vpac270_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
174{
175}
176
177static struct pcmcia_low_level vpac270_pcmcia_ops = {
178 .owner = THIS_MODULE,
179
180 .first = 0,
181 .nr = 2,
182
183 .hw_init = vpac270_pcmcia_hw_init,
184 .hw_shutdown = vpac270_pcmcia_hw_shutdown,
185
186 .socket_state = vpac270_pcmcia_socket_state,
187 .configure_socket = vpac270_pcmcia_configure_socket,
188
189 .socket_init = vpac270_pcmcia_socket_init,
190 .socket_suspend = vpac270_pcmcia_socket_suspend,
191};
192
193static struct platform_device *vpac270_pcmcia_device;
194
195static int __init vpac270_pcmcia_init(void)
196{
197 int ret;
198
199 if (!machine_is_vpac270())
200 return -ENODEV;
201
202 vpac270_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
203 if (!vpac270_pcmcia_device)
204 return -ENOMEM;
205
206 ret = platform_device_add_data(vpac270_pcmcia_device,
207 &vpac270_pcmcia_ops, sizeof(vpac270_pcmcia_ops));
208
209 if (!ret)
210 ret = platform_device_add(vpac270_pcmcia_device);
211
212 if (ret)
213 platform_device_put(vpac270_pcmcia_device);
214
215 return ret;
216}
217
218static void __exit vpac270_pcmcia_exit(void)
219{
220 platform_device_unregister(vpac270_pcmcia_device);
221}
222
223module_init(vpac270_pcmcia_init);
224module_exit(vpac270_pcmcia_exit);
225
226MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
227MODULE_DESCRIPTION("PCMCIA support for Voipac PXA270");
228MODULE_ALIAS("platform:pxa2xx-pcmcia");
229MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 6a1303759432..50ac047cd136 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -620,6 +620,16 @@ config RTC_DRV_NUC900
620 620
621comment "on-CPU RTC drivers" 621comment "on-CPU RTC drivers"
622 622
623config RTC_DRV_DAVINCI
624 tristate "TI DaVinci RTC"
625 depends on ARCH_DAVINCI_DM365
626 help
627 If you say yes here you get support for the RTC on the
628 DaVinci platforms (DM365).
629
630 This driver can also be built as a module. If so, the module
631 will be called rtc-davinci.
632
623config RTC_DRV_OMAP 633config RTC_DRV_OMAP
624 tristate "TI OMAP1" 634 tristate "TI OMAP1"
625 depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX 635 depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 44ef194a9573..245311a1348f 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o
27obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o 27obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
28obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o 28obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
29obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o 29obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
30obj-$(CONFIG_RTC_DRV_DAVINCI) += rtc-davinci.o
30obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o 31obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
31obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o 32obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
32obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o 33obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
new file mode 100644
index 000000000000..92a8f6cacda9
--- /dev/null
+++ b/drivers/rtc/rtc-davinci.c
@@ -0,0 +1,673 @@
1/*
2 * DaVinci Power Management and Real Time Clock Driver for TI platforms
3 *
4 * Copyright (C) 2009 Texas Instruments, Inc
5 *
6 * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/ioport.h>
26#include <linux/delay.h>
27#include <linux/spinlock.h>
28#include <linux/rtc.h>
29#include <linux/bcd.h>
30#include <linux/platform_device.h>
31#include <linux/io.h>
32
33/*
34 * The DaVinci RTC is a simple RTC with the following counters:
35 * Sec: 0 - 59 : BCD count
36 * Min: 0 - 59 : BCD count
37 * Hour: 0 - 23 : BCD count
38 * Day: 0 - 0x7FFF (32767) : binary count (over 89 years)
39 */
40
41/* PRTC interface registers */
42#define DAVINCI_PRTCIF_PID 0x00
43#define PRTCIF_CTLR 0x04
44#define PRTCIF_LDATA 0x08
45#define PRTCIF_UDATA 0x0C
46#define PRTCIF_INTEN 0x10
47#define PRTCIF_INTFLG 0x14
48
49/* PRTCIF_CTLR bit fields */
50#define PRTCIF_CTLR_BUSY BIT(31)
51#define PRTCIF_CTLR_SIZE BIT(25)
52#define PRTCIF_CTLR_DIR BIT(24)
53#define PRTCIF_CTLR_BENU_MSB BIT(23)
54#define PRTCIF_CTLR_BENU_3RD_BYTE BIT(22)
55#define PRTCIF_CTLR_BENU_2ND_BYTE BIT(21)
56#define PRTCIF_CTLR_BENU_LSB BIT(20)
57#define PRTCIF_CTLR_BENU_MASK (0x00F00000)
58#define PRTCIF_CTLR_BENL_MSB BIT(19)
59#define PRTCIF_CTLR_BENL_3RD_BYTE BIT(18)
60#define PRTCIF_CTLR_BENL_2ND_BYTE BIT(17)
61#define PRTCIF_CTLR_BENL_LSB BIT(16)
62#define PRTCIF_CTLR_BENL_MASK (0x000F0000)
63
64/* PRTCIF_INTEN bit fields */
65#define PRTCIF_INTEN_RTCSS BIT(1)
66#define PRTCIF_INTEN_RTCIF BIT(0)
67#define PRTCIF_INTEN_MASK (PRTCIF_INTEN_RTCSS \
68 | PRTCIF_INTEN_RTCIF)
69
70/* PRTCIF_INTFLG bit fields */
71#define PRTCIF_INTFLG_RTCSS BIT(1)
72#define PRTCIF_INTFLG_RTCIF BIT(0)
73#define PRTCIF_INTFLG_MASK (PRTCIF_INTFLG_RTCSS \
74 | PRTCIF_INTFLG_RTCIF)
75
76/* PRTC subsystem registers */
77#define PRTCSS_RTC_INTC_EXTENA1 (0x0C)
78#define PRTCSS_RTC_CTRL (0x10)
79#define PRTCSS_RTC_WDT (0x11)
80#define PRTCSS_RTC_TMR0 (0x12)
81#define PRTCSS_RTC_TMR1 (0x13)
82#define PRTCSS_RTC_CCTRL (0x14)
83#define PRTCSS_RTC_SEC (0x15)
84#define PRTCSS_RTC_MIN (0x16)
85#define PRTCSS_RTC_HOUR (0x17)
86#define PRTCSS_RTC_DAY0 (0x18)
87#define PRTCSS_RTC_DAY1 (0x19)
88#define PRTCSS_RTC_AMIN (0x1A)
89#define PRTCSS_RTC_AHOUR (0x1B)
90#define PRTCSS_RTC_ADAY0 (0x1C)
91#define PRTCSS_RTC_ADAY1 (0x1D)
92#define PRTCSS_RTC_CLKC_CNT (0x20)
93
94/* PRTCSS_RTC_INTC_EXTENA1 */
95#define PRTCSS_RTC_INTC_EXTENA1_MASK (0x07)
96
97/* PRTCSS_RTC_CTRL bit fields */
98#define PRTCSS_RTC_CTRL_WDTBUS BIT(7)
99#define PRTCSS_RTC_CTRL_WEN BIT(6)
100#define PRTCSS_RTC_CTRL_WDRT BIT(5)
101#define PRTCSS_RTC_CTRL_WDTFLG BIT(4)
102#define PRTCSS_RTC_CTRL_TE BIT(3)
103#define PRTCSS_RTC_CTRL_TIEN BIT(2)
104#define PRTCSS_RTC_CTRL_TMRFLG BIT(1)
105#define PRTCSS_RTC_CTRL_TMMD BIT(0)
106
107/* PRTCSS_RTC_CCTRL bit fields */
108#define PRTCSS_RTC_CCTRL_CALBUSY BIT(7)
109#define PRTCSS_RTC_CCTRL_DAEN BIT(5)
110#define PRTCSS_RTC_CCTRL_HAEN BIT(4)
111#define PRTCSS_RTC_CCTRL_MAEN BIT(3)
112#define PRTCSS_RTC_CCTRL_ALMFLG BIT(2)
113#define PRTCSS_RTC_CCTRL_AIEN BIT(1)
114#define PRTCSS_RTC_CCTRL_CAEN BIT(0)
115
116static DEFINE_SPINLOCK(davinci_rtc_lock);
117
118struct davinci_rtc {
119 struct rtc_device *rtc;
120 void __iomem *base;
121 resource_size_t pbase;
122 size_t base_size;
123 int irq;
124};
125
126static inline void rtcif_write(struct davinci_rtc *davinci_rtc,
127 u32 val, u32 addr)
128{
129 writel(val, davinci_rtc->base + addr);
130}
131
132static inline u32 rtcif_read(struct davinci_rtc *davinci_rtc, u32 addr)
133{
134 return readl(davinci_rtc->base + addr);
135}
136
137static inline void rtcif_wait(struct davinci_rtc *davinci_rtc)
138{
139 while (rtcif_read(davinci_rtc, PRTCIF_CTLR) & PRTCIF_CTLR_BUSY)
140 cpu_relax();
141}
142
143static inline void rtcss_write(struct davinci_rtc *davinci_rtc,
144 unsigned long val, u8 addr)
145{
146 rtcif_wait(davinci_rtc);
147
148 rtcif_write(davinci_rtc, PRTCIF_CTLR_BENL_LSB | addr, PRTCIF_CTLR);
149 rtcif_write(davinci_rtc, val, PRTCIF_LDATA);
150
151 rtcif_wait(davinci_rtc);
152}
153
154static inline u8 rtcss_read(struct davinci_rtc *davinci_rtc, u8 addr)
155{
156 rtcif_wait(davinci_rtc);
157
158 rtcif_write(davinci_rtc, PRTCIF_CTLR_DIR | PRTCIF_CTLR_BENL_LSB | addr,
159 PRTCIF_CTLR);
160
161 rtcif_wait(davinci_rtc);
162
163 return rtcif_read(davinci_rtc, PRTCIF_LDATA);
164}
165
166static inline void davinci_rtcss_calendar_wait(struct davinci_rtc *davinci_rtc)
167{
168 while (rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) &
169 PRTCSS_RTC_CCTRL_CALBUSY)
170 cpu_relax();
171}
172
173static irqreturn_t davinci_rtc_interrupt(int irq, void *class_dev)
174{
175 struct davinci_rtc *davinci_rtc = class_dev;
176 unsigned long events = 0;
177 u32 irq_flg;
178 u8 alm_irq, tmr_irq;
179 u8 rtc_ctrl, rtc_cctrl;
180 int ret = IRQ_NONE;
181
182 irq_flg = rtcif_read(davinci_rtc, PRTCIF_INTFLG) &
183 PRTCIF_INTFLG_RTCSS;
184
185 alm_irq = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) &
186 PRTCSS_RTC_CCTRL_ALMFLG;
187
188 tmr_irq = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL) &
189 PRTCSS_RTC_CTRL_TMRFLG;
190
191 if (irq_flg) {
192 if (alm_irq) {
193 events |= RTC_IRQF | RTC_AF;
194 rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
195 rtc_cctrl |= PRTCSS_RTC_CCTRL_ALMFLG;
196 rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
197 } else if (tmr_irq) {
198 events |= RTC_IRQF | RTC_PF;
199 rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
200 rtc_ctrl |= PRTCSS_RTC_CTRL_TMRFLG;
201 rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
202 }
203
204 rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS,
205 PRTCIF_INTFLG);
206 rtc_update_irq(davinci_rtc->rtc, 1, events);
207
208 ret = IRQ_HANDLED;
209 }
210
211 return ret;
212}
213
214static int
215davinci_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
216{
217 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
218 u8 rtc_ctrl;
219 unsigned long flags;
220 int ret = 0;
221
222 spin_lock_irqsave(&davinci_rtc_lock, flags);
223
224 rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
225
226 switch (cmd) {
227 case RTC_WIE_ON:
228 rtc_ctrl |= PRTCSS_RTC_CTRL_WEN | PRTCSS_RTC_CTRL_WDTFLG;
229 break;
230 case RTC_WIE_OFF:
231 rtc_ctrl &= ~PRTCSS_RTC_CTRL_WEN;
232 break;
233 case RTC_UIE_OFF:
234 case RTC_UIE_ON:
235 ret = -ENOTTY;
236 break;
237 default:
238 ret = -ENOIOCTLCMD;
239 }
240
241 rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
242
243 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
244
245 return ret;
246}
247
248static int convertfromdays(u16 days, struct rtc_time *tm)
249{
250 int tmp_days, year, mon;
251
252 for (year = 2000;; year++) {
253 tmp_days = rtc_year_days(1, 12, year);
254 if (days >= tmp_days)
255 days -= tmp_days;
256 else {
257 for (mon = 0;; mon++) {
258 tmp_days = rtc_month_days(mon, year);
259 if (days >= tmp_days) {
260 days -= tmp_days;
261 } else {
262 tm->tm_year = year - 1900;
263 tm->tm_mon = mon;
264 tm->tm_mday = days + 1;
265 break;
266 }
267 }
268 break;
269 }
270 }
271 return 0;
272}
273
274static int convert2days(u16 *days, struct rtc_time *tm)
275{
276 int i;
277 *days = 0;
278
279 /* epoch == 1900 */
280 if (tm->tm_year < 100 || tm->tm_year > 199)
281 return -EINVAL;
282
283 for (i = 2000; i < 1900 + tm->tm_year; i++)
284 *days += rtc_year_days(1, 12, i);
285
286 *days += rtc_year_days(tm->tm_mday, tm->tm_mon, 1900 + tm->tm_year);
287
288 return 0;
289}
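Both helpers above encode the same convention: the RTC's 15-bit DAY0/DAY1 counter holds whole days since 2000-01-01, with day 0 being Jan 1 2000 (rtc_year_days() returns a 0-based day-of-year, hence the +1 when converting back to the 1-based tm_mday). A worked check built on convert2days() from above:

static void demo_day_counter(void)
{
        u16 days;
        struct rtc_time tm = {
                .tm_year = 101,         /* 2001, against the 1900 epoch */
                .tm_mon = 0,            /* January */
                .tm_mday = 1,
        };

        convert2days(&days, &tm);
        /* 2000 is a leap year, so 2001-01-01 lands on day 366:
         * rtc_year_days(1, 12, 2000) == 366, rtc_year_days(1, 0, 2001) == 0. */
}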
290
291static int davinci_rtc_read_time(struct device *dev, struct rtc_time *tm)
292{
293 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
294 u16 days = 0;
295 u8 day0, day1;
296 unsigned long flags;
297
298 spin_lock_irqsave(&davinci_rtc_lock, flags);
299
300 davinci_rtcss_calendar_wait(davinci_rtc);
301 tm->tm_sec = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_SEC));
302
303 davinci_rtcss_calendar_wait(davinci_rtc);
304 tm->tm_min = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_MIN));
305
306 davinci_rtcss_calendar_wait(davinci_rtc);
307 tm->tm_hour = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_HOUR));
308
309 davinci_rtcss_calendar_wait(davinci_rtc);
310 day0 = rtcss_read(davinci_rtc, PRTCSS_RTC_DAY0);
311
312 davinci_rtcss_calendar_wait(davinci_rtc);
313 day1 = rtcss_read(davinci_rtc, PRTCSS_RTC_DAY1);
314
315 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
316
317 days |= day1;
318 days <<= 8;
319 days |= day0;
320
321 if (convertfromdays(days, tm) < 0)
322 return -EINVAL;
323
324 return 0;
325}
326
327static int davinci_rtc_set_time(struct device *dev, struct rtc_time *tm)
328{
329 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
330 u16 days;
331 u8 rtc_cctrl;
332 unsigned long flags;
333
334 if (convert2days(&days, tm) < 0)
335 return -EINVAL;
336
337 spin_lock_irqsave(&davinci_rtc_lock, flags);
338
339 davinci_rtcss_calendar_wait(davinci_rtc);
340 rtcss_write(davinci_rtc, bin2bcd(tm->tm_sec), PRTCSS_RTC_SEC);
341
342 davinci_rtcss_calendar_wait(davinci_rtc);
343 rtcss_write(davinci_rtc, bin2bcd(tm->tm_min), PRTCSS_RTC_MIN);
344
345 davinci_rtcss_calendar_wait(davinci_rtc);
346 rtcss_write(davinci_rtc, bin2bcd(tm->tm_hour), PRTCSS_RTC_HOUR);
347
348 davinci_rtcss_calendar_wait(davinci_rtc);
349 rtcss_write(davinci_rtc, days & 0xFF, PRTCSS_RTC_DAY0);
350
351 davinci_rtcss_calendar_wait(davinci_rtc);
352 rtcss_write(davinci_rtc, (days & 0xFF00) >> 8, PRTCSS_RTC_DAY1);
353
354 rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
355 rtc_cctrl |= PRTCSS_RTC_CCTRL_CAEN;
356 rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
357
358 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
359
360 return 0;
361}
362
363static int davinci_rtc_alarm_irq_enable(struct device *dev,
364 unsigned int enabled)
365{
366 struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
367 unsigned long flags;
368 u8 rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
369
370 spin_lock_irqsave(&davinci_rtc_lock, flags);
371
372 if (enabled)
373 rtc_cctrl |= PRTCSS_RTC_CCTRL_DAEN |
374 PRTCSS_RTC_CCTRL_HAEN |
375 PRTCSS_RTC_CCTRL_MAEN |
376 PRTCSS_RTC_CCTRL_ALMFLG |
377 PRTCSS_RTC_CCTRL_AIEN;
378 else
379 rtc_cctrl &= ~PRTCSS_RTC_CCTRL_AIEN;
380
381 davinci_rtcss_calendar_wait(davinci_rtc);
382 rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
383
384 spin_unlock_irqrestore(&davinci_rtc_lock, flags);
385
386 return 0;
387}

static int davinci_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
	struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
	u16 days = 0;
	u8 day0, day1;
	unsigned long flags;

	spin_lock_irqsave(&davinci_rtc_lock, flags);

	davinci_rtcss_calendar_wait(davinci_rtc);
	alm->time.tm_min = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_AMIN));

	davinci_rtcss_calendar_wait(davinci_rtc);
	alm->time.tm_hour = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_AHOUR));

	davinci_rtcss_calendar_wait(davinci_rtc);
	day0 = rtcss_read(davinci_rtc, PRTCSS_RTC_ADAY0);

	davinci_rtcss_calendar_wait(davinci_rtc);
	day1 = rtcss_read(davinci_rtc, PRTCSS_RTC_ADAY1);

	spin_unlock_irqrestore(&davinci_rtc_lock, flags);
	days |= day1;
	days <<= 8;
	days |= day0;

	if (convertfromdays(days, &alm->time) < 0)
		return -EINVAL;

	alm->pending = !!(rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) &
			  PRTCSS_RTC_CCTRL_AIEN);
	alm->enabled = alm->pending && device_may_wakeup(dev);

	return 0;
}

static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
	struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
	unsigned long flags;
	u16 days;

	if (alm->time.tm_mday <= 0 && alm->time.tm_mon < 0
	    && alm->time.tm_year < 0) {
		struct rtc_time tm;
		unsigned long now, then;

		davinci_rtc_read_time(dev, &tm);
		rtc_tm_to_time(&tm, &now);

		alm->time.tm_mday = tm.tm_mday;
		alm->time.tm_mon = tm.tm_mon;
		alm->time.tm_year = tm.tm_year;
		rtc_tm_to_time(&alm->time, &then);

		if (then < now) {
			rtc_time_to_tm(now + 24 * 60 * 60, &tm);
			alm->time.tm_mday = tm.tm_mday;
			alm->time.tm_mon = tm.tm_mon;
			alm->time.tm_year = tm.tm_year;
		}
	}

	if (convert2days(&days, &alm->time) < 0)
		return -EINVAL;

	spin_lock_irqsave(&davinci_rtc_lock, flags);

	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, bin2bcd(alm->time.tm_min), PRTCSS_RTC_AMIN);

	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, bin2bcd(alm->time.tm_hour), PRTCSS_RTC_AHOUR);

	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, days & 0xFF, PRTCSS_RTC_ADAY0);

	davinci_rtcss_calendar_wait(davinci_rtc);
	rtcss_write(davinci_rtc, (days & 0xFF00) >> 8, PRTCSS_RTC_ADAY1);

	spin_unlock_irqrestore(&davinci_rtc_lock, flags);

	return 0;
}

static int davinci_rtc_irq_set_state(struct device *dev, int enabled)
{
	struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
	unsigned long flags;
	u8 rtc_ctrl;

	spin_lock_irqsave(&davinci_rtc_lock, flags);

	rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);

	if (enabled) {
		while (rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL)
		       & PRTCSS_RTC_CTRL_WDTBUS)
			cpu_relax();

		rtc_ctrl |= PRTCSS_RTC_CTRL_TE;
		rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);

		rtcss_write(davinci_rtc, 0x0, PRTCSS_RTC_CLKC_CNT);

		rtc_ctrl |= PRTCSS_RTC_CTRL_TIEN |
			    PRTCSS_RTC_CTRL_TMMD |
			    PRTCSS_RTC_CTRL_TMRFLG;
	} else
		rtc_ctrl &= ~PRTCSS_RTC_CTRL_TIEN;

	rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);

	spin_unlock_irqrestore(&davinci_rtc_lock, flags);

	return 0;
}

static int davinci_rtc_irq_set_freq(struct device *dev, int freq)
{
	struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
	unsigned long flags;
	u16 tmr_counter = (0x8000 >> (ffs(freq) - 1));

	spin_lock_irqsave(&davinci_rtc_lock, flags);

	rtcss_write(davinci_rtc, tmr_counter & 0xFF, PRTCSS_RTC_TMR0);
	rtcss_write(davinci_rtc, (tmr_counter & 0xFF00) >> 8, PRTCSS_RTC_TMR1);

	spin_unlock_irqrestore(&davinci_rtc_lock, flags);

	return 0;
}
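/*
 * Worked example for the divider above (a sketch, on the assumption that
 * the timer counts a 32.768 kHz reference, which is what 0x8000 == 32768
 * suggests): for freq = 64 Hz, ffs(64) - 1 = 6, so
 * tmr_counter = 0x8000 >> 6 = 512, and 32768 / 512 = 64 expirations per
 * second. The shift is only exact when freq is a power of two in
 * [1, 32768]; other values are mapped via their lowest set bit (e.g.
 * freq = 100 behaves like freq = 4), and freq is not validated here.
 */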

static struct rtc_class_ops davinci_rtc_ops = {
	.ioctl			= davinci_rtc_ioctl,
	.read_time		= davinci_rtc_read_time,
	.set_time		= davinci_rtc_set_time,
	.alarm_irq_enable	= davinci_rtc_alarm_irq_enable,
	.read_alarm		= davinci_rtc_read_alarm,
	.set_alarm		= davinci_rtc_set_alarm,
	.irq_set_state		= davinci_rtc_irq_set_state,
	.irq_set_freq		= davinci_rtc_irq_set_freq,
};
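/*
 * Usage sketch (hypothetical userspace snippet, not part of this driver):
 * the irq_set_state/irq_set_freq hooks above are what the rtc core invokes
 * for the generic periodic-interrupt ioctls, roughly:
 *
 *	int fd = open("/dev/rtc0", O_RDONLY);
 *	unsigned long data;
 *
 *	ioctl(fd, RTC_IRQP_SET, 64);	// request 64 Hz; power of two only
 *	ioctl(fd, RTC_PIE_ON, 0);	// enable periodic interrupts
 *	read(fd, &data, sizeof(data));	// blocks until the next tick
 *
 * RTC_IRQP_SET and RTC_PIE_ON are the standard rtc-dev ioctls; exactly how
 * they are routed to these hooks depends on the rtc core version.
 */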

static int __init davinci_rtc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct davinci_rtc *davinci_rtc;
	struct resource *res, *mem;
	int ret = 0;

	davinci_rtc = kzalloc(sizeof(struct davinci_rtc), GFP_KERNEL);
	if (!davinci_rtc) {
		dev_dbg(dev, "could not allocate memory for private data\n");
		return -ENOMEM;
	}

	davinci_rtc->irq = platform_get_irq(pdev, 0);
	if (davinci_rtc->irq < 0) {
		dev_err(dev, "no RTC irq\n");
		ret = davinci_rtc->irq;
		goto fail1;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "no mem resource\n");
		ret = -EINVAL;
		goto fail1;
	}

	davinci_rtc->pbase = res->start;
	davinci_rtc->base_size = resource_size(res);

	mem = request_mem_region(davinci_rtc->pbase, davinci_rtc->base_size,
				 pdev->name);
	if (!mem) {
		dev_err(dev, "RTC registers at %08x are not free\n",
			davinci_rtc->pbase);
		ret = -EBUSY;
		goto fail1;
	}

	davinci_rtc->base = ioremap(davinci_rtc->pbase, davinci_rtc->base_size);
	if (!davinci_rtc->base) {
		dev_err(dev, "unable to ioremap MEM resource\n");
		ret = -ENOMEM;
		goto fail2;
	}

	davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
					       &davinci_rtc_ops, THIS_MODULE);
	if (IS_ERR(davinci_rtc->rtc)) {
		dev_err(dev, "unable to register RTC device, err %ld\n",
			PTR_ERR(davinci_rtc->rtc));
		ret = PTR_ERR(davinci_rtc->rtc);
		goto fail3;
	}

	rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, PRTCIF_INTFLG);
	rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);
	rtcss_write(davinci_rtc, 0, PRTCSS_RTC_INTC_EXTENA1);

	rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CTRL);
	rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CCTRL);

	ret = request_irq(davinci_rtc->irq, davinci_rtc_interrupt,
			  IRQF_DISABLED, "davinci_rtc", davinci_rtc);
	if (ret < 0) {
		dev_err(dev, "unable to register davinci RTC interrupt\n");
		goto fail4;
	}

	/* Enable interrupts */
	rtcif_write(davinci_rtc, PRTCIF_INTEN_RTCSS, PRTCIF_INTEN);
	rtcss_write(davinci_rtc, PRTCSS_RTC_INTC_EXTENA1_MASK,
		    PRTCSS_RTC_INTC_EXTENA1);

	rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL);

	platform_set_drvdata(pdev, davinci_rtc);

	device_init_wakeup(&pdev->dev, 0);

	return 0;

fail4:
	rtc_device_unregister(davinci_rtc->rtc);
fail3:
	iounmap(davinci_rtc->base);
fail2:
	release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
fail1:
	kfree(davinci_rtc);

	return ret;
}

static int __devexit davinci_rtc_remove(struct platform_device *pdev)
{
	struct davinci_rtc *davinci_rtc = platform_get_drvdata(pdev);

	device_init_wakeup(&pdev->dev, 0);

	rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);

	free_irq(davinci_rtc->irq, davinci_rtc);

	rtc_device_unregister(davinci_rtc->rtc);

	iounmap(davinci_rtc->base);
	release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);

	platform_set_drvdata(pdev, NULL);

	kfree(davinci_rtc);

	return 0;
}

static struct platform_driver davinci_rtc_driver = {
	.probe		= davinci_rtc_probe,
	.remove		= __devexit_p(davinci_rtc_remove),
	.driver		= {
		.name	= "rtc_davinci",
		.owner	= THIS_MODULE,
	},
};

static int __init rtc_init(void)
{
	return platform_driver_probe(&davinci_rtc_driver, davinci_rtc_probe);
}
module_init(rtc_init);

static void __exit rtc_exit(void)
{
	platform_driver_unregister(&davinci_rtc_driver);
}
module_exit(rtc_exit);

MODULE_AUTHOR("Miguel Aguilar <miguel.aguilar@ridgerun.com>");
MODULE_DESCRIPTION("Texas Instruments DaVinci PRTC Driver");
MODULE_LICENSE("GPL");
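For context, a minimal sketch of the board-support side this driver expects to pair with; the addresses and irq number here are illustrative assumptions, not taken from this patch:

static struct resource davinci_rtc_resources[] = {
	{
		.start	= 0x01c23000,		/* hypothetical PRTC base */
		.end	= 0x01c23000 + SZ_4K - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 24,			/* hypothetical RTC irq */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device davinci_rtc_device = {
	.name		= "rtc_davinci",	/* must match the driver name */
	.id		= -1,
	.resource	= davinci_rtc_resources,
	.num_resources	= ARRAY_SIZE(davinci_rtc_resources),
};

/* board init code would then call:
 * platform_device_register(&davinci_rtc_device); */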
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index acf222f91f5a..0e86247d791e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -37,6 +37,9 @@
  */
 #define DASD_CHANQ_MAX_SIZE 4
 
+#define DASD_SLEEPON_START_TAG	(void *) 1
+#define DASD_SLEEPON_END_TAG	(void *) 2
+
 /*
  * SECTION: exported variables of dasd.c
  */
@@ -62,6 +65,7 @@ static void dasd_device_tasklet(struct dasd_device *);
 static void dasd_block_tasklet(struct dasd_block *);
 static void do_kick_device(struct work_struct *);
 static void do_restore_device(struct work_struct *);
+static void do_reload_device(struct work_struct *);
 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
 static void dasd_device_timeout(unsigned long);
 static void dasd_block_timeout(unsigned long);
@@ -112,6 +116,7 @@ struct dasd_device *dasd_alloc_device(void)
 	device->timer.data = (unsigned long) device;
 	INIT_WORK(&device->kick_work, do_kick_device);
 	INIT_WORK(&device->restore_device, do_restore_device);
+	INIT_WORK(&device->reload_device, do_reload_device);
 	device->state = DASD_STATE_NEW;
 	device->target = DASD_STATE_NEW;
 	mutex_init(&device->state_mutex);
@@ -518,6 +523,26 @@ void dasd_kick_device(struct dasd_device *device)
 }
 
 /*
+ * dasd_reload_device will schedule a call to do_reload_device to the kernel
+ * event daemon.
+ */
+static void do_reload_device(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  reload_device);
+	device->discipline->reload(device);
+	dasd_put_device(device);
+}
+
+void dasd_reload_device(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* queue call to dasd_reload_device to the kernel event daemon. */
+	schedule_work(&device->reload_device);
+}
+EXPORT_SYMBOL(dasd_reload_device);
+
+/*
  * dasd_restore_device will schedule a call to do_restore_device to the kernel
  * event daemon.
  */
@@ -1472,7 +1497,10 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr)
  */
 static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
 {
-	wake_up((wait_queue_head_t *) data);
+	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+	cqr->callback_data = DASD_SLEEPON_END_TAG;
+	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+	wake_up(&generic_waitq);
 }
 
 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
@@ -1482,10 +1510,7 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
 
 	device = cqr->startdev;
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
-	rc = ((cqr->status == DASD_CQR_DONE ||
-	       cqr->status == DASD_CQR_NEED_ERP ||
-	       cqr->status == DASD_CQR_TERMINATED) &&
-	      list_empty(&cqr->devlist));
+	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	return rc;
 }
@@ -1573,7 +1598,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
 	wait_event(generic_waitq, !(device->stopped));
 
 	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &generic_waitq;
+	cqr->callback_data = DASD_SLEEPON_START_TAG;
 	dasd_add_request_tail(cqr);
 	if (interruptible) {
 		rc = wait_event_interruptible(
@@ -1652,7 +1677,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 	}
 
 	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &generic_waitq;
+	cqr->callback_data = DASD_SLEEPON_START_TAG;
 	cqr->status = DASD_CQR_QUEUED;
 	list_add(&cqr->devlist, &device->ccw_queue);
 
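The tag scheme in these hunks replaces a stored wait-queue pointer with two sentinel values, so the waiter no longer has to infer completion from request status. A condensed sketch of the pattern, using the names introduced above (simplified; the real code takes the per-device cdev lock shown in the hunks):

static DECLARE_WAIT_QUEUE_HEAD(generic_waitq);

/* submitter: mark the request as started, then queue it */
cqr->callback = dasd_wakeup_cb;
cqr->callback_data = DASD_SLEEPON_START_TAG;
dasd_add_request_tail(cqr);
wait_event(generic_waitq, _wait_for_wakeup(cqr));

/* callback: flip the tag under the lock, then wake all sleepers on the
 * shared queue; each sleeper re-checks its own request's tag (under the
 * same lock) in _wait_for_wakeup(), so spurious wakeups are harmless */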
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 6632649dd6aa..85bfd8794856 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1418,9 +1418,29 @@ static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
 						struct dasd_ccw_req *erp)
 {
 	struct dasd_ccw_req *cqr = erp->refers;
+	char *sense;
 
 	if (cqr->block &&
 	    (cqr->block->base != cqr->startdev)) {
+
+		sense = dasd_get_sense(&erp->refers->irb);
+		/*
+		 * dynamic pav may have changed base alias mapping
+		 */
+		if (!test_bit(DASD_FLAG_OFFLINE, &cqr->startdev->flags) && sense
+		    && (sense[0] == 0x10) && (sense[7] == 0x0F)
+		    && (sense[8] == 0x67)) {
+			/*
+			 * remove device from alias handling to prevent new
+			 * requests from being scheduled on the
+			 * wrong alias device
+			 */
+			dasd_alias_remove_device(cqr->startdev);
+
+			/* schedule worker to reload device */
+			dasd_reload_device(cqr->startdev);
+		}
+
 		if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
 			DBF_DEV_EVENT(DBF_ERR, cqr->startdev,
 				      "ERP on alias device for request %p,"
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 8c4814258e93..4155805dcdff 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -190,20 +190,21 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
 	struct alias_server *server, *newserver;
 	struct alias_lcu *lcu, *newlcu;
 	int is_lcu_known;
-	struct dasd_uid *uid;
+	struct dasd_uid uid;
 
 	private = (struct dasd_eckd_private *) device->private;
-	uid = &private->uid;
+
+	device->discipline->get_uid(device, &uid);
 	spin_lock_irqsave(&aliastree.lock, flags);
 	is_lcu_known = 1;
-	server = _find_server(uid);
+	server = _find_server(&uid);
 	if (!server) {
 		spin_unlock_irqrestore(&aliastree.lock, flags);
-		newserver = _allocate_server(uid);
+		newserver = _allocate_server(&uid);
 		if (IS_ERR(newserver))
 			return PTR_ERR(newserver);
 		spin_lock_irqsave(&aliastree.lock, flags);
-		server = _find_server(uid);
+		server = _find_server(&uid);
 		if (!server) {
 			list_add(&newserver->server, &aliastree.serverlist);
 			server = newserver;
@@ -214,14 +215,14 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
 		}
 	}
 
-	lcu = _find_lcu(server, uid);
+	lcu = _find_lcu(server, &uid);
 	if (!lcu) {
 		spin_unlock_irqrestore(&aliastree.lock, flags);
-		newlcu = _allocate_lcu(uid);
+		newlcu = _allocate_lcu(&uid);
 		if (IS_ERR(newlcu))
 			return PTR_ERR(newlcu);
 		spin_lock_irqsave(&aliastree.lock, flags);
-		lcu = _find_lcu(server, uid);
+		lcu = _find_lcu(server, &uid);
 		if (!lcu) {
 			list_add(&newlcu->lcu, &server->lculist);
 			lcu = newlcu;
@@ -256,20 +257,20 @@ void dasd_alias_lcu_setup_complete(struct dasd_device *device)
 	unsigned long flags;
 	struct alias_server *server;
 	struct alias_lcu *lcu;
-	struct dasd_uid *uid;
+	struct dasd_uid uid;
 
 	private = (struct dasd_eckd_private *) device->private;
-	uid = &private->uid;
+	device->discipline->get_uid(device, &uid);
 	lcu = NULL;
 	spin_lock_irqsave(&aliastree.lock, flags);
-	server = _find_server(uid);
+	server = _find_server(&uid);
 	if (server)
-		lcu = _find_lcu(server, uid);
+		lcu = _find_lcu(server, &uid);
 	spin_unlock_irqrestore(&aliastree.lock, flags);
 	if (!lcu) {
 		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
 				"could not find lcu for %04x %02x",
-				uid->ssid, uid->real_unit_addr);
+				uid.ssid, uid.real_unit_addr);
 		WARN_ON(1);
 		return;
 	}
@@ -282,20 +283,20 @@ void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
 	unsigned long flags;
 	struct alias_server *server;
 	struct alias_lcu *lcu;
-	struct dasd_uid *uid;
+	struct dasd_uid uid;
 
 	private = (struct dasd_eckd_private *) device->private;
-	uid = &private->uid;
+	device->discipline->get_uid(device, &uid);
 	lcu = NULL;
 	spin_lock_irqsave(&aliastree.lock, flags);
-	server = _find_server(uid);
+	server = _find_server(&uid);
 	if (server)
-		lcu = _find_lcu(server, uid);
+		lcu = _find_lcu(server, &uid);
 	spin_unlock_irqrestore(&aliastree.lock, flags);
 	if (!lcu) {
 		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
 				"could not find lcu for %04x %02x",
-				uid->ssid, uid->real_unit_addr);
+				uid.ssid, uid.real_unit_addr);
 		WARN_ON(1);
 		return;
 	}
@@ -314,9 +315,11 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
 	struct alias_lcu *lcu;
 	struct alias_server *server;
 	int was_pending;
+	struct dasd_uid uid;
 
 	private = (struct dasd_eckd_private *) device->private;
 	lcu = private->lcu;
+	device->discipline->get_uid(device, &uid);
 	spin_lock_irqsave(&lcu->lock, flags);
 	list_del_init(&device->alias_list);
 	/* make sure that the workers don't use this device */
@@ -353,7 +356,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
 		_schedule_lcu_update(lcu, NULL);
 		spin_unlock(&lcu->lock);
 	}
-	server = _find_server(&private->uid);
+	server = _find_server(&uid);
 	if (server && list_empty(&server->lculist)) {
 		list_del(&server->server);
 		_free_server(server);
@@ -366,19 +369,30 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
  * in the lcu is up to date and will update the device uid before
  * adding it to a pav group.
  */
+
 static int _add_device_to_lcu(struct alias_lcu *lcu,
-			      struct dasd_device *device)
+			      struct dasd_device *device,
+			      struct dasd_device *pos)
 {
 
 	struct dasd_eckd_private *private;
 	struct alias_pav_group *group;
-	struct dasd_uid *uid;
+	struct dasd_uid uid;
+	unsigned long flags;
 
 	private = (struct dasd_eckd_private *) device->private;
-	uid = &private->uid;
-	uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
-	uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
-	dasd_set_uid(device->cdev, &private->uid);
+
+	/* only lock if not already locked */
+	if (device != pos)
+		spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
+					 CDEV_NESTED_SECOND);
+	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
+	private->uid.base_unit_addr =
+		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
+	uid = private->uid;
+
+	if (device != pos)
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 
 	/* if we have no PAV anyway, we don't need to bother with PAV groups */
 	if (lcu->pav == NO_PAV) {
@@ -386,25 +400,25 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
 		return 0;
 	}
 
-	group = _find_group(lcu, uid);
+	group = _find_group(lcu, &uid);
 	if (!group) {
 		group = kzalloc(sizeof(*group), GFP_ATOMIC);
 		if (!group)
 			return -ENOMEM;
-		memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
-		memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
-		group->uid.ssid = uid->ssid;
-		if (uid->type == UA_BASE_DEVICE)
-			group->uid.base_unit_addr = uid->real_unit_addr;
+		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
+		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
+		group->uid.ssid = uid.ssid;
+		if (uid.type == UA_BASE_DEVICE)
+			group->uid.base_unit_addr = uid.real_unit_addr;
 		else
-			group->uid.base_unit_addr = uid->base_unit_addr;
-		memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
+			group->uid.base_unit_addr = uid.base_unit_addr;
+		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
 		INIT_LIST_HEAD(&group->group);
 		INIT_LIST_HEAD(&group->baselist);
 		INIT_LIST_HEAD(&group->aliaslist);
 		list_add(&group->group, &lcu->grouplist);
 	}
-	if (uid->type == UA_BASE_DEVICE)
+	if (uid.type == UA_BASE_DEVICE)
 		list_move(&device->alias_list, &group->baselist);
 	else
 		list_move(&device->alias_list, &group->aliaslist);
@@ -525,7 +539,10 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
 	if (rc)
 		return rc;
 
-	spin_lock_irqsave(&lcu->lock, flags);
+	/* need to take cdev lock before lcu lock */
+	spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
+				 CDEV_NESTED_FIRST);
+	spin_lock(&lcu->lock);
 	lcu->pav = NO_PAV;
 	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
 		switch (lcu->uac->unit[i].ua_type) {
@@ -542,9 +559,10 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
 
 	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
 				 alias_list) {
-		_add_device_to_lcu(lcu, device);
+		_add_device_to_lcu(lcu, device, refdev);
 	}
-	spin_unlock_irqrestore(&lcu->lock, flags);
+	spin_unlock(&lcu->lock);
+	spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
 	return 0;
 }
 
@@ -628,9 +646,12 @@ int dasd_alias_add_device(struct dasd_device *device)
 	private = (struct dasd_eckd_private *) device->private;
 	lcu = private->lcu;
 	rc = 0;
-	spin_lock_irqsave(&lcu->lock, flags);
+
+	/* need to take cdev lock before lcu lock */
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	spin_lock(&lcu->lock);
 	if (!(lcu->flags & UPDATE_PENDING)) {
-		rc = _add_device_to_lcu(lcu, device);
+		rc = _add_device_to_lcu(lcu, device, device);
 		if (rc)
 			lcu->flags |= UPDATE_PENDING;
 	}
@@ -638,10 +659,19 @@ int dasd_alias_add_device(struct dasd_device *device)
 		list_move(&device->alias_list, &lcu->active_devices);
 		_schedule_lcu_update(lcu, device);
 	}
-	spin_unlock_irqrestore(&lcu->lock, flags);
+	spin_unlock(&lcu->lock);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	return rc;
 }
 
+int dasd_alias_update_add_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	private = (struct dasd_eckd_private *) device->private;
+	private->lcu->flags |= UPDATE_PENDING;
+	return dasd_alias_add_device(device);
+}
+
 int dasd_alias_remove_device(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private;
@@ -740,19 +770,30 @@ static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
 	struct alias_pav_group *pavgroup;
 	struct dasd_device *device;
 	struct dasd_eckd_private *private;
+	unsigned long flags;
 
 	/* active and inactive list can contain alias as well as base devices */
 	list_for_each_entry(device, &lcu->active_devices, alias_list) {
 		private = (struct dasd_eckd_private *) device->private;
-		if (private->uid.type != UA_BASE_DEVICE)
+		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		if (private->uid.type != UA_BASE_DEVICE) {
+			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
+					       flags);
 			continue;
+		}
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 		dasd_schedule_block_bh(device->block);
 		dasd_schedule_device_bh(device);
 	}
 	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
 		private = (struct dasd_eckd_private *) device->private;
-		if (private->uid.type != UA_BASE_DEVICE)
+		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		if (private->uid.type != UA_BASE_DEVICE) {
+			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
			continue;
+		}
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 		dasd_schedule_block_bh(device->block);
 		dasd_schedule_device_bh(device);
 	}
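The CDEV_NESTED_* values (added to dasd_int.h further below) are lockdep subclass annotations: both locks belong to the same ccw-device lock class, so holding two of them at once needs the _nested variants to keep lockdep quiet. The resulting order, condensed from the hunks above:

unsigned long flags;

/* _lcu_update(): the reference device's cdev lock first, then the lcu lock */
spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
			 CDEV_NESTED_FIRST);
spin_lock(&lcu->lock);

/* inside, _add_device_to_lcu() may take a second cdev lock of the same
 * lock class, hence the CDEV_NESTED_SECOND subclass on that acquisition */

spin_unlock(&lcu->lock);
spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);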
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index eff9c812c5c2..34d51dd4c539 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -49,7 +49,6 @@ struct dasd_devmap {
 	unsigned int devindex;
 	unsigned short features;
	struct dasd_device *device;
-	struct dasd_uid uid;
 };
 
 /*
@@ -936,42 +935,46 @@ dasd_device_status_show(struct device *dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
 
-static ssize_t
-dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dasd_alias_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
 {
-	struct dasd_devmap *devmap;
-	int alias;
+	struct dasd_device *device;
+	struct dasd_uid uid;
 
-	devmap = dasd_find_busid(dev_name(dev));
-	spin_lock(&dasd_devmap_lock);
-	if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
-		spin_unlock(&dasd_devmap_lock);
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
 		return sprintf(buf, "0\n");
+
+	if (device->discipline && device->discipline->get_uid &&
+	    !device->discipline->get_uid(device, &uid)) {
+		if (uid.type == UA_BASE_PAV_ALIAS ||
+		    uid.type == UA_HYPER_PAV_ALIAS)
+			return sprintf(buf, "1\n");
 	}
-	if (devmap->uid.type == UA_BASE_PAV_ALIAS ||
-	    devmap->uid.type == UA_HYPER_PAV_ALIAS)
-		alias = 1;
-	else
-		alias = 0;
-	spin_unlock(&dasd_devmap_lock);
-	return sprintf(buf, alias ? "1\n" : "0\n");
+	dasd_put_device(device);
+
+	return sprintf(buf, "0\n");
 }
 
 static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
 
-static ssize_t
-dasd_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dasd_vendor_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
-	struct dasd_devmap *devmap;
+	struct dasd_device *device;
+	struct dasd_uid uid;
 	char *vendor;
 
-	devmap = dasd_find_busid(dev_name(dev));
-	spin_lock(&dasd_devmap_lock);
-	if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0)
-		vendor = devmap->uid.vendor;
-	else
-		vendor = "";
-	spin_unlock(&dasd_devmap_lock);
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	vendor = "";
+	if (IS_ERR(device))
+		return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+
+	if (device->discipline && device->discipline->get_uid &&
+	    !device->discipline->get_uid(device, &uid))
+		vendor = uid.vendor;
+
+	dasd_put_device(device);
 
 	return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
 }
@@ -985,48 +988,51 @@ static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
 static ssize_t
 dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct dasd_devmap *devmap;
+	struct dasd_device *device;
+	struct dasd_uid uid;
 	char uid_string[UID_STRLEN];
 	char ua_string[3];
-	struct dasd_uid *uid;
 
-	devmap = dasd_find_busid(dev_name(dev));
-	spin_lock(&dasd_devmap_lock);
-	if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
-		spin_unlock(&dasd_devmap_lock);
-		return sprintf(buf, "\n");
-	}
-	uid = &devmap->uid;
-	switch (uid->type) {
-	case UA_BASE_DEVICE:
-		sprintf(ua_string, "%02x", uid->real_unit_addr);
-		break;
-	case UA_BASE_PAV_ALIAS:
-		sprintf(ua_string, "%02x", uid->base_unit_addr);
-		break;
-	case UA_HYPER_PAV_ALIAS:
-		sprintf(ua_string, "xx");
-		break;
-	default:
-		/* should not happen, treat like base device */
-		sprintf(ua_string, "%02x", uid->real_unit_addr);
-		break;
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	uid_string[0] = 0;
+	if (IS_ERR(device))
+		return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+
+	if (device->discipline && device->discipline->get_uid &&
+	    !device->discipline->get_uid(device, &uid)) {
+		switch (uid.type) {
+		case UA_BASE_DEVICE:
+			snprintf(ua_string, sizeof(ua_string), "%02x",
+				 uid.real_unit_addr);
+			break;
+		case UA_BASE_PAV_ALIAS:
+			snprintf(ua_string, sizeof(ua_string), "%02x",
+				 uid.base_unit_addr);
+			break;
+		case UA_HYPER_PAV_ALIAS:
+			snprintf(ua_string, sizeof(ua_string), "xx");
+			break;
+		default:
+			/* should not happen, treat like base device */
+			snprintf(ua_string, sizeof(ua_string), "%02x",
+				 uid.real_unit_addr);
+			break;
+		}
+
+		if (strlen(uid.vduit) > 0)
+			snprintf(uid_string, sizeof(uid_string),
+				 "%s.%s.%04x.%s.%s",
+				 uid.vendor, uid.serial, uid.ssid, ua_string,
+				 uid.vduit);
+		else
+			snprintf(uid_string, sizeof(uid_string),
+				 "%s.%s.%04x.%s",
+				 uid.vendor, uid.serial, uid.ssid, ua_string);
 	}
-	if (strlen(uid->vduit) > 0)
-		snprintf(uid_string, sizeof(uid_string),
-			 "%s.%s.%04x.%s.%s",
-			 uid->vendor, uid->serial,
-			 uid->ssid, ua_string,
-			 uid->vduit);
-	else
-		snprintf(uid_string, sizeof(uid_string),
-			 "%s.%s.%04x.%s",
-			 uid->vendor, uid->serial,
-			 uid->ssid, ua_string);
-	spin_unlock(&dasd_devmap_lock);
+	dasd_put_device(device);
+
 	return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
 }
-
 static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
 
 /*
@@ -1094,50 +1100,6 @@ static struct attribute_group dasd_attr_group = {
 };
 
 /*
- * Return copy of the device unique identifier.
- */
-int
-dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
-{
-	struct dasd_devmap *devmap;
-
-	devmap = dasd_find_busid(dev_name(&cdev->dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-	spin_lock(&dasd_devmap_lock);
-	*uid = devmap->uid;
-	spin_unlock(&dasd_devmap_lock);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dasd_get_uid);
-
-/*
- * Register the given device unique identifier into devmap struct.
- * In addition check if the related storage server subsystem ID is already
- * contained in the dasd_server_ssid_list. If subsystem ID is not contained,
- * create new entry.
- * Return 0 if server was already in serverlist,
- * 1 if the server was added successful
- * <0 in case of error.
- */
-int
-dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
-{
-	struct dasd_devmap *devmap;
-
-	devmap = dasd_find_busid(dev_name(&cdev->dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-
-	spin_lock(&dasd_devmap_lock);
-	devmap->uid = *uid;
-	spin_unlock(&dasd_devmap_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dasd_set_uid);
-
-/*
  * Return value of the specified feature.
  */
 int
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 0cb233116855..5b1cd8d6e971 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -692,18 +692,20 @@ dasd_eckd_cdl_reclen(int recid)
 /*
  * Generate device unique id that specifies the physical device.
  */
-static int dasd_eckd_generate_uid(struct dasd_device *device,
-				  struct dasd_uid *uid)
+static int dasd_eckd_generate_uid(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private;
+	struct dasd_uid *uid;
 	int count;
+	unsigned long flags;
 
 	private = (struct dasd_eckd_private *) device->private;
 	if (!private)
 		return -ENODEV;
 	if (!private->ned || !private->gneq)
 		return -ENODEV;
-
+	uid = &private->uid;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	memset(uid, 0, sizeof(struct dasd_uid));
 	memcpy(uid->vendor, private->ned->HDA_manufacturer,
 	       sizeof(uid->vendor) - 1);
@@ -726,9 +728,25 @@ static int dasd_eckd_generate_uid(struct dasd_device *device,
 					private->vdsneq->uit[count]);
 		}
 	}
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	return 0;
 }
 
+static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
+{
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+
+	if (device->private) {
+		private = (struct dasd_eckd_private *)device->private;
+		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		*uid = private->uid;
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+		return 0;
+	}
+	return -EINVAL;
+}
+
 static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
 						    void *rcd_buffer,
 						    struct ciw *ciw, __u8 lpm)
@@ -1088,6 +1106,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private;
 	struct dasd_block *block;
+	struct dasd_uid temp_uid;
 	int is_known, rc;
 	int readonly;
 
@@ -1124,13 +1143,13 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 	if (rc)
 		goto out_err1;
 
-	/* Generate device unique id and register in devmap */
-	rc = dasd_eckd_generate_uid(device, &private->uid);
+	/* Generate device unique id */
+	rc = dasd_eckd_generate_uid(device);
 	if (rc)
 		goto out_err1;
-	dasd_set_uid(device->cdev, &private->uid);
 
-	if (private->uid.type == UA_BASE_DEVICE) {
+	dasd_eckd_get_uid(device, &temp_uid);
+	if (temp_uid.type == UA_BASE_DEVICE) {
 		block = dasd_alloc_block();
 		if (IS_ERR(block)) {
 			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
@@ -1451,6 +1470,7 @@ static int dasd_eckd_ready_to_online(struct dasd_device *device)
 
 static int dasd_eckd_online_to_ready(struct dasd_device *device)
 {
+	cancel_work_sync(&device->reload_device);
 	return dasd_alias_remove_device(device);
 };
 
@@ -1709,10 +1729,27 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
 {
 	char mask;
 	char *sense = NULL;
+	struct dasd_eckd_private *private;
 
+	private = (struct dasd_eckd_private *) device->private;
 	/* first of all check for state change pending interrupt */
 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
 	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
+		/* for alias only and not in offline processing */
+		if (!device->block && private->lcu &&
+		    !test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+			/*
+			 * the state change could be caused by an alias
+			 * reassignment; remove device from alias handling
+			 * to prevent new requests from being scheduled on
+			 * the wrong alias device
+			 */
+			dasd_alias_remove_device(device);
+
+			/* schedule worker to reload device */
+			dasd_reload_device(device);
+		}
+
 		dasd_generic_handle_state_change(device);
 		return;
 	}
@@ -3259,7 +3296,7 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
 	dasd_eckd_dump_sense_ccw(device, req, irb);
 }
 
-int dasd_eckd_pm_freeze(struct dasd_device *device)
+static int dasd_eckd_pm_freeze(struct dasd_device *device)
 {
 	/*
 	 * the device should be disconnected from our LCU structure
@@ -3272,7 +3309,7 @@ static int dasd_eckd_pm_freeze(struct dasd_device *device)
 	return 0;
 }
 
-int dasd_eckd_restore_device(struct dasd_device *device)
+static int dasd_eckd_restore_device(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private;
 	struct dasd_eckd_characteristics temp_rdc_data;
@@ -3287,15 +3324,16 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
 	if (rc)
 		goto out_err;
 
-	/* Generate device unique id and register in devmap */
-	rc = dasd_eckd_generate_uid(device, &private->uid);
-	dasd_get_uid(device->cdev, &temp_uid);
+	dasd_eckd_get_uid(device, &temp_uid);
+	/* Generate device unique id */
+	rc = dasd_eckd_generate_uid(device);
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
 		dev_err(&device->cdev->dev, "The UID of the DASD has "
 			"changed\n");
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	if (rc)
 		goto out_err;
-	dasd_set_uid(device->cdev, &private->uid);
 
 	/* register lcu with alias handling, enable PAV if this is a new lcu */
 	is_known = dasd_alias_make_device_known_to_lcu(device);
@@ -3336,6 +3374,56 @@ out_err:
 	return -1;
 }
 
+static int dasd_eckd_reload_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	int rc, old_base;
+	char print_uid[60];
+	struct dasd_uid uid;
+	unsigned long flags;
+
+	private = (struct dasd_eckd_private *) device->private;
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	old_base = private->uid.base_unit_addr;
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	/* Read Configuration Data */
+	rc = dasd_eckd_read_conf(device);
+	if (rc)
+		goto out_err;
+
+	rc = dasd_eckd_generate_uid(device);
+	if (rc)
+		goto out_err;
+	/*
+	 * update unit address configuration and
+	 * add device to alias management
+	 */
+	dasd_alias_update_add_device(device);
+
+	dasd_eckd_get_uid(device, &uid);
+
+	if (old_base != uid.base_unit_addr) {
+		if (strlen(uid.vduit) > 0)
+			snprintf(print_uid, sizeof(print_uid),
+				 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
+				 uid.ssid, uid.base_unit_addr, uid.vduit);
+		else
+			snprintf(print_uid, sizeof(print_uid),
+				 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
+				 uid.ssid, uid.base_unit_addr);
+
+		dev_info(&device->cdev->dev,
+			 "An Alias device was reassigned to a new base device "
+			 "with UID: %s\n", print_uid);
+	}
+	return 0;
+
+out_err:
+	return -1;
+}
+
 static struct ccw_driver dasd_eckd_driver = {
 	.name	     = "dasd-eckd",
 	.owner	     = THIS_MODULE,
@@ -3389,6 +3477,8 @@ static struct dasd_discipline dasd_eckd_discipline = {
 	.ioctl = dasd_eckd_ioctl,
 	.freeze = dasd_eckd_pm_freeze,
 	.restore = dasd_eckd_restore_device,
+	.reload = dasd_eckd_reload_device,
+	.get_uid = dasd_eckd_get_uid,
 };
 
 static int __init
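The get_uid callback added here becomes the only way for common code to read a device's UID: it hands out a copy taken under the cdev lock, so callers never hold a pointer into data that dasd_eckd_reload_device() may rewrite. The calling pattern, as used throughout this patch:

struct dasd_uid uid;

if (device->discipline && device->discipline->get_uid &&
    !device->discipline->get_uid(device, &uid)) {
	/* uid is now a private snapshot; safe to use without further locking */
}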
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 864d53c04201..dd6385a5af14 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -426,7 +426,6 @@ struct alias_pav_group {
 	struct dasd_device *next;
 };
 
-
 struct dasd_eckd_private {
 	struct dasd_eckd_characteristics rdc_data;
 	u8 *conf_data;
@@ -463,4 +462,5 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
 void dasd_alias_lcu_setup_complete(struct dasd_device *);
 void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
+int dasd_alias_update_add_device(struct dasd_device *);
 #endif				/* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index a91d4a97d4f2..32fac186ba3f 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -81,6 +81,10 @@ struct dasd_block;
 #define DASD_SIM_MSG_TO_OP 0x03
 #define DASD_SIM_LOG 0x0C
 
+/* lock class for nested cdev lock */
+#define CDEV_NESTED_FIRST 1
+#define CDEV_NESTED_SECOND 2
+
 /*
  * SECTION: MACROs for klogd and s390 debug feature (dbf)
  */
@@ -229,6 +233,24 @@ struct dasd_ccw_req {
 typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
 
 /*
+ * Unique identifier for dasd device.
+ */
+#define UA_NOT_CONFIGURED  0x00
+#define UA_BASE_DEVICE	   0x01
+#define UA_BASE_PAV_ALIAS  0x02
+#define UA_HYPER_PAV_ALIAS 0x03
+
+struct dasd_uid {
+	__u8 type;
+	char vendor[4];
+	char serial[15];
+	__u16 ssid;
+	__u8 real_unit_addr;
+	__u8 base_unit_addr;
+	char vduit[33];
+};
+
+/*
  * the struct dasd_discipline is
  * sth like a table of virtual functions, if you think of dasd_eckd
  * inheriting dasd...
@@ -312,28 +334,15 @@ struct dasd_discipline {
 	/* suspend/resume functions */
 	int (*freeze) (struct dasd_device *);
 	int (*restore) (struct dasd_device *);
-};
 
-extern struct dasd_discipline *dasd_diag_discipline_pointer;
+	/* reload device after state change */
+	int (*reload) (struct dasd_device *);
 
-/*
- * Unique identifier for dasd device.
- */
-#define UA_NOT_CONFIGURED  0x00
-#define UA_BASE_DEVICE	   0x01
-#define UA_BASE_PAV_ALIAS  0x02
-#define UA_HYPER_PAV_ALIAS 0x03
-
-struct dasd_uid {
-	__u8 type;
-	char vendor[4];
-	char serial[15];
-	__u16 ssid;
-	__u8 real_unit_addr;
-	__u8 base_unit_addr;
-	char vduit[33];
+	int (*get_uid) (struct dasd_device *, struct dasd_uid *);
 };
 
+extern struct dasd_discipline *dasd_diag_discipline_pointer;
+
 /*
  * Notification numbers for extended error reporting notifications:
  * The DASD_EER_DISABLE notification is sent before a dasd_device (and it's
@@ -386,6 +395,7 @@ struct dasd_device {
 	struct tasklet_struct tasklet;
 	struct work_struct kick_work;
 	struct work_struct restore_device;
+	struct work_struct reload_device;
 	struct timer_list timer;
 
 	debug_info_t *debug_area;
@@ -582,6 +592,7 @@ void dasd_enable_device(struct dasd_device *);
 void dasd_set_target_state(struct dasd_device *, int);
 void dasd_kick_device(struct dasd_device *);
 void dasd_restore_device(struct dasd_device *);
+void dasd_reload_device(struct dasd_device *);
 
 void dasd_add_request_head(struct dasd_ccw_req *);
 void dasd_add_request_tail(struct dasd_ccw_req *);
@@ -629,8 +640,6 @@ void dasd_devmap_exit(void);
 struct dasd_device *dasd_create_device(struct ccw_device *);
 void dasd_delete_device(struct dasd_device *);
 
-int dasd_get_uid(struct ccw_device *, struct dasd_uid *);
-int dasd_set_uid(struct ccw_device *, struct dasd_uid *);
 int dasd_get_feature(struct ccw_device *, int);
 int dasd_set_feature(struct ccw_device *, int, int);
 
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 4e34d3686c23..40834f18754c 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -148,13 +148,12 @@ config VMLOGRDR
 	  This driver depends on the IUCV support driver.
 
 config VMCP
-	tristate "Support for the z/VM CP interface (VM only)"
+	bool "Support for the z/VM CP interface"
 	depends on S390
 	help
 	  Select this option if you want to be able to interact with the control
 	  program on z/VM
 
-
 config MONREADER
 	tristate "API for reading z/VM monitor service records"
 	depends on IUCV
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 0eabcca3c92d..857dfcb7b359 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -484,6 +484,7 @@ fs3270_open(struct inode *inode, struct file *filp)
 		raw3270_del_view(&fp->view);
 		goto out;
 	}
+	nonseekable_open(inode, filp);
 	filp->private_data = fp;
 out:
 	mutex_unlock(&fs3270_mutex);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index cb6bffe7141a..18d9a497863b 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -49,7 +49,7 @@ static unsigned char ret_diacr[NR_DEAD] = {
 struct kbd_data *
 kbd_alloc(void) {
 	struct kbd_data *kbd;
-	int i, len;
+	int i;
 
 	kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
 	if (!kbd)
@@ -59,12 +59,11 @@ kbd_alloc(void) {
 		goto out_kbd;
 	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
 		if (key_maps[i]) {
-			kbd->key_maps[i] =
-				kmalloc(sizeof(u_short)*NR_KEYS, GFP_KERNEL);
+			kbd->key_maps[i] = kmemdup(key_maps[i],
+						   sizeof(u_short) * NR_KEYS,
+						   GFP_KERNEL);
 			if (!kbd->key_maps[i])
 				goto out_maps;
-			memcpy(kbd->key_maps[i], key_maps[i],
-			       sizeof(u_short)*NR_KEYS);
 		}
 	}
 	kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL);
@@ -72,23 +71,21 @@ kbd_alloc(void) {
 		goto out_maps;
 	for (i = 0; i < ARRAY_SIZE(func_table); i++) {
 		if (func_table[i]) {
-			len = strlen(func_table[i]) + 1;
-			kbd->func_table[i] = kmalloc(len, GFP_KERNEL);
+			kbd->func_table[i] = kstrdup(func_table[i],
+						     GFP_KERNEL);
 			if (!kbd->func_table[i])
 				goto out_func;
-			memcpy(kbd->func_table[i], func_table[i], len);
 		}
 	}
 	kbd->fn_handler =
 		kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
 	if (!kbd->fn_handler)
 		goto out_func;
-	kbd->accent_table =
-		kmalloc(sizeof(struct kbdiacruc)*MAX_DIACR, GFP_KERNEL);
+	kbd->accent_table = kmemdup(accent_table,
+				    sizeof(struct kbdiacruc) * MAX_DIACR,
+				    GFP_KERNEL);
 	if (!kbd->accent_table)
 		goto out_fn_handler;
-	memcpy(kbd->accent_table, accent_table,
-	       sizeof(struct kbdiacruc)*MAX_DIACR);
 	kbd->accent_table_size = accent_table_size;
 	return kbd;
 
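For reference, kmemdup()/kstrdup() fold the kmalloc-then-memcpy pairs removed above into one call; their behavior is equivalent to this sketch of the mm/util.c semantics (kstrdup does the same with strlen(src) + 1 as the length):

void *kmemdup_equiv(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc(len, gfp);	/* returns NULL on failure, as before */

	if (p)
		memcpy(p, src, len);
	return p;
}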
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 5bb59d36a6d4..04e532eec032 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,24 +1,20 @@
1/* 1/*
2 * Copyright IBM Corp. 2004,2007 2 * Copyright IBM Corp. 2004,2010
3 * Interface implementation for communication with the z/VM control program 3 * Interface implementation for communication with the z/VM control program
4 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
5 * 4 *
5 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
6 * 6 *
7 * z/VMs CP offers the possibility to issue commands via the diagnose code 8 7 * z/VMs CP offers the possibility to issue commands via the diagnose code 8
8 * this driver implements a character device that issues these commands and 8 * this driver implements a character device that issues these commands and
9 * returns the answer of CP. 9 * returns the answer of CP.
10 10 *
11 * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS 11 * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
12 */ 12 */
13 13
14#define KMSG_COMPONENT "vmcp"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
17#include <linux/fs.h> 14#include <linux/fs.h>
18#include <linux/init.h> 15#include <linux/init.h>
19#include <linux/kernel.h> 16#include <linux/kernel.h>
20#include <linux/miscdevice.h> 17#include <linux/miscdevice.h>
21#include <linux/module.h>
22#include <linux/slab.h> 18#include <linux/slab.h>
23#include <asm/compat.h> 19#include <asm/compat.h>
24#include <asm/cpcmd.h> 20#include <asm/cpcmd.h>
@@ -26,10 +22,6 @@
26#include <asm/uaccess.h> 22#include <asm/uaccess.h>
27#include "vmcp.h" 23#include "vmcp.h"
28 24
29MODULE_LICENSE("GPL");
30MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>");
31MODULE_DESCRIPTION("z/VM CP interface");
32
33static debug_info_t *vmcp_debug; 25static debug_info_t *vmcp_debug;
34 26
35static int vmcp_open(struct inode *inode, struct file *file) 27static int vmcp_open(struct inode *inode, struct file *file)
@@ -197,11 +189,8 @@ static int __init vmcp_init(void)
197{ 189{
198 int ret; 190 int ret;
199 191
200 if (!MACHINE_IS_VM) { 192 if (!MACHINE_IS_VM)
201 pr_warning("The z/VM CP interface device driver cannot be " 193 return 0;
202 "loaded without z/VM\n");
203 return -ENODEV;
204 }
205 194
206 vmcp_debug = debug_register("vmcp", 1, 1, 240); 195 vmcp_debug = debug_register("vmcp", 1, 1, 240);
207 if (!vmcp_debug) 196 if (!vmcp_debug)
@@ -214,19 +203,8 @@ static int __init vmcp_init(void)
 	}
 
 	ret = misc_register(&vmcp_dev);
-	if (ret) {
+	if (ret)
 		debug_unregister(vmcp_debug);
-		return ret;
-	}
-
-	return 0;
-}
-
-static void __exit vmcp_exit(void)
-{
-	misc_deregister(&vmcp_dev);
-	debug_unregister(vmcp_debug);
+	return ret;
 }
-
-module_init(vmcp_init);
-module_exit(vmcp_exit);
+device_initcall(vmcp_init);
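
Note: the vmcp hunks above drop the module boilerplate (module_init/module_exit and the MODULE_* macros) because the driver becomes built-in only, registered via device_initcall() and returning 0 rather than -ENODEV when z/VM is absent. A minimal, compilable sketch of that pattern with a hypothetical "demo" driver:

#include <linux/init.h>
#include <linux/kernel.h>

/* Built-in only: no module_exit() pair and no MODULE_* macros.
 * Returning 0 (instead of -ENODEV) when the supporting hypervisor
 * or hardware is absent keeps boot quiet, as vmcp_init() now does. */
static int __init demo_init(void)
{
	pr_info("demo: initialized\n");
	return 0;
}
device_initcall(demo_init);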
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 7217966f7d31..f5ea3384a4b9 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -445,7 +445,7 @@ static int zcore_memmap_open(struct inode *inode, struct file *filp)
 	}
 	kfree(chunk_array);
 	filp->private_data = buf;
-	return 0;
+	return nonseekable_open(inode, filp);
 }
 
 static int zcore_memmap_release(struct inode *inode, struct file *filp)
@@ -473,7 +473,7 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
 
 static int zcore_reipl_open(struct inode *inode, struct file *filp)
 {
-	return 0;
+	return nonseekable_open(inode, filp);
 }
 
 static int zcore_reipl_release(struct inode *inode, struct file *filp)
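
Note: several hunks in this series replace "return 0" in open handlers with nonseekable_open(), or set it directly as the .open method. nonseekable_open() clears FMODE_LSEEK, FMODE_PREAD and FMODE_PWRITE on the file, so a later lseek() on the descriptor fails with -ESPIPE instead of silently succeeding on a device that cannot seek; it always returns 0, making it safe as the tail call of an open handler. A sketch with a hypothetical handler:

#include <linux/fs.h>

/* Hypothetical char-device open handler using the same idiom. */
static int demo_open(struct inode *inode, struct file *filp)
{
	/* Marks the fd non-seekable; never fails. */
	return nonseekable_open(inode, filp);
}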
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 3b6f4adc5094..a83877c664a6 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -803,6 +803,7 @@ static long chsc_ioctl(struct file *filp, unsigned int cmd,
 
 static const struct file_operations chsc_fops = {
 	.owner = THIS_MODULE,
+	.open = nonseekable_open,
 	.unlocked_ioctl = chsc_ioctl,
 	.compat_ioctl = chsc_ioctl,
 };
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 5feea1a371e1..f4e6cf3aceb8 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -616,7 +616,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
 	struct pt_regs *old_regs;
 
 	old_regs = set_irq_regs(regs);
-	s390_idle_check();
+	s390_idle_check(regs, S390_lowcore.int_clock,
+			S390_lowcore.async_enter_timer);
 	irq_enter();
 	__get_cpu_var(s390_idle).nohz_delay = 1;
 	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 511649115bd7..ac94ac751459 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -648,6 +648,8 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 static void __init
 css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
 {
+	struct cpuid cpu_id;
+
 	if (css_general_characteristics.mcss) {
 		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
 		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
@@ -658,8 +660,9 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
 		css->global_pgid.pgid_high.cpu_addr = 0;
 #endif
 	}
-	css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
-	css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
+	get_cpu_id(&cpu_id);
+	css->global_pgid.cpu_id = cpu_id.ident;
+	css->global_pgid.cpu_model = cpu_id.machine;
 	css->global_pgid.tod_high = tod_high;
 
 }
@@ -1062,6 +1065,7 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
 }
 
 static const struct file_operations cio_settle_proc_fops = {
+	.open = nonseekable_open,
 	.write = cio_settle_write,
 };
 
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 48aa0647432b..f0037eefd44e 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -13,8 +13,8 @@
 #include <asm/debug.h>
 #include "chsc.h"
 
-#define QDIO_BUSY_BIT_PATIENCE		100	/* 100 microseconds */
-#define QDIO_INPUT_THRESHOLD		500	/* 500 microseconds */
+#define QDIO_BUSY_BIT_PATIENCE		(100 << 12)	/* 100 microseconds */
+#define QDIO_INPUT_THRESHOLD		(500 << 12)	/* 500 microseconds */
 
 /*
  * if an asynchronous HiperSockets queue runs full, the 10 seconds timer wait
@@ -296,10 +296,8 @@ struct qdio_q {
 	struct qdio_irq *irq_ptr;
 	struct sl *sl;
 	/*
-	 * Warning: Leave this member at the end so it won't be cleared in
-	 * qdio_fill_qs. A page is allocated under this pointer and used for
-	 * slib and sl. slib is 2048 bytes big and sl points to offset
-	 * PAGE_SIZE / 2.
+	 * A page is allocated under this pointer and used for slib and sl.
+	 * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
 	 */
 	struct slib *slib;
 } __attribute__ ((aligned(256)));
@@ -372,11 +370,6 @@ static inline int multicast_outbound(struct qdio_q *q)
 		(q->nr == q->irq_ptr->nr_output_qs - 1);
 }
 
-static inline unsigned long long get_usecs(void)
-{
-	return monotonic_clock() >> 12;
-}
-
 #define pci_out_supported(q) \
 	(q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
 #define is_qebsm(q)		(q->irq_ptr->sch_token != 0)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 88be7b9ea6e1..00520f9a7a8e 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -336,10 +336,10 @@ again:
 		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
 
 		if (!start_time) {
-			start_time = get_usecs();
+			start_time = get_clock();
 			goto again;
 		}
-		if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
 			goto again;
 	}
 	return cc;
@@ -536,7 +536,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
 	if ((bufnr != q->last_move) || q->qdio_error) {
 		q->last_move = bufnr;
 		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
-			q->u.in.timestamp = get_usecs();
+			q->u.in.timestamp = get_clock();
 		return 1;
 	} else
 		return 0;
@@ -567,7 +567,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
 	 * At this point we know, that inbound first_to_check
 	 * has (probably) not moved (see qdio_inbound_processing).
 	 */
-	if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
 			      q->first_to_check);
 		return 1;
@@ -606,7 +606,7 @@ static void qdio_kick_handler(struct qdio_q *q)
 static void __qdio_inbound_processing(struct qdio_q *q)
 {
 	qperf_inc(q, tasklet_inbound);
-again:
+
 	if (!qdio_inbound_q_moved(q))
 		return;
 
@@ -615,7 +615,10 @@ again:
 	if (!qdio_inbound_q_done(q)) {
 		/* means poll time is not yet over */
 		qperf_inc(q, tasklet_inbound_resched);
-		goto again;
+		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
+			tasklet_schedule(&q->tasklet);
+			return;
+		}
 	}
 
 	qdio_stop_polling(q);
@@ -625,7 +628,8 @@ again:
 	 */
 	if (!qdio_inbound_q_done(q)) {
 		qperf_inc(q, tasklet_inbound_resched2);
-		goto again;
+		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+			tasklet_schedule(&q->tasklet);
 	}
 }
 
@@ -955,6 +959,9 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		return;
 	}
 
+	if (irq_ptr->perf_stat_enabled)
+		irq_ptr->perf_stat.qdio_int++;
+
 	if (IS_ERR(irb)) {
 		switch (PTR_ERR(irb)) {
 		case -EIO:
@@ -1016,30 +1023,6 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
 }
 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
 
-/**
- * qdio_cleanup - shutdown queues and free data structures
- * @cdev: associated ccw device
- * @how: use halt or clear to shutdown
- *
- * This function calls qdio_shutdown() for @cdev with method @how.
- * and qdio_free(). The qdio_free() return value is ignored since
- * !irq_ptr is already checked.
- */
-int qdio_cleanup(struct ccw_device *cdev, int how)
-{
-	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-	int rc;
-
-	if (!irq_ptr)
-		return -ENODEV;
-
-	rc = qdio_shutdown(cdev, how);
-
-	qdio_free(cdev);
-	return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_cleanup);
-
 static void qdio_shutdown_queues(struct ccw_device *cdev)
 {
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
@@ -1157,28 +1140,6 @@ int qdio_free(struct ccw_device *cdev)
 EXPORT_SYMBOL_GPL(qdio_free);
 
 /**
- * qdio_initialize - allocate and establish queues for a qdio subchannel
- * @init_data: initialization data
- *
- * This function first allocates queues via qdio_allocate() and on success
- * establishes them via qdio_establish().
- */
-int qdio_initialize(struct qdio_initialize *init_data)
-{
-	int rc;
-
-	rc = qdio_allocate(init_data);
-	if (rc)
-		return rc;
-
-	rc = qdio_establish(init_data);
-	if (rc)
-		qdio_free(init_data->cdev);
-	return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_initialize);
-
-/**
  * qdio_allocate - allocate qdio queues and associated data
  * @init_data: initialization data
  */
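
Note: with the qdio_cleanup() and qdio_initialize() wrappers removed above, callers open-code the two-step sequences themselves; the qeth hunks further down do exactly that. A sketch of the caller-side pattern, taken from the removed wrapper bodies (init_data and cdev come from the caller's context):

/* Setup: allocate, then establish; free the queues if establish fails. */
static int demo_qdio_start(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;
	rc = qdio_establish(init_data);
	if (rc)
		qdio_free(init_data->cdev);
	return rc;
}

/* Teardown: shut down the queues, then free the data structures. */
static void demo_qdio_stop(struct ccw_device *cdev)
{
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	qdio_free(cdev);
}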
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 7f4a75465140..6326b67c45d2 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -106,10 +106,12 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs
 static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
 			      qdio_handler_t *handler, int i)
 {
-	/* must be cleared by every qdio_establish */
-	memset(q, 0, ((char *)&q->slib) - ((char *)q));
-	memset(q->slib, 0, PAGE_SIZE);
+	struct slib *slib = q->slib;
 
+	/* queue must be cleared for qdio_establish */
+	memset(q, 0, sizeof(*q));
+	memset(slib, 0, PAGE_SIZE);
+	q->slib = slib;
 	q->irq_ptr = irq_ptr;
 	q->mask = 1 << (31 - i);
 	q->nr = i;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index ce5f8910ff83..8daf1b99f153 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -95,7 +95,7 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
 	for_each_input_queue(irq_ptr, q, i)
 		list_add_rcu(&q->entry, &tiq_list);
 	mutex_unlock(&tiq_list_lock);
-	xchg(irq_ptr->dsci, 1);
+	xchg(irq_ptr->dsci, 1 << 7);
 }
 
 void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
@@ -173,7 +173,7 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
 
 		/* prevent racing */
 		if (*tiqdio_alsi)
-			xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
+			xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1 << 7);
 	}
 }
 
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 304caf549973..41e0aaefafd5 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -302,7 +302,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
 static int zcrypt_open(struct inode *inode, struct file *filp)
 {
 	atomic_inc(&zcrypt_open_count);
-	return 0;
+	return nonseekable_open(inode, filp);
 }
 
 /**
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3ba738b2e271..28f71349fdec 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1292,13 +1292,14 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
 		   QETH_QDIO_CLEANING)) {
 	case QETH_QDIO_ESTABLISHED:
 		if (card->info.type == QETH_CARD_TYPE_IQD)
-			rc = qdio_cleanup(CARD_DDEV(card),
+			rc = qdio_shutdown(CARD_DDEV(card),
 				QDIO_FLAG_CLEANUP_USING_HALT);
 		else
-			rc = qdio_cleanup(CARD_DDEV(card),
+			rc = qdio_shutdown(CARD_DDEV(card),
 				QDIO_FLAG_CLEANUP_USING_CLEAR);
 		if (rc)
 			QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc);
+		qdio_free(CARD_DDEV(card));
 		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
 		break;
 	case QETH_QDIO_CLEANING:
@@ -3810,10 +3811,18 @@ static int qeth_qdio_establish(struct qeth_card *card)
 
 	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
 		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
-		rc = qdio_initialize(&init_data);
-		if (rc)
+		rc = qdio_allocate(&init_data);
+		if (rc) {
+			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+			goto out;
+		}
+		rc = qdio_establish(&init_data);
+		if (rc) {
 			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+			qdio_free(CARD_DDEV(card));
+		}
 	}
+out:
 	kfree(out_sbal_ptrs);
 	kfree(in_sbal_ptrs);
 	kfree(qib_param_field);
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 25d9e0ae9c57..1a2db0a35737 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -254,6 +254,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
 }
 
 static const struct file_operations zfcp_cfdc_fops = {
+	.open = nonseekable_open,
 	.unlocked_ioctl = zfcp_cfdc_dev_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = zfcp_cfdc_dev_ioctl
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 19f255b97c86..d3b62eb0fba7 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -105,9 +105,9 @@ static ssize_t
 flash_read(struct file * file, char __user * buf,
 	   size_t count, loff_t *ppos)
 {
-	unsigned long p = file->f_pos;
+	loff_t p = *ppos;
 	int i;
 
 	if (count > flash.read_size - p)
 		count = flash.read_size - p;
 
@@ -118,7 +118,7 @@ flash_read(struct file * file, char __user * buf,
 		buf++;
 	}
 
-	file->f_pos += count;
+	*ppos += count;
 	return count;
 }
 
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index 105449c15fa9..e17764d71476 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -69,6 +69,7 @@ static struct zorro_device_id zorro7xx_zorro_tbl[] __devinitdata = {
 	},
 	{ 0 }
 };
+MODULE_DEVICE_TABLE(zorro, zorro7xx_zorro_tbl);
 
 static int __devinit zorro7xx_init_one(struct zorro_dev *z,
 				       const struct zorro_device_id *ent)
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index f55c49475a8c..302836a80693 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -518,12 +518,13 @@ config SERIAL_S3C2412
 	  Serial port support for the Samsung S3C2412 and S3C2413 SoC
 
 config SERIAL_S3C2440
-	tristate "Samsung S3C2440/S3C2442 Serial port support"
-	depends on SERIAL_SAMSUNG && (CPU_S3C2440 || CPU_S3C2442)
+	tristate "Samsung S3C2440/S3C2442/S3C2416 Serial port support"
+	depends on SERIAL_SAMSUNG && (CPU_S3C2440 || CPU_S3C2442 || CPU_S3C2416)
 	default y if CPU_S3C2440
 	default y if CPU_S3C2442
+	select SERIAL_SAMSUNG_UARTS_4 if CPU_S3C2416
 	help
-	  Serial port support for the Samsung S3C2440 and S3C2442 SoC
+	  Serial port support for the Samsung S3C2440, S3C2416 and S3C2442 SoC
 
 config SERIAL_S3C24A0
 	tristate "Samsung S3C24A0 Serial port support"
@@ -533,21 +534,13 @@ config SERIAL_S3C24A0
 	  Serial port support for the Samsung S3C24A0 SoC
 
 config SERIAL_S3C6400
-	tristate "Samsung S3C6400/S3C6410/S5P6440 Seria port support"
-	depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440)
+	tristate "Samsung S3C6400/S3C6410/S5P6440/S5PC100 Serial port support"
+	depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440 || CPU_S5PC100)
 	select SERIAL_SAMSUNG_UARTS_4
 	default y
 	help
-	  Serial port support for the Samsung S3C6400, S3C6410 and S5P6440
-	  SoCs
-
-config SERIAL_S5PC100
-	tristate "Samsung S5PC100 Serial port support"
-	depends on SERIAL_SAMSUNG && CPU_S5PC100
-	select SERIAL_SAMSUNG_UARTS_4
-	default y
-	help
-	  Serial port support for the Samsung S5PC100 SoCs
+	  Serial port support for the Samsung S3C6400, S3C6410, S5P6440
+	  and S5PC100 SoCs
 
 config SERIAL_S5PV210
 	tristate "Samsung S5PV210 Serial port support"
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 6aa4723b74ee..328f107346c4 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_SERIAL_S3C2412) += s3c2412.o
 obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o
 obj-$(CONFIG_SERIAL_S3C24A0) += s3c24a0.o
 obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
-obj-$(CONFIG_SERIAL_S5PC100) += s3c6400.o
 obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
 obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
 obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 2c9bf9b68327..eed3c2d8dd1c 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -38,6 +38,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/atmel_pdc.h>
 #include <linux/atmel_serial.h>
+#include <linux/uaccess.h>
 
 #include <asm/io.h>
 
@@ -59,6 +60,9 @@
 
 #include <linux/serial_core.h>
 
+static void atmel_start_rx(struct uart_port *port);
+static void atmel_stop_rx(struct uart_port *port);
+
 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
 
 /* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
@@ -93,6 +97,7 @@
 #define UART_GET_BRGR(port)	__raw_readl((port)->membase + ATMEL_US_BRGR)
 #define UART_PUT_BRGR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_BRGR)
 #define UART_PUT_RTOR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_RTOR)
+#define UART_PUT_TTGR(port, v)	__raw_writel(v, (port)->membase + ATMEL_US_TTGR)
 
  /* PDC registers */
 #define UART_PUT_PTCR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
@@ -147,6 +152,9 @@ struct atmel_uart_port {
 	unsigned int		irq_status_prev;
 
 	struct circ_buf		rx_ring;
+
+	struct serial_rs485	rs485;		/* rs485 settings */
+	unsigned int		tx_done_mask;
 };
 
 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
@@ -187,6 +195,46 @@ static bool atmel_use_dma_tx(struct uart_port *port)
 }
 #endif
 
+/* Enable or disable the rs485 support */
+void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+{
+	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+	unsigned int mode;
+
+	spin_lock(&port->lock);
+
+	/* Disable interrupts */
+	UART_PUT_IDR(port, atmel_port->tx_done_mask);
+
+	mode = UART_GET_MR(port);
+
+	/* Resetting serial mode to RS232 (0x0) */
+	mode &= ~ATMEL_US_USMODE;
+
+	atmel_port->rs485 = *rs485conf;
+
+	if (rs485conf->flags & SER_RS485_ENABLED) {
+		dev_dbg(port->dev, "Setting UART to RS485\n");
+		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+		UART_PUT_TTGR(port, rs485conf->delay_rts_before_send);
+		mode |= ATMEL_US_USMODE_RS485;
+	} else {
+		dev_dbg(port->dev, "Setting UART to RS232\n");
+		if (atmel_use_dma_tx(port))
+			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
+				ATMEL_US_TXBUFE;
+		else
+			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
+	}
+	UART_PUT_MR(port, mode);
+
+	/* Enable interrupts */
+	UART_PUT_IER(port, atmel_port->tx_done_mask);
+
+	spin_unlock(&port->lock);
+
+}
+
 /*
  * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty.
  */
@@ -202,6 +250,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
 {
 	unsigned int control = 0;
 	unsigned int mode;
+	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
 #ifdef CONFIG_ARCH_AT91RM9200
 	if (cpu_is_at91rm9200()) {
@@ -236,6 +285,17 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
 		mode |= ATMEL_US_CHMODE_LOC_LOOP;
 	else
 		mode |= ATMEL_US_CHMODE_NORMAL;
+
+	/* Resetting serial mode to RS232 (0x0) */
+	mode &= ~ATMEL_US_USMODE;
+
+	if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+		dev_dbg(port->dev, "Setting UART to RS485\n");
+		UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_before_send);
+		mode |= ATMEL_US_USMODE_RS485;
+	} else {
+		dev_dbg(port->dev, "Setting UART to RS232\n");
+	}
 	UART_PUT_MR(port, mode);
 }
 
@@ -268,12 +328,17 @@ static u_int atmel_get_mctrl(struct uart_port *port)
  */
 static void atmel_stop_tx(struct uart_port *port)
 {
+	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
 	if (atmel_use_dma_tx(port)) {
 		/* disable PDC transmit */
 		UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
-		UART_PUT_IDR(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE);
-	} else
-		UART_PUT_IDR(port, ATMEL_US_TXRDY);
+	}
+	/* Disable interrupts */
+	UART_PUT_IDR(port, atmel_port->tx_done_mask);
+
+	if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+		atmel_start_rx(port);
 }
 
 /*
@@ -281,17 +346,39 @@ static void atmel_stop_tx(struct uart_port *port)
  */
 static void atmel_start_tx(struct uart_port *port)
 {
+	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
 	if (atmel_use_dma_tx(port)) {
 		if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN)
 			/* The transmitter is already running.  Yes, we
 			   really need this.*/
 			return;
 
-		UART_PUT_IER(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE);
+		if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+			atmel_stop_rx(port);
+
 		/* re-enable PDC transmit */
 		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
-	} else
-		UART_PUT_IER(port, ATMEL_US_TXRDY);
+	}
+	/* Enable interrupts */
+	UART_PUT_IER(port, atmel_port->tx_done_mask);
+}
+
+/*
+ * start receiving - port is in process of being opened.
+ */
+static void atmel_start_rx(struct uart_port *port)
+{
+	UART_PUT_CR(port, ATMEL_US_RSTSTA);  /* reset status and receiver */
+
+	if (atmel_use_dma_rx(port)) {
+		/* enable PDC controller */
+		UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+			port->read_status_mask);
+		UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
+	} else {
+		UART_PUT_IER(port, ATMEL_US_RXRDY);
+	}
 }
 
 /*
@@ -302,9 +389,11 @@ static void atmel_stop_rx(struct uart_port *port)
 	if (atmel_use_dma_rx(port)) {
 		/* disable PDC receive */
 		UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
-		UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
-	} else
+		UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+			port->read_status_mask);
+	} else {
 		UART_PUT_IDR(port, ATMEL_US_RXRDY);
+	}
 }
 
 /*
@@ -428,8 +517,9 @@ static void atmel_rx_chars(struct uart_port *port)
 static void atmel_tx_chars(struct uart_port *port)
 {
 	struct circ_buf *xmit = &port->state->xmit;
+	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
-	if (port->x_char && UART_GET_CSR(port) & ATMEL_US_TXRDY) {
+	if (port->x_char && UART_GET_CSR(port) & atmel_port->tx_done_mask) {
 		UART_PUT_CHAR(port, port->x_char);
 		port->icount.tx++;
 		port->x_char = 0;
@@ -437,7 +527,7 @@ static void atmel_tx_chars(struct uart_port *port)
 	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
 		return;
 
-	while (UART_GET_CSR(port) & ATMEL_US_TXRDY) {
+	while (UART_GET_CSR(port) & atmel_port->tx_done_mask) {
 		UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
 		port->icount.tx++;
@@ -449,7 +539,8 @@ static void atmel_tx_chars(struct uart_port *port)
 		uart_write_wakeup(port);
 
 	if (!uart_circ_empty(xmit))
-		UART_PUT_IER(port, ATMEL_US_TXRDY);
+		/* Enable interrupts */
+		UART_PUT_IER(port, atmel_port->tx_done_mask);
 }
 
 /*
@@ -501,18 +592,10 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
 {
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
-	if (atmel_use_dma_tx(port)) {
-		/* PDC transmit */
-		if (pending & (ATMEL_US_ENDTX | ATMEL_US_TXBUFE)) {
-			UART_PUT_IDR(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE);
-			tasklet_schedule(&atmel_port->tasklet);
-		}
-	} else {
-		/* Interrupt transmit */
-		if (pending & ATMEL_US_TXRDY) {
-			UART_PUT_IDR(port, ATMEL_US_TXRDY);
-			tasklet_schedule(&atmel_port->tasklet);
-		}
+	if (pending & atmel_port->tx_done_mask) {
+		/* Either PDC or interrupt transmission */
+		UART_PUT_IDR(port, atmel_port->tx_done_mask);
+		tasklet_schedule(&atmel_port->tasklet);
 	}
 }
 
@@ -590,9 +673,15 @@ static void atmel_tx_dma(struct uart_port *port)
 
 		UART_PUT_TPR(port, pdc->dma_addr + xmit->tail);
 		UART_PUT_TCR(port, count);
-		/* re-enable PDC transmit and interrupts */
+		/* re-enable PDC transmit */
 		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
-		UART_PUT_IER(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE);
+		/* Enable interrupts */
+		UART_PUT_IER(port, atmel_port->tx_done_mask);
+	} else {
+		if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+			/* DMA done, stop TX, start RX for RS485 */
+			atmel_start_rx(port);
+		}
 	}
 
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -1017,6 +1106,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
 {
 	unsigned long flags;
 	unsigned int mode, imr, quot, baud;
+	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
 	/* Get current mode register */
 	mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL
@@ -1115,6 +1205,17 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
 	/* disable receiver and transmitter */
 	UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
 
+	/* Resetting serial mode to RS232 (0x0) */
+	mode &= ~ATMEL_US_USMODE;
+
+	if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+		dev_dbg(port->dev, "Setting UART to RS485\n");
+		UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_before_send);
+		mode |= ATMEL_US_USMODE_RS485;
+	} else {
+		dev_dbg(port->dev, "Setting UART to RS232\n");
+	}
+
 	/* set the parity, stop bits and data size */
 	UART_PUT_MR(port, mode);
 
@@ -1231,6 +1332,35 @@ static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
 }
 #endif
 
+static int
+atmel_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg)
+{
+	struct serial_rs485 rs485conf;
+
+	switch (cmd) {
+	case TIOCSRS485:
+		if (copy_from_user(&rs485conf, (struct serial_rs485 *) arg,
+					sizeof(rs485conf)))
+			return -EFAULT;
+
+		atmel_config_rs485(port, &rs485conf);
+		break;
+
+	case TIOCGRS485:
+		if (copy_to_user((struct serial_rs485 *) arg,
+					&(to_atmel_uart_port(port)->rs485),
+					sizeof(rs485conf)))
+			return -EFAULT;
+		break;
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return 0;
+}
+
+
+
 static struct uart_ops atmel_pops = {
 	.tx_empty	= atmel_tx_empty,
 	.set_mctrl	= atmel_set_mctrl,
@@ -1250,6 +1380,7 @@ static struct uart_ops atmel_pops = {
 	.config_port	= atmel_config_port,
 	.verify_port	= atmel_verify_port,
 	.pm		= atmel_serial_pm,
+	.ioctl		= atmel_ioctl,
 #ifdef CONFIG_CONSOLE_POLL
 	.poll_get_char	= atmel_poll_get_char,
 	.poll_put_char	= atmel_poll_put_char,
@@ -1265,13 +1396,12 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
 	struct uart_port *port = &atmel_port->uart;
 	struct atmel_uart_data *data = pdev->dev.platform_data;
 
 	port->iotype	= UPIO_MEM;
 	port->flags	= UPF_BOOT_AUTOCONF;
 	port->ops	= &atmel_pops;
 	port->fifosize	= 1;
 	port->line	= pdev->id;
 	port->dev	= &pdev->dev;
-
 	port->mapbase	= pdev->resource[0].start;
 	port->irq	= pdev->resource[1].start;
 
@@ -1299,8 +1429,16 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
 
 	atmel_port->use_dma_rx = data->use_dma_rx;
 	atmel_port->use_dma_tx = data->use_dma_tx;
-	if (atmel_use_dma_tx(port))
+	atmel_port->rs485 = data->rs485;
+	/* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
+	if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+	else if (atmel_use_dma_tx(port)) {
 		port->fifosize = PDC_BUFFER_SIZE;
+		atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
+	} else {
+		atmel_port->tx_done_mask = ATMEL_US_TXRDY;
+	}
 }
 
 /*
@@ -1334,6 +1472,7 @@ static void atmel_console_putchar(struct uart_port *port, int ch)
 static void atmel_console_write(struct console *co, const char *s, u_int count)
 {
 	struct uart_port *port = &atmel_ports[co->index].uart;
+	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 	unsigned int status, imr;
 	unsigned int pdc_tx;
 
@@ -1341,7 +1480,7 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
 	 * First, save IMR and then disable interrupts
 	 */
 	imr = UART_GET_IMR(port);
-	UART_PUT_IDR(port, ATMEL_US_RXRDY | ATMEL_US_TXRDY);
+	UART_PUT_IDR(port, ATMEL_US_RXRDY | atmel_port->tx_done_mask);
 
 	/* Store PDC transmit status and disable it */
 	pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN;
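
Note: the new atmel_ioctl() above wires the TIOCSRS485/TIOCGRS485 ioctls through to atmel_config_rs485(). A hedged sketch of the corresponding userspace side (the device path and delay value are examples, not taken from the patch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 conf = {
		.flags = SER_RS485_ENABLED,
		.delay_rts_before_send = 1,	/* driver loads this into TTGR */
	};
	int fd = open("/dev/ttyS0", O_RDWR);	/* example device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, TIOCSRS485, &conf) < 0)	/* switch the port to RS485 */
		perror("TIOCSRS485");
	if (ioctl(fd, TIOCGRS485, &conf) < 0)	/* read back active settings */
		perror("TIOCGRS485");
	return 0;
}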
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 4315b23590bd..eacb588a9345 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -120,7 +120,8 @@
 #define MX2_UCR3_RXDMUXSEL	(1<<2)  /* RXD Muxed Input Select, on mx2/mx3 */
 #define UCR3_INVT	(1<<1)	/* Inverted Infrared transmission */
 #define UCR3_BPEN	(1<<0)	/* Preset registers enable */
-#define UCR4_CTSTL_32	(32<<10) /* CTS trigger level (32 chars) */
+#define UCR4_CTSTL_SHF	10	/* CTS trigger level shift */
+#define UCR4_CTSTL_MASK	0x3F	/* CTS trigger is 6 bits wide */
 #define UCR4_INVR	(1<<9)	/* Inverted infrared reception */
 #define UCR4_ENIRI	(1<<8)	/* Serial infrared interrupt enable */
 #define UCR4_WKEN	(1<<7)	/* Wake interrupt enable */
@@ -591,6 +592,9 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
 	return 0;
 }
 
+/* half the RX buffer size */
+#define CTSTL 16
+
 static int imx_startup(struct uart_port *port)
 {
 	struct imx_port *sport = (struct imx_port *)port;
@@ -607,6 +611,10 @@ static int imx_startup(struct uart_port *port)
 	if (USE_IRDA(sport))
 		temp |= UCR4_IRSC;
 
+	/* set the trigger level for CTS */
+	temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
+	temp |= CTSTL << UCR4_CTSTL_SHF;
+
 	writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
 
 	if (USE_IRDA(sport)) {
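
Note: the imx hunks above replace the fixed UCR4_CTSTL_32 value with a shift/mask pair so the CTS trigger level becomes programmable. The underlying read-modify-write idiom, sketched as a hypothetical helper using the same names from the hunk:

/* Clear the field, then OR in the new value, masked to the 6-bit width. */
static inline unsigned int set_ctstl(unsigned int ucr4, unsigned int level)
{
	ucr4 &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
	ucr4 |= (level & UCR4_CTSTL_MASK) << UCR4_CTSTL_SHF;
	return ucr4;
}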
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index a176ab4bd65b..02469c31bf0b 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -1467,7 +1467,7 @@ mpc52xx_uart_init(void)
 	/*
 	 * Map the PSC FIFO Controller and init if on MPC512x.
 	 */
-	if (psc_ops->fifoc_init) {
+	if (psc_ops && psc_ops->fifoc_init) {
 		ret = psc_ops->fifoc_init();
 		if (ret)
 			return ret;
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 8eb094c1f61b..8d993c4cceac 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -83,16 +83,16 @@ struct sci_port {
 
 	/* Interface clock */
 	struct clk		*iclk;
-	/* Data clock */
-	struct clk		*dclk;
+	/* Function clock */
+	struct clk		*fclk;
 
 	struct list_head	node;
 	struct dma_chan		*chan_tx;
 	struct dma_chan		*chan_rx;
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	struct device		*dma_dev;
-	enum sh_dmae_slave_chan_id	slave_tx;
-	enum sh_dmae_slave_chan_id	slave_rx;
+	unsigned int		slave_tx;
+	unsigned int		slave_rx;
 	struct dma_async_tx_descriptor	*desc_tx;
 	struct dma_async_tx_descriptor	*desc_rx[2];
 	dma_cookie_t		cookie_tx;
@@ -107,6 +107,7 @@ struct sci_port {
 	struct work_struct		work_tx;
 	struct work_struct		work_rx;
 	struct timer_list		rx_timer;
+	unsigned int			rx_timeout;
 #endif
 };
 
@@ -674,22 +675,22 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
 	struct sci_port *s = to_sci_port(port);
 
 	if (s->chan_rx) {
-		unsigned long tout;
 		u16 scr = sci_in(port, SCSCR);
 		u16 ssr = sci_in(port, SCxSR);
 
 		/* Disable future Rx interrupts */
-		sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE);
+		if (port->type == PORT_SCIFA) {
+			disable_irq_nosync(irq);
+			scr |= 0x4000;
+		} else {
+			scr &= ~SCI_CTRL_FLAGS_RIE;
+		}
+		sci_out(port, SCSCR, scr);
 		/* Clear current interrupt */
 		sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
-		/* Calculate delay for 1.5 DMA buffers */
-		tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
-			port->fifosize / 2;
-		dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n",
-			tout * 1000 / HZ);
-		if (tout < 2)
-			tout = 2;
-		mod_timer(&s->rx_timer, jiffies + tout);
+		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
+			jiffies, s->rx_timeout);
+		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
 
 		return IRQ_HANDLED;
 	}
@@ -799,7 +800,7 @@ static int sci_notifier(struct notifier_block *self,
 	    (phase == CPUFREQ_RESUMECHANGE)) {
 		spin_lock_irqsave(&priv->lock, flags);
 		list_for_each_entry(sci_port, &priv->ports, node)
-			sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
+			sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
 		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
@@ -810,21 +811,17 @@ static void sci_clk_enable(struct uart_port *port)
 {
 	struct sci_port *sci_port = to_sci_port(port);
 
-	clk_enable(sci_port->dclk);
-	sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
-
-	if (sci_port->iclk)
-		clk_enable(sci_port->iclk);
+	clk_enable(sci_port->iclk);
+	sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
+	clk_enable(sci_port->fclk);
 }
 
 static void sci_clk_disable(struct uart_port *port)
 {
 	struct sci_port *sci_port = to_sci_port(port);
 
-	if (sci_port->iclk)
-		clk_disable(sci_port->iclk);
-
-	clk_disable(sci_port->dclk);
+	clk_disable(sci_port->fclk);
+	clk_disable(sci_port->iclk);
 }
 
 static int sci_request_irq(struct sci_port *port)
@@ -913,22 +910,26 @@ static void sci_dma_tx_complete(void *arg)
 
 	spin_lock_irqsave(&port->lock, flags);
 
-	xmit->tail += s->sg_tx.length;
+	xmit->tail += sg_dma_len(&s->sg_tx);
 	xmit->tail &= UART_XMIT_SIZE - 1;
 
-	port->icount.tx += s->sg_tx.length;
+	port->icount.tx += sg_dma_len(&s->sg_tx);
 
 	async_tx_ack(s->desc_tx);
 	s->cookie_tx = -EINVAL;
 	s->desc_tx = NULL;
 
-	spin_unlock_irqrestore(&port->lock, flags);
-
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(port);
 
-	if (uart_circ_chars_pending(xmit))
+	if (!uart_circ_empty(xmit)) {
 		schedule_work(&s->work_tx);
+	} else if (port->type == PORT_SCIFA) {
+		u16 ctrl = sci_in(port, SCSCR);
+		sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
 }
 
 /* Locking: called with port lock held */
@@ -972,13 +973,13 @@ static void sci_dma_rx_complete(void *arg)
 	unsigned long flags;
 	int count;
 
-	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+	dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
 
 	spin_lock_irqsave(&port->lock, flags);
 
 	count = sci_dma_rx_push(s, tty, s->buf_len_rx);
 
-	mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5));
+	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
 
 	spin_unlock_irqrestore(&port->lock, flags);
 
@@ -1050,6 +1051,8 @@ static void sci_submit_rx(struct sci_port *s)
 			sci_rx_dma_release(s, true);
 			return;
 		}
+		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
+			s->cookie_rx[i], i);
 	}
 
 	s->active_rx = s->cookie_rx[0];
@@ -1107,10 +1110,10 @@ static void work_fn_rx(struct work_struct *work)
 		return;
 	}
 
-	dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__,
-		s->cookie_rx[new], new);
-
 	s->active_rx = s->cookie_rx[!new];
+
+	dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
+		s->cookie_rx[new], new, s->active_rx);
 }
 
 static void work_fn_tx(struct work_struct *work)
@@ -1131,14 +1134,13 @@ static void work_fn_tx(struct work_struct *work)
 	 */
 	spin_lock_irq(&port->lock);
 	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
-	sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
+	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
 		sg->offset;
-	sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
 		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
-	sg->dma_length = sg->length;
 	spin_unlock_irq(&port->lock);
 
-	BUG_ON(!sg->length);
+	BUG_ON(!sg_dma_len(sg));
 
 	desc = chan->device->device_prep_slave_sg(chan,
 			sg, s->sg_len_tx, DMA_TO_DEVICE,
@@ -1173,23 +1175,28 @@ static void work_fn_tx(struct work_struct *work)
 
 static void sci_start_tx(struct uart_port *port)
 {
+	struct sci_port *s = to_sci_port(port);
 	unsigned short ctrl;
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
-	struct sci_port *s = to_sci_port(port);
-
-	if (s->chan_tx) {
-		if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0)
-			schedule_work(&s->work_tx);
-
-		return;
+	if (port->type == PORT_SCIFA) {
+		u16 new, scr = sci_in(port, SCSCR);
+		if (s->chan_tx)
+			new = scr | 0x8000;
+		else
+			new = scr & ~0x8000;
+		if (new != scr)
+			sci_out(port, SCSCR, new);
 	}
+	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
+	    s->cookie_tx < 0)
+		schedule_work(&s->work_tx);
 #endif
-
-	/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
-	ctrl = sci_in(port, SCSCR);
-	ctrl |= SCI_CTRL_FLAGS_TIE;
-	sci_out(port, SCSCR, ctrl);
+	if (!s->chan_tx || port->type == PORT_SCIFA) {
+		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
+		ctrl = sci_in(port, SCSCR);
+		sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
+	}
 }
 
 static void sci_stop_tx(struct uart_port *port)
@@ -1198,6 +1205,8 @@ static void sci_stop_tx(struct uart_port *port)
 
 	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
 	ctrl = sci_in(port, SCSCR);
+	if (port->type == PORT_SCIFA)
+		ctrl &= ~0x8000;
 	ctrl &= ~SCI_CTRL_FLAGS_TIE;
 	sci_out(port, SCSCR, ctrl);
 }
@@ -1208,6 +1217,8 @@ static void sci_start_rx(struct uart_port *port)
 
 	/* Set RIE (Receive Interrupt Enable) bit in SCSCR */
 	ctrl |= sci_in(port, SCSCR);
+	if (port->type == PORT_SCIFA)
+		ctrl &= ~0x4000;
 	sci_out(port, SCSCR, ctrl);
 }
 
@@ -1217,6 +1228,8 @@ static void sci_stop_rx(struct uart_port *port)
 
 	/* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
 	ctrl = sci_in(port, SCSCR);
+	if (port->type == PORT_SCIFA)
+		ctrl &= ~0x4000;
 	ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
 	sci_out(port, SCSCR, ctrl);
 }
@@ -1251,8 +1264,12 @@ static void rx_timer_fn(unsigned long arg)
 {
 	struct sci_port *s = (struct sci_port *)arg;
 	struct uart_port *port = &s->port;
-
 	u16 scr = sci_in(port, SCSCR);
+
+	if (port->type == PORT_SCIFA) {
+		scr &= ~0x4000;
+		enable_irq(s->irqs[1]);
+	}
 	sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
 	dev_dbg(port->dev, "DMA Rx timed out\n");
 	schedule_work(&s->work_rx);
@@ -1339,8 +1356,7 @@ static void sci_request_dma(struct uart_port *port)
 		sg_init_table(sg, 1);
 		sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
 			    (int)buf[i] & ~PAGE_MASK);
-		sg->dma_address = dma[i];
-		sg->dma_length = sg->length;
+		sg_dma_address(sg) = dma[i];
 	}
 
 	INIT_WORK(&s->work_rx, work_fn_rx);
@@ -1403,8 +1419,12 @@ static void sci_shutdown(struct uart_port *port)
 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 			    struct ktermios *old)
 {
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+	struct sci_port *s = to_sci_port(port);
+#endif
 	unsigned int status, baud, smr_val, max_baud;
 	int t = -1;
+	u16 scfcr = 0;
 
 	/*
 	 * earlyprintk comes here early on with port->uartclk set to zero.
@@ -1427,7 +1447,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */
 
 	if (port->type != PORT_SCI)
-		sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+		sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
 
 	smr_val = sci_in(port, SCSMR) & 3;
 	if ((termios->c_cflag & CSIZE) == CS7)
@@ -1458,10 +1478,32 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 	}
 
 	sci_init_pins(port, termios->c_cflag);
-	sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0);
+	sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
 
 	sci_out(port, SCSCR, SCSCR_INIT(port));
 
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+	/*
+	 * Calculate delay for 1.5 DMA buffers: see
+	 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
+	 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
+	 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
+	 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
+	 * sizes), but it has been found out experimentally, that this is not
+	 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
+	 * as a minimum seem to work perfectly.
+	 */
+	if (s->chan_rx) {
+		s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
+			port->fifosize / 2;
+		dev_dbg(port->dev,
+			"DMA Rx t-out %ums, tty t-out %u jiffies\n",
+			s->rx_timeout * 1000 / HZ, port->timeout);
+		if (s->rx_timeout < msecs_to_jiffies(20))
+			s->rx_timeout = msecs_to_jiffies(20);
+	}
+#endif
+
 	if ((termios->c_cflag & CREAD) != 0)
 		sci_start_rx(port);
 }
@@ -1553,10 +1595,10 @@ static struct uart_ops sci_uart_ops = {
 #endif
 };
 
-static void __devinit sci_init_single(struct platform_device *dev,
-				      struct sci_port *sci_port,
-				      unsigned int index,
-				      struct plat_sci_port *p)
+static int __devinit sci_init_single(struct platform_device *dev,
+				     struct sci_port *sci_port,
+				     unsigned int index,
+				     struct plat_sci_port *p)
 {
 	struct uart_port *port = &sci_port->port;
 
@@ -1577,8 +1619,23 @@ static void __devinit sci_init_single(struct platform_device *dev,
 	}
 
 	if (dev) {
-		sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
-		sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
+		sci_port->iclk = clk_get(&dev->dev, "sci_ick");
+		if (IS_ERR(sci_port->iclk)) {
+			sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
+			if (IS_ERR(sci_port->iclk)) {
+				dev_err(&dev->dev, "can't get iclk\n");
+				return PTR_ERR(sci_port->iclk);
+			}
+		}
+
+		/*
+		 * The function clock is optional, ignore it if we can't
+		 * find it.
+		 */
+		sci_port->fclk = clk_get(&dev->dev, "sci_fck");
+		if (IS_ERR(sci_port->fclk))
+			sci_port->fclk = NULL;
+
 		sci_port->enable = sci_clk_enable;
 		sci_port->disable = sci_clk_disable;
 		port->dev = &dev->dev;
@@ -1605,6 +1662,7 @@ static void __devinit sci_init_single(struct platform_device *dev,
1605#endif 1662#endif
1606 1663
1607 memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs)); 1664 memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
1665 return 0;
1608} 1666}
1609 1667
1610#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 1668#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
@@ -1754,8 +1812,11 @@ static int sci_remove(struct platform_device *dev)
1754 cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER); 1812 cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
1755 1813
1756 spin_lock_irqsave(&priv->lock, flags); 1814 spin_lock_irqsave(&priv->lock, flags);
1757 list_for_each_entry(p, &priv->ports, node) 1815 list_for_each_entry(p, &priv->ports, node) {
1758 uart_remove_one_port(&sci_uart_driver, &p->port); 1816 uart_remove_one_port(&sci_uart_driver, &p->port);
1817 clk_put(p->iclk);
1818 clk_put(p->fclk);
1819 }
1759 spin_unlock_irqrestore(&priv->lock, flags); 1820 spin_unlock_irqrestore(&priv->lock, flags);
1760 1821
1761 kfree(priv); 1822 kfree(priv);
@@ -1781,7 +1842,9 @@ static int __devinit sci_probe_single(struct platform_device *dev,
1781 return 0; 1842 return 0;
1782 } 1843 }
1783 1844
1784 sci_init_single(dev, sciport, index, p); 1845 ret = sci_init_single(dev, sciport, index, p);
1846 if (ret)
1847 return ret;
1785 1848
1786 ret = uart_add_one_port(&sci_uart_driver, &sciport->port); 1849 ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
1787 if (ret) 1850 if (ret)
diff --git a/drivers/sh/Kconfig b/drivers/sh/Kconfig
new file mode 100644
index 000000000000..a54de0b9b3df
--- /dev/null
+++ b/drivers/sh/Kconfig
@@ -0,0 +1,24 @@
1config INTC_USERIMASK
2 bool "Userspace interrupt masking support"
3 depends on ARCH_SHMOBILE || (SUPERH && CPU_SH4A)
4 help
5 This enables support for hardware-assisted userspace hardirq
6 masking.
7
8 SH-4A and newer interrupt blocks all support a special shadowed
 9 page with all non-masking registers obscured when mapped into
10 userspace. This is primarily for use by userspace device
11 drivers that are using special priority levels.
12
13 If in doubt, say N.
14
15config INTC_BALANCING
16 bool "Hardware IRQ balancing support"
17 depends on SMP && SUPERH && CPU_SUBTYPE_SH7786
18 help
19 This enables support for IRQ auto-distribution mode on SH-X3
20 SMP parts. All of the balancing and CPU wakeup decisions are
21 taken care of automatically by hardware for distributed
22 vectors.
23
24 If in doubt, say N.
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 4956bf1f2134..78bb5127abd0 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -4,4 +4,6 @@
4obj-$(CONFIG_SUPERHYWAY) += superhyway/ 4obj-$(CONFIG_SUPERHYWAY) += superhyway/
5obj-$(CONFIG_MAPLE) += maple/ 5obj-$(CONFIG_MAPLE) += maple/
6obj-$(CONFIG_GENERIC_GPIO) += pfc.o 6obj-$(CONFIG_GENERIC_GPIO) += pfc.o
7obj-$(CONFIG_SUPERH) += clk.o
8obj-$(CONFIG_SH_CLK_CPG) += clk-cpg.o
7obj-y += intc.o 9obj-y += intc.o
diff --git a/drivers/sh/clk-cpg.c b/drivers/sh/clk-cpg.c
new file mode 100644
index 000000000000..f5c80ba9ab1c
--- /dev/null
+++ b/drivers/sh/clk-cpg.c
@@ -0,0 +1,298 @@
1#include <linux/clk.h>
2#include <linux/compiler.h>
3#include <linux/slab.h>
4#include <linux/io.h>
5#include <linux/sh_clk.h>
6
7static int sh_clk_mstp32_enable(struct clk *clk)
8{
9 __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
10 clk->enable_reg);
11 return 0;
12}
13
14static void sh_clk_mstp32_disable(struct clk *clk)
15{
16 __raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
17 clk->enable_reg);
18}
19
20static struct clk_ops sh_clk_mstp32_clk_ops = {
21 .enable = sh_clk_mstp32_enable,
22 .disable = sh_clk_mstp32_disable,
23 .recalc = followparent_recalc,
24};
25
26int __init sh_clk_mstp32_register(struct clk *clks, int nr)
27{
28 struct clk *clkp;
29 int ret = 0;
30 int k;
31
32 for (k = 0; !ret && (k < nr); k++) {
33 clkp = clks + k;
34 clkp->ops = &sh_clk_mstp32_clk_ops;
35 ret |= clk_register(clkp);
36 }
37
38 return ret;
39}
40
41static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
42{
43 return clk_rate_table_round(clk, clk->freq_table, rate);
44}
45
46static int sh_clk_div6_divisors[64] = {
47 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
48 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
49 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
50 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
51};
52
53static struct clk_div_mult_table sh_clk_div6_table = {
54 .divisors = sh_clk_div6_divisors,
55 .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
56};
57
58static unsigned long sh_clk_div6_recalc(struct clk *clk)
59{
60 struct clk_div_mult_table *table = &sh_clk_div6_table;
61 unsigned int idx;
62
63 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
64 table, NULL);
65
66 idx = __raw_readl(clk->enable_reg) & 0x003f;
67
68 return clk->freq_table[idx].frequency;
69}
70
71static int sh_clk_div6_set_rate(struct clk *clk,
72 unsigned long rate, int algo_id)
73{
74 unsigned long value;
75 int idx;
76
77 idx = clk_rate_table_find(clk, clk->freq_table, rate);
78 if (idx < 0)
79 return idx;
80
81 value = __raw_readl(clk->enable_reg);
82 value &= ~0x3f;
83 value |= idx;
84 __raw_writel(value, clk->enable_reg);
85 return 0;
86}
87
88static int sh_clk_div6_enable(struct clk *clk)
89{
90 unsigned long value;
91 int ret;
92
93 ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
94 if (ret == 0) {
95 value = __raw_readl(clk->enable_reg);
96 value &= ~0x100; /* clear stop bit to enable clock */
97 __raw_writel(value, clk->enable_reg);
98 }
99 return ret;
100}
101
102static void sh_clk_div6_disable(struct clk *clk)
103{
104 unsigned long value;
105
106 value = __raw_readl(clk->enable_reg);
107 value |= 0x100; /* stop clock */
108 value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
109 __raw_writel(value, clk->enable_reg);
110}
111
112static struct clk_ops sh_clk_div6_clk_ops = {
113 .recalc = sh_clk_div6_recalc,
114 .round_rate = sh_clk_div_round_rate,
115 .set_rate = sh_clk_div6_set_rate,
116 .enable = sh_clk_div6_enable,
117 .disable = sh_clk_div6_disable,
118};
119
120int __init sh_clk_div6_register(struct clk *clks, int nr)
121{
122 struct clk *clkp;
123 void *freq_table;
124 int nr_divs = sh_clk_div6_table.nr_divisors;
125 int freq_table_size = sizeof(struct cpufreq_frequency_table);
126 int ret = 0;
127 int k;
128
129 freq_table_size *= (nr_divs + 1);
130 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
131 if (!freq_table) {
132 pr_err("sh_clk_div6_register: unable to alloc memory\n");
133 return -ENOMEM;
134 }
135
136 for (k = 0; !ret && (k < nr); k++) {
137 clkp = clks + k;
138
139 clkp->ops = &sh_clk_div6_clk_ops;
140 clkp->id = -1;
141 clkp->freq_table = freq_table + (k * freq_table_size);
142 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
143
144 ret = clk_register(clkp);
145 }
146
147 return ret;
148}
149
150static unsigned long sh_clk_div4_recalc(struct clk *clk)
151{
152 struct clk_div4_table *d4t = clk->priv;
153 struct clk_div_mult_table *table = d4t->div_mult_table;
154 unsigned int idx;
155
156 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
157 table, &clk->arch_flags);
158
159 idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;
160
161 return clk->freq_table[idx].frequency;
162}
163
164static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
165{
166 struct clk_div4_table *d4t = clk->priv;
167 struct clk_div_mult_table *table = d4t->div_mult_table;
168 u32 value;
169 int ret;
170
 171 /* we really need a better way to determine the parent index; for
 172 * now, assume an internal parent comes with CLK_ENABLE_ON_INIT set,
 173 * while no CLK_ENABLE_ON_INIT means an external clock.
 174 */
175
176 if (parent->flags & CLK_ENABLE_ON_INIT)
177 value = __raw_readl(clk->enable_reg) & ~(1 << 7);
178 else
179 value = __raw_readl(clk->enable_reg) | (1 << 7);
180
181 ret = clk_reparent(clk, parent);
182 if (ret < 0)
183 return ret;
184
185 __raw_writel(value, clk->enable_reg);
186
 187 /* Rebuild the frequency table */
188 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
189 table, &clk->arch_flags);
190
191 return 0;
192}
193
194static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
195{
196 struct clk_div4_table *d4t = clk->priv;
197 unsigned long value;
198 int idx = clk_rate_table_find(clk, clk->freq_table, rate);
199 if (idx < 0)
200 return idx;
201
202 value = __raw_readl(clk->enable_reg);
203 value &= ~(0xf << clk->enable_bit);
204 value |= (idx << clk->enable_bit);
205 __raw_writel(value, clk->enable_reg);
206
207 if (d4t->kick)
208 d4t->kick(clk);
209
210 return 0;
211}
212
213static int sh_clk_div4_enable(struct clk *clk)
214{
215 __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
216 return 0;
217}
218
219static void sh_clk_div4_disable(struct clk *clk)
220{
221 __raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
222}
223
224static struct clk_ops sh_clk_div4_clk_ops = {
225 .recalc = sh_clk_div4_recalc,
226 .set_rate = sh_clk_div4_set_rate,
227 .round_rate = sh_clk_div_round_rate,
228};
229
230static struct clk_ops sh_clk_div4_enable_clk_ops = {
231 .recalc = sh_clk_div4_recalc,
232 .set_rate = sh_clk_div4_set_rate,
233 .round_rate = sh_clk_div_round_rate,
234 .enable = sh_clk_div4_enable,
235 .disable = sh_clk_div4_disable,
236};
237
238static struct clk_ops sh_clk_div4_reparent_clk_ops = {
239 .recalc = sh_clk_div4_recalc,
240 .set_rate = sh_clk_div4_set_rate,
241 .round_rate = sh_clk_div_round_rate,
242 .enable = sh_clk_div4_enable,
243 .disable = sh_clk_div4_disable,
244 .set_parent = sh_clk_div4_set_parent,
245};
246
247static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
248 struct clk_div4_table *table, struct clk_ops *ops)
249{
250 struct clk *clkp;
251 void *freq_table;
252 int nr_divs = table->div_mult_table->nr_divisors;
253 int freq_table_size = sizeof(struct cpufreq_frequency_table);
254 int ret = 0;
255 int k;
256
257 freq_table_size *= (nr_divs + 1);
258 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
259 if (!freq_table) {
260 pr_err("sh_clk_div4_register: unable to alloc memory\n");
261 return -ENOMEM;
262 }
263
264 for (k = 0; !ret && (k < nr); k++) {
265 clkp = clks + k;
266
267 clkp->ops = ops;
268 clkp->id = -1;
269 clkp->priv = table;
270
271 clkp->freq_table = freq_table + (k * freq_table_size);
272 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
273
274 ret = clk_register(clkp);
275 }
276
277 return ret;
278}
279
280int __init sh_clk_div4_register(struct clk *clks, int nr,
281 struct clk_div4_table *table)
282{
283 return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
284}
285
286int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
287 struct clk_div4_table *table)
288{
289 return sh_clk_div4_register_ops(clks, nr, table,
290 &sh_clk_div4_enable_clk_ops);
291}
292
293int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
294 struct clk_div4_table *table)
295{
296 return sh_clk_div4_register_ops(clks, nr, table,
297 &sh_clk_div4_reparent_clk_ops);
298}
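To give a consumer's-eye view of the registration helpers above, here is a minimal sketch of platform clock setup for a hypothetical SoC. The crystal rate, register address and bit number are invented for illustration, and only the 32-bit MSTP gate case is shown:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

/* hypothetical module-stop control register; the address is invented */
#define MSTPCR0		((void __iomem *)0xa4150030)

static struct clk extal_clk = {
	.name	= "extal",
	.rate	= 33333333,		/* assumed board crystal */
};

static struct clk mstp_clks[] = {
	{
		.name		= "tmu0",	/* illustrative consumer */
		.parent		= &extal_clk,
		.enable_reg	= MSTPCR0,
		.enable_bit	= 15,		/* assumed MSTP bit */
	},
};

int __init hypothetical_clk_init(void)
{
	int ret;

	ret = clk_register(&extal_clk);
	if (!ret)
		ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));

	return ret;
}

Gating then happens through the generic clk_enable()/clk_disable() paths in drivers/sh/clk.c below, with sh_clk_mstp32_enable() clearing the module-stop bit on first use.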
diff --git a/drivers/sh/clk.c b/drivers/sh/clk.c
new file mode 100644
index 000000000000..5d84adac9ec4
--- /dev/null
+++ b/drivers/sh/clk.c
@@ -0,0 +1,545 @@
1/*
2 * drivers/sh/clk.c - SuperH clock framework
3 *
4 * Copyright (C) 2005 - 2009 Paul Mundt
5 *
6 * This clock framework is derived from the OMAP version by:
7 *
8 * Copyright (C) 2004 - 2008 Nokia Corporation
9 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
10 *
11 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
12 *
13 * This file is subject to the terms and conditions of the GNU General Public
14 * License. See the file "COPYING" in the main directory of this archive
15 * for more details.
16 */
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/mutex.h>
21#include <linux/list.h>
22#include <linux/kobject.h>
23#include <linux/sysdev.h>
24#include <linux/seq_file.h>
25#include <linux/err.h>
26#include <linux/platform_device.h>
27#include <linux/debugfs.h>
28#include <linux/cpufreq.h>
29#include <linux/clk.h>
30#include <linux/sh_clk.h>
31
32static LIST_HEAD(clock_list);
33static DEFINE_SPINLOCK(clock_lock);
34static DEFINE_MUTEX(clock_list_sem);
35
36void clk_rate_table_build(struct clk *clk,
37 struct cpufreq_frequency_table *freq_table,
38 int nr_freqs,
39 struct clk_div_mult_table *src_table,
40 unsigned long *bitmap)
41{
42 unsigned long mult, div;
43 unsigned long freq;
44 int i;
45
46 for (i = 0; i < nr_freqs; i++) {
47 div = 1;
48 mult = 1;
49
50 if (src_table->divisors && i < src_table->nr_divisors)
51 div = src_table->divisors[i];
52
53 if (src_table->multipliers && i < src_table->nr_multipliers)
54 mult = src_table->multipliers[i];
55
56 if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
57 freq = CPUFREQ_ENTRY_INVALID;
58 else
59 freq = clk->parent->rate * mult / div;
60
61 freq_table[i].index = i;
62 freq_table[i].frequency = freq;
63 }
64
65 /* Termination entry */
66 freq_table[i].index = i;
67 freq_table[i].frequency = CPUFREQ_TABLE_END;
68}
69
70long clk_rate_table_round(struct clk *clk,
71 struct cpufreq_frequency_table *freq_table,
72 unsigned long rate)
73{
74 unsigned long rate_error, rate_error_prev = ~0UL;
75 unsigned long rate_best_fit = rate;
76 unsigned long highest, lowest;
77 int i;
78
79 highest = lowest = 0;
80
81 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
82 unsigned long freq = freq_table[i].frequency;
83
84 if (freq == CPUFREQ_ENTRY_INVALID)
85 continue;
86
87 if (freq > highest)
88 highest = freq;
89 if (freq < lowest)
90 lowest = freq;
91
92 rate_error = abs(freq - rate);
93 if (rate_error < rate_error_prev) {
94 rate_best_fit = freq;
95 rate_error_prev = rate_error;
96 }
97
98 if (rate_error == 0)
99 break;
100 }
101
102 if (rate >= highest)
103 rate_best_fit = highest;
104 if (rate <= lowest)
105 rate_best_fit = lowest;
106
107 return rate_best_fit;
108}
109
110int clk_rate_table_find(struct clk *clk,
111 struct cpufreq_frequency_table *freq_table,
112 unsigned long rate)
113{
114 int i;
115
116 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
117 unsigned long freq = freq_table[i].frequency;
118
119 if (freq == CPUFREQ_ENTRY_INVALID)
120 continue;
121
122 if (freq == rate)
123 return i;
124 }
125
126 return -ENOENT;
127}
128
 129/* Used for clocks that always have the same value as the parent clock */
130unsigned long followparent_recalc(struct clk *clk)
131{
132 return clk->parent ? clk->parent->rate : 0;
133}
134
135int clk_reparent(struct clk *child, struct clk *parent)
136{
137 list_del_init(&child->sibling);
138 if (parent)
139 list_add(&child->sibling, &parent->children);
140 child->parent = parent;
141
 142 /* now do the debugfs renaming to reattach the child
 143 * to the proper parent */
144
145 return 0;
146}
147
148/* Propagate rate to children */
149void propagate_rate(struct clk *tclk)
150{
151 struct clk *clkp;
152
153 list_for_each_entry(clkp, &tclk->children, sibling) {
154 if (clkp->ops && clkp->ops->recalc)
155 clkp->rate = clkp->ops->recalc(clkp);
156
157 propagate_rate(clkp);
158 }
159}
160
161static void __clk_disable(struct clk *clk)
162{
163 if (WARN(!clk->usecount, "Trying to disable clock %s with 0 usecount\n",
164 clk->name))
165 return;
166
167 if (!(--clk->usecount)) {
168 if (likely(clk->ops && clk->ops->disable))
169 clk->ops->disable(clk);
170 if (likely(clk->parent))
171 __clk_disable(clk->parent);
172 }
173}
174
175void clk_disable(struct clk *clk)
176{
177 unsigned long flags;
178
179 if (!clk)
180 return;
181
182 spin_lock_irqsave(&clock_lock, flags);
183 __clk_disable(clk);
184 spin_unlock_irqrestore(&clock_lock, flags);
185}
186EXPORT_SYMBOL_GPL(clk_disable);
187
188static int __clk_enable(struct clk *clk)
189{
190 int ret = 0;
191
192 if (clk->usecount++ == 0) {
193 if (clk->parent) {
194 ret = __clk_enable(clk->parent);
195 if (unlikely(ret))
196 goto err;
197 }
198
199 if (clk->ops && clk->ops->enable) {
200 ret = clk->ops->enable(clk);
201 if (ret) {
202 if (clk->parent)
203 __clk_disable(clk->parent);
204 goto err;
205 }
206 }
207 }
208
209 return ret;
210err:
211 clk->usecount--;
212 return ret;
213}
214
215int clk_enable(struct clk *clk)
216{
217 unsigned long flags;
218 int ret;
219
220 if (!clk)
221 return -EINVAL;
222
223 spin_lock_irqsave(&clock_lock, flags);
224 ret = __clk_enable(clk);
225 spin_unlock_irqrestore(&clock_lock, flags);
226
227 return ret;
228}
229EXPORT_SYMBOL_GPL(clk_enable);
230
231static LIST_HEAD(root_clks);
232
233/**
234 * recalculate_root_clocks - recalculate and propagate all root clocks
235 *
 236 * Recalculates all root clocks (clocks with no parent), which, if the
 237 * clock's .recalc is set correctly, should also propagate their rates.
238 * Called at init.
239 */
240void recalculate_root_clocks(void)
241{
242 struct clk *clkp;
243
244 list_for_each_entry(clkp, &root_clks, sibling) {
245 if (clkp->ops && clkp->ops->recalc)
246 clkp->rate = clkp->ops->recalc(clkp);
247 propagate_rate(clkp);
248 }
249}
250
251int clk_register(struct clk *clk)
252{
253 if (clk == NULL || IS_ERR(clk))
254 return -EINVAL;
255
256 /*
257 * trap out already registered clocks
258 */
259 if (clk->node.next || clk->node.prev)
260 return 0;
261
262 mutex_lock(&clock_list_sem);
263
264 INIT_LIST_HEAD(&clk->children);
265 clk->usecount = 0;
266
267 if (clk->parent)
268 list_add(&clk->sibling, &clk->parent->children);
269 else
270 list_add(&clk->sibling, &root_clks);
271
272 list_add(&clk->node, &clock_list);
273 if (clk->ops && clk->ops->init)
274 clk->ops->init(clk);
275 mutex_unlock(&clock_list_sem);
276
277 return 0;
278}
279EXPORT_SYMBOL_GPL(clk_register);
280
281void clk_unregister(struct clk *clk)
282{
283 mutex_lock(&clock_list_sem);
284 list_del(&clk->sibling);
285 list_del(&clk->node);
286 mutex_unlock(&clock_list_sem);
287}
288EXPORT_SYMBOL_GPL(clk_unregister);
289
290void clk_enable_init_clocks(void)
291{
292 struct clk *clkp;
293
294 list_for_each_entry(clkp, &clock_list, node)
295 if (clkp->flags & CLK_ENABLE_ON_INIT)
296 clk_enable(clkp);
297}
298
299unsigned long clk_get_rate(struct clk *clk)
300{
301 return clk->rate;
302}
303EXPORT_SYMBOL_GPL(clk_get_rate);
304
305int clk_set_rate(struct clk *clk, unsigned long rate)
306{
307 return clk_set_rate_ex(clk, rate, 0);
308}
309EXPORT_SYMBOL_GPL(clk_set_rate);
310
311int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
312{
313 int ret = -EOPNOTSUPP;
314 unsigned long flags;
315
316 spin_lock_irqsave(&clock_lock, flags);
317
318 if (likely(clk->ops && clk->ops->set_rate)) {
319 ret = clk->ops->set_rate(clk, rate, algo_id);
320 if (ret != 0)
321 goto out_unlock;
322 } else {
323 clk->rate = rate;
324 ret = 0;
325 }
326
327 if (clk->ops && clk->ops->recalc)
328 clk->rate = clk->ops->recalc(clk);
329
330 propagate_rate(clk);
331
332out_unlock:
333 spin_unlock_irqrestore(&clock_lock, flags);
334
335 return ret;
336}
337EXPORT_SYMBOL_GPL(clk_set_rate_ex);
338
339int clk_set_parent(struct clk *clk, struct clk *parent)
340{
341 unsigned long flags;
342 int ret = -EINVAL;
343
344 if (!parent || !clk)
345 return ret;
346 if (clk->parent == parent)
347 return 0;
348
349 spin_lock_irqsave(&clock_lock, flags);
350 if (clk->usecount == 0) {
351 if (clk->ops->set_parent)
352 ret = clk->ops->set_parent(clk, parent);
353 else
354 ret = clk_reparent(clk, parent);
355
356 if (ret == 0) {
357 pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
358 clk->name, clk->parent->name, clk->rate);
359 if (clk->ops->recalc)
360 clk->rate = clk->ops->recalc(clk);
361 propagate_rate(clk);
362 }
363 } else
364 ret = -EBUSY;
365 spin_unlock_irqrestore(&clock_lock, flags);
366
367 return ret;
368}
369EXPORT_SYMBOL_GPL(clk_set_parent);
370
371struct clk *clk_get_parent(struct clk *clk)
372{
373 return clk->parent;
374}
375EXPORT_SYMBOL_GPL(clk_get_parent);
376
377long clk_round_rate(struct clk *clk, unsigned long rate)
378{
379 if (likely(clk->ops && clk->ops->round_rate)) {
380 unsigned long flags, rounded;
381
382 spin_lock_irqsave(&clock_lock, flags);
383 rounded = clk->ops->round_rate(clk, rate);
384 spin_unlock_irqrestore(&clock_lock, flags);
385
386 return rounded;
387 }
388
389 return clk_get_rate(clk);
390}
391EXPORT_SYMBOL_GPL(clk_round_rate);
392
393#ifdef CONFIG_PM
394static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
395{
396 static pm_message_t prev_state;
397 struct clk *clkp;
398
399 switch (state.event) {
400 case PM_EVENT_ON:
 401 /* Resuming from hibernation */
402 if (prev_state.event != PM_EVENT_FREEZE)
403 break;
404
405 list_for_each_entry(clkp, &clock_list, node) {
406 if (likely(clkp->ops)) {
407 unsigned long rate = clkp->rate;
408
409 if (likely(clkp->ops->set_parent))
410 clkp->ops->set_parent(clkp,
411 clkp->parent);
412 if (likely(clkp->ops->set_rate))
413 clkp->ops->set_rate(clkp,
414 rate, NO_CHANGE);
415 else if (likely(clkp->ops->recalc))
416 clkp->rate = clkp->ops->recalc(clkp);
417 }
418 }
419 break;
420 case PM_EVENT_FREEZE:
421 break;
422 case PM_EVENT_SUSPEND:
423 break;
424 }
425
426 prev_state = state;
427 return 0;
428}
429
430static int clks_sysdev_resume(struct sys_device *dev)
431{
432 return clks_sysdev_suspend(dev, PMSG_ON);
433}
434
435static struct sysdev_class clks_sysdev_class = {
436 .name = "clks",
437};
438
439static struct sysdev_driver clks_sysdev_driver = {
440 .suspend = clks_sysdev_suspend,
441 .resume = clks_sysdev_resume,
442};
443
444static struct sys_device clks_sysdev_dev = {
445 .cls = &clks_sysdev_class,
446};
447
448static int __init clk_sysdev_init(void)
449{
450 sysdev_class_register(&clks_sysdev_class);
451 sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
452 sysdev_register(&clks_sysdev_dev);
453
454 return 0;
455}
456subsys_initcall(clk_sysdev_init);
457#endif
458
459/*
460 * debugfs support to trace clock tree hierarchy and attributes
461 */
462static struct dentry *clk_debugfs_root;
463
464static int clk_debugfs_register_one(struct clk *c)
465{
466 int err;
467 struct dentry *d, *child, *child_tmp;
468 struct clk *pa = c->parent;
469 char s[255];
470 char *p = s;
471
472 p += sprintf(p, "%s", c->name);
473 if (c->id >= 0)
474 sprintf(p, ":%d", c->id);
475 d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
476 if (!d)
477 return -ENOMEM;
478 c->dentry = d;
479
480 d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
481 if (!d) {
482 err = -ENOMEM;
483 goto err_out;
484 }
485 d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
486 if (!d) {
487 err = -ENOMEM;
488 goto err_out;
489 }
490 d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
491 if (!d) {
492 err = -ENOMEM;
493 goto err_out;
494 }
495 return 0;
496
497err_out:
498 d = c->dentry;
499 list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
500 debugfs_remove(child);
501 debugfs_remove(c->dentry);
502 return err;
503}
504
505static int clk_debugfs_register(struct clk *c)
506{
507 int err;
508 struct clk *pa = c->parent;
509
510 if (pa && !pa->dentry) {
511 err = clk_debugfs_register(pa);
512 if (err)
513 return err;
514 }
515
516 if (!c->dentry && c->name) {
517 err = clk_debugfs_register_one(c);
518 if (err)
519 return err;
520 }
521 return 0;
522}
523
524static int __init clk_debugfs_init(void)
525{
526 struct clk *c;
527 struct dentry *d;
528 int err;
529
530 d = debugfs_create_dir("clock", NULL);
531 if (!d)
532 return -ENOMEM;
533 clk_debugfs_root = d;
534
535 list_for_each_entry(c, &clock_list, node) {
536 err = clk_debugfs_register(c);
537 if (err)
538 goto err_out;
539 }
540 return 0;
541err_out:
542 debugfs_remove_recursive(clk_debugfs_root);
543 return err;
544}
545late_initcall(clk_debugfs_init);
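From the driver side, the framework above is reached through the standard clk API. A minimal sketch, assuming a clock name and target rate that are purely illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int hypothetical_clk_consumer(struct device *dev)
{
	struct clk *clk;
	long rounded;
	int ret;

	clk = clk_get(dev, "peripheral_clk");	/* name is an assumption */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* bumps usecount; __clk_enable() walks up and enables parents first */
	ret = clk_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* without a .round_rate op this falls back to the current rate */
	rounded = clk_round_rate(clk, 48000000);
	clk_set_rate(clk, rounded);

	dev_info(dev, "running at %lu Hz\n", clk_get_rate(clk));

	clk_disable(clk);
	clk_put(clk);

	return 0;
}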
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 94ad6bd86a00..c585574b9aed 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -28,6 +28,7 @@
28#include <linux/topology.h> 28#include <linux/topology.h>
29#include <linux/bitmap.h> 29#include <linux/bitmap.h>
30#include <linux/cpumask.h> 30#include <linux/cpumask.h>
31#include <asm/sizes.h>
31 32
32#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ 33#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
33 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ 34 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -45,6 +46,12 @@ struct intc_handle_int {
45 unsigned long handle; 46 unsigned long handle;
46}; 47};
47 48
49struct intc_window {
50 phys_addr_t phys;
51 void __iomem *virt;
52 unsigned long size;
53};
54
48struct intc_desc_int { 55struct intc_desc_int {
49 struct list_head list; 56 struct list_head list;
50 struct sys_device sysdev; 57 struct sys_device sysdev;
@@ -58,6 +65,8 @@ struct intc_desc_int {
58 unsigned int nr_prio; 65 unsigned int nr_prio;
59 struct intc_handle_int *sense; 66 struct intc_handle_int *sense;
60 unsigned int nr_sense; 67 unsigned int nr_sense;
68 struct intc_window *window;
69 unsigned int nr_windows;
61 struct irq_chip chip; 70 struct irq_chip chip;
62}; 71};
63 72
@@ -87,8 +96,12 @@ static DEFINE_SPINLOCK(vector_lock);
87#define SMP_NR(d, x) 1 96#define SMP_NR(d, x) 1
88#endif 97#endif
89 98
90static unsigned int intc_prio_level[NR_IRQS]; /* for now */ 99static unsigned int intc_prio_level[NR_IRQS]; /* for now */
100static unsigned int default_prio_level = 2; /* 2 - 16 */
91static unsigned long ack_handle[NR_IRQS]; 101static unsigned long ack_handle[NR_IRQS];
102#ifdef CONFIG_INTC_BALANCING
103static unsigned long dist_handle[NR_IRQS];
104#endif
92 105
93static inline struct intc_desc_int *get_intc_desc(unsigned int irq) 106static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
94{ 107{
@@ -96,6 +109,47 @@ static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
96 return container_of(chip, struct intc_desc_int, chip); 109 return container_of(chip, struct intc_desc_int, chip);
97} 110}
98 111
112static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
113 unsigned long address)
114{
115 struct intc_window *window;
116 int k;
117
118 /* scan through physical windows and convert address */
119 for (k = 0; k < d->nr_windows; k++) {
120 window = d->window + k;
121
122 if (address < window->phys)
123 continue;
124
125 if (address >= (window->phys + window->size))
126 continue;
127
128 address -= window->phys;
129 address += (unsigned long)window->virt;
130
131 return address;
132 }
133
134 /* no windows defined, register must be 1:1 mapped virt:phys */
135 return address;
136}
137
138static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
139{
140 unsigned int k;
141
142 address = intc_phys_to_virt(d, address);
143
144 for (k = 0; k < d->nr_reg; k++) {
145 if (d->reg[k] == address)
146 return k;
147 }
148
149 BUG();
150 return 0;
151}
152
99static inline unsigned int set_field(unsigned int value, 153static inline unsigned int set_field(unsigned int value,
100 unsigned int field_value, 154 unsigned int field_value,
101 unsigned int handle) 155 unsigned int handle)
@@ -229,6 +283,85 @@ static void (*intc_disable_fns[])(unsigned long addr,
229 [MODE_PCLR_REG] = intc_mode_field, 283 [MODE_PCLR_REG] = intc_mode_field,
230}; 284};
231 285
286#ifdef CONFIG_INTC_BALANCING
287static inline void intc_balancing_enable(unsigned int irq)
288{
289 struct intc_desc_int *d = get_intc_desc(irq);
290 unsigned long handle = dist_handle[irq];
291 unsigned long addr;
292
293 if (irq_balancing_disabled(irq) || !handle)
294 return;
295
296 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
297 intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
298}
299
300static inline void intc_balancing_disable(unsigned int irq)
301{
302 struct intc_desc_int *d = get_intc_desc(irq);
303 unsigned long handle = dist_handle[irq];
304 unsigned long addr;
305
306 if (irq_balancing_disabled(irq) || !handle)
307 return;
308
309 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
310 intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
311}
312
313static unsigned int intc_dist_data(struct intc_desc *desc,
314 struct intc_desc_int *d,
315 intc_enum enum_id)
316{
317 struct intc_mask_reg *mr = desc->hw.mask_regs;
318 unsigned int i, j, fn, mode;
319 unsigned long reg_e, reg_d;
320
321 for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
322 mr = desc->hw.mask_regs + i;
323
324 /*
325 * Skip this entry if there's no auto-distribution
326 * register associated with it.
327 */
328 if (!mr->dist_reg)
329 continue;
330
331 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
332 if (mr->enum_ids[j] != enum_id)
333 continue;
334
335 fn = REG_FN_MODIFY_BASE;
336 mode = MODE_ENABLE_REG;
337 reg_e = mr->dist_reg;
338 reg_d = mr->dist_reg;
339
340 fn += (mr->reg_width >> 3) - 1;
341 return _INTC_MK(fn, mode,
342 intc_get_reg(d, reg_e),
343 intc_get_reg(d, reg_d),
344 1,
345 (mr->reg_width - 1) - j);
346 }
347 }
348
349 /*
350 * It's possible we've gotten here with no distribution options
 351 * available for the IRQ in question; in that case we just skip it.
352 */
353 return 0;
354}
355#else
356static inline void intc_balancing_enable(unsigned int irq)
357{
358}
359
360static inline void intc_balancing_disable(unsigned int irq)
361{
362}
363#endif
364
232static inline void _intc_enable(unsigned int irq, unsigned long handle) 365static inline void _intc_enable(unsigned int irq, unsigned long handle)
233{ 366{
234 struct intc_desc_int *d = get_intc_desc(irq); 367 struct intc_desc_int *d = get_intc_desc(irq);
@@ -244,6 +377,8 @@ static inline void _intc_enable(unsigned int irq, unsigned long handle)
244 intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\ 377 intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
245 [_INTC_FN(handle)], irq); 378 [_INTC_FN(handle)], irq);
246 } 379 }
380
381 intc_balancing_enable(irq);
247} 382}
248 383
249static void intc_enable(unsigned int irq) 384static void intc_enable(unsigned int irq)
@@ -254,10 +389,12 @@ static void intc_enable(unsigned int irq)
254static void intc_disable(unsigned int irq) 389static void intc_disable(unsigned int irq)
255{ 390{
256 struct intc_desc_int *d = get_intc_desc(irq); 391 struct intc_desc_int *d = get_intc_desc(irq);
257 unsigned long handle = (unsigned long) get_irq_chip_data(irq); 392 unsigned long handle = (unsigned long)get_irq_chip_data(irq);
258 unsigned long addr; 393 unsigned long addr;
259 unsigned int cpu; 394 unsigned int cpu;
260 395
396 intc_balancing_disable(irq);
397
261 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { 398 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
262#ifdef CONFIG_SMP 399#ifdef CONFIG_SMP
263 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity)) 400 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
@@ -336,8 +473,7 @@ static void intc_mask_ack(unsigned int irq)
336 473
337 intc_disable(irq); 474 intc_disable(irq);
338 475
339 /* read register and write zero only to the assocaited bit */ 476 /* read register and write zero only to the associated bit */
340
341 if (handle) { 477 if (handle) {
342 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0); 478 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
343 switch (_INTC_FN(handle)) { 479 switch (_INTC_FN(handle)) {
@@ -366,7 +502,8 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
366{ 502{
367 int i; 503 int i;
368 504
369 /* this doesn't scale well, but... 505 /*
506 * this doesn't scale well, but...
370 * 507 *
 371 * this function should only be used for certain uncommon 508 * this function should only be used for certain uncommon
372 * operations such as intc_set_priority() and intc_set_sense() 509 * operations such as intc_set_priority() and intc_set_sense()
@@ -377,7 +514,6 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
377 * memory footprint down is to make sure the array is sorted 514 * memory footprint down is to make sure the array is sorted
378 * and then perform a bisect to lookup the irq. 515 * and then perform a bisect to lookup the irq.
379 */ 516 */
380
381 for (i = 0; i < nr_hp; i++) { 517 for (i = 0; i < nr_hp; i++) {
382 if ((hp + i)->irq != irq) 518 if ((hp + i)->irq != irq)
383 continue; 519 continue;
@@ -408,7 +544,6 @@ int intc_set_priority(unsigned int irq, unsigned int prio)
408 * primary masking method is using intc_prio_level[irq] 544 * primary masking method is using intc_prio_level[irq]
409 * priority level will be set during next enable() 545 * priority level will be set during next enable()
410 */ 546 */
411
412 if (_INTC_FN(ihp->handle) != REG_FN_ERR) 547 if (_INTC_FN(ihp->handle) != REG_FN_ERR)
413 _intc_enable(irq, ihp->handle); 548 _intc_enable(irq, ihp->handle);
414 } 549 }
@@ -447,20 +582,6 @@ static int intc_set_sense(unsigned int irq, unsigned int type)
447 return 0; 582 return 0;
448} 583}
449 584
450static unsigned int __init intc_get_reg(struct intc_desc_int *d,
451 unsigned long address)
452{
453 unsigned int k;
454
455 for (k = 0; k < d->nr_reg; k++) {
456 if (d->reg[k] == address)
457 return k;
458 }
459
460 BUG();
461 return 0;
462}
463
464static intc_enum __init intc_grp_id(struct intc_desc *desc, 585static intc_enum __init intc_grp_id(struct intc_desc *desc,
465 intc_enum enum_id) 586 intc_enum enum_id)
466{ 587{
@@ -718,13 +839,14 @@ static void __init intc_register_irq(struct intc_desc *desc,
718 */ 839 */
719 set_bit(irq, intc_irq_map); 840 set_bit(irq, intc_irq_map);
720 841
721 /* Prefer single interrupt source bitmap over other combinations: 842 /*
843 * Prefer single interrupt source bitmap over other combinations:
844 *
722 * 1. bitmap, single interrupt source 845 * 1. bitmap, single interrupt source
723 * 2. priority, single interrupt source 846 * 2. priority, single interrupt source
724 * 3. bitmap, multiple interrupt sources (groups) 847 * 3. bitmap, multiple interrupt sources (groups)
725 * 4. priority, multiple interrupt sources (groups) 848 * 4. priority, multiple interrupt sources (groups)
726 */ 849 */
727
728 data[0] = intc_mask_data(desc, d, enum_id, 0); 850 data[0] = intc_mask_data(desc, d, enum_id, 0);
729 data[1] = intc_prio_data(desc, d, enum_id, 0); 851 data[1] = intc_prio_data(desc, d, enum_id, 0);
730 852
@@ -749,10 +871,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
749 handle_level_irq, "level"); 871 handle_level_irq, "level");
750 set_irq_chip_data(irq, (void *)data[primary]); 872 set_irq_chip_data(irq, (void *)data[primary]);
751 873
752 /* set priority level 874 /*
875 * set priority level
753 * - this needs to be at least 2 for 5-bit priorities on 7780 876 * - this needs to be at least 2 for 5-bit priorities on 7780
754 */ 877 */
755 intc_prio_level[irq] = 2; 878 intc_prio_level[irq] = default_prio_level;
756 879
757 /* enable secondary masking method if present */ 880 /* enable secondary masking method if present */
758 if (data[!primary]) 881 if (data[!primary])
@@ -769,7 +892,6 @@ static void __init intc_register_irq(struct intc_desc *desc,
769 * only secondary priority should access registers, so 892 * only secondary priority should access registers, so
770 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority() 893 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
771 */ 894 */
772
773 hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0); 895 hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
774 hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0); 896 hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
775 } 897 }
@@ -790,6 +912,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
790 if (desc->hw.ack_regs) 912 if (desc->hw.ack_regs)
791 ack_handle[irq] = intc_ack_data(desc, d, enum_id); 913 ack_handle[irq] = intc_ack_data(desc, d, enum_id);
792 914
915#ifdef CONFIG_INTC_BALANCING
916 if (desc->hw.mask_regs)
917 dist_handle[irq] = intc_dist_data(desc, d, enum_id);
918#endif
919
793#ifdef CONFIG_ARM 920#ifdef CONFIG_ARM
794 set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */ 921 set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
795#endif 922#endif
@@ -801,6 +928,8 @@ static unsigned int __init save_reg(struct intc_desc_int *d,
801 unsigned int smp) 928 unsigned int smp)
802{ 929{
803 if (value) { 930 if (value) {
931 value = intc_phys_to_virt(d, value);
932
804 d->reg[cnt] = value; 933 d->reg[cnt] = value;
805#ifdef CONFIG_SMP 934#ifdef CONFIG_SMP
806 d->smp[cnt] = smp; 935 d->smp[cnt] = smp;
@@ -816,25 +945,59 @@ static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
816 generic_handle_irq((unsigned int)get_irq_data(irq)); 945 generic_handle_irq((unsigned int)get_irq_data(irq));
817} 946}
818 947
819void __init register_intc_controller(struct intc_desc *desc) 948int __init register_intc_controller(struct intc_desc *desc)
820{ 949{
821 unsigned int i, k, smp; 950 unsigned int i, k, smp;
822 struct intc_hw_desc *hw = &desc->hw; 951 struct intc_hw_desc *hw = &desc->hw;
823 struct intc_desc_int *d; 952 struct intc_desc_int *d;
953 struct resource *res;
954
955 pr_info("intc: Registered controller '%s' with %u IRQs\n",
956 desc->name, hw->nr_vectors);
824 957
825 d = kzalloc(sizeof(*d), GFP_NOWAIT); 958 d = kzalloc(sizeof(*d), GFP_NOWAIT);
959 if (!d)
960 goto err0;
826 961
827 INIT_LIST_HEAD(&d->list); 962 INIT_LIST_HEAD(&d->list);
828 list_add(&d->list, &intc_list); 963 list_add(&d->list, &intc_list);
829 964
965 if (desc->num_resources) {
966 d->nr_windows = desc->num_resources;
967 d->window = kzalloc(d->nr_windows * sizeof(*d->window),
968 GFP_NOWAIT);
969 if (!d->window)
970 goto err1;
971
972 for (k = 0; k < d->nr_windows; k++) {
973 res = desc->resource + k;
974 WARN_ON(resource_type(res) != IORESOURCE_MEM);
975 d->window[k].phys = res->start;
976 d->window[k].size = resource_size(res);
977 d->window[k].virt = ioremap_nocache(res->start,
978 resource_size(res));
979 if (!d->window[k].virt)
980 goto err2;
981 }
982 }
983
830 d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0; 984 d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
985#ifdef CONFIG_INTC_BALANCING
986 if (d->nr_reg)
987 d->nr_reg += hw->nr_mask_regs;
988#endif
831 d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0; 989 d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
832 d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0; 990 d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
833 d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0; 991 d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
834 992
835 d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT); 993 d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
994 if (!d->reg)
995 goto err2;
996
836#ifdef CONFIG_SMP 997#ifdef CONFIG_SMP
837 d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT); 998 d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
999 if (!d->smp)
1000 goto err3;
838#endif 1001#endif
839 k = 0; 1002 k = 0;
840 1003
@@ -843,12 +1006,17 @@ void __init register_intc_controller(struct intc_desc *desc)
843 smp = IS_SMP(hw->mask_regs[i]); 1006 smp = IS_SMP(hw->mask_regs[i]);
844 k += save_reg(d, k, hw->mask_regs[i].set_reg, smp); 1007 k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
845 k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp); 1008 k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
1009#ifdef CONFIG_INTC_BALANCING
1010 k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
1011#endif
846 } 1012 }
847 } 1013 }
848 1014
849 if (hw->prio_regs) { 1015 if (hw->prio_regs) {
850 d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio), 1016 d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
851 GFP_NOWAIT); 1017 GFP_NOWAIT);
1018 if (!d->prio)
1019 goto err4;
852 1020
853 for (i = 0; i < hw->nr_prio_regs; i++) { 1021 for (i = 0; i < hw->nr_prio_regs; i++) {
854 smp = IS_SMP(hw->prio_regs[i]); 1022 smp = IS_SMP(hw->prio_regs[i]);
@@ -860,6 +1028,8 @@ void __init register_intc_controller(struct intc_desc *desc)
860 if (hw->sense_regs) { 1028 if (hw->sense_regs) {
861 d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense), 1029 d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
862 GFP_NOWAIT); 1030 GFP_NOWAIT);
1031 if (!d->sense)
1032 goto err5;
863 1033
864 for (i = 0; i < hw->nr_sense_regs; i++) 1034 for (i = 0; i < hw->nr_sense_regs; i++)
865 k += save_reg(d, k, hw->sense_regs[i].reg, 0); 1035 k += save_reg(d, k, hw->sense_regs[i].reg, 0);
@@ -906,7 +1076,7 @@ void __init register_intc_controller(struct intc_desc *desc)
906 1076
907 irq_desc = irq_to_desc_alloc_node(irq, numa_node_id()); 1077 irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
908 if (unlikely(!irq_desc)) { 1078 if (unlikely(!irq_desc)) {
909 pr_info("can't get irq_desc for %d\n", irq); 1079 pr_err("can't get irq_desc for %d\n", irq);
910 continue; 1080 continue;
911 } 1081 }
912 1082
@@ -926,7 +1096,7 @@ void __init register_intc_controller(struct intc_desc *desc)
926 */ 1096 */
927 irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id()); 1097 irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
928 if (unlikely(!irq_desc)) { 1098 if (unlikely(!irq_desc)) {
929 pr_info("can't get irq_desc for %d\n", irq2); 1099 pr_err("can't get irq_desc for %d\n", irq2);
930 continue; 1100 continue;
931 } 1101 }
932 1102
@@ -942,8 +1112,100 @@ void __init register_intc_controller(struct intc_desc *desc)
942 /* enable bits matching force_enable after registering irqs */ 1112 /* enable bits matching force_enable after registering irqs */
943 if (desc->force_enable) 1113 if (desc->force_enable)
944 intc_enable_disable_enum(desc, d, desc->force_enable, 1); 1114 intc_enable_disable_enum(desc, d, desc->force_enable, 1);
1115
1116 return 0;
1117err5:
1118 kfree(d->prio);
1119err4:
1120#ifdef CONFIG_SMP
1121 kfree(d->smp);
1122err3:
1123#endif
1124 kfree(d->reg);
1125err2:
1126 for (k = 0; k < d->nr_windows; k++)
1127 if (d->window[k].virt)
1128 iounmap(d->window[k].virt);
1129
1130 kfree(d->window);
1131err1:
1132 kfree(d);
1133err0:
1134 pr_err("unable to allocate INTC memory\n");
1135
1136 return -ENOMEM;
1137}
1138
1139#ifdef CONFIG_INTC_USERIMASK
1140static void __iomem *uimask;
1141
1142int register_intc_userimask(unsigned long addr)
1143{
1144 if (unlikely(uimask))
1145 return -EBUSY;
1146
1147 uimask = ioremap_nocache(addr, SZ_4K);
1148 if (unlikely(!uimask))
1149 return -ENOMEM;
1150
1151 pr_info("intc: userimask support registered for levels 0 -> %d\n",
1152 default_prio_level - 1);
1153
1154 return 0;
1155}
1156
1157static ssize_t
1158show_intc_userimask(struct sysdev_class *cls,
1159 struct sysdev_class_attribute *attr, char *buf)
1160{
1161 return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
1162}
1163
1164static ssize_t
1165store_intc_userimask(struct sysdev_class *cls,
1166 struct sysdev_class_attribute *attr,
1167 const char *buf, size_t count)
1168{
1169 unsigned long level;
1170
1171 level = simple_strtoul(buf, NULL, 10);
1172
1173 /*
1174 * Minimal acceptable IRQ levels are in the 2 - 16 range, but
 1175 * these are chomped so as not to interfere with normal IRQs.
 1176 *
 1177 * Level 1 is a special case on some CPUs in that it's not
 1178 * directly settable, but given that USERIMASK cuts off below a
 1179 * certain level, we don't care about this limitation here.
 1180 * Level 0, on the other hand, equates to user masking disabled.
1181 *
1182 * We use default_prio_level as a cut off so that only special
1183 * case opt-in IRQs can be mangled.
1184 */
1185 if (level >= default_prio_level)
1186 return -EINVAL;
1187
1188 __raw_writel(0xa5 << 24 | level << 4, uimask);
1189
1190 return count;
945} 1191}
946 1192
1193static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
1194 show_intc_userimask, store_intc_userimask);
1195#endif
1196
1197static ssize_t
1198show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
1199{
1200 struct intc_desc_int *d;
1201
1202 d = container_of(dev, struct intc_desc_int, sysdev);
1203
1204 return sprintf(buf, "%s\n", d->chip.name);
1205}
1206
1207static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
1208
947static int intc_suspend(struct sys_device *dev, pm_message_t state) 1209static int intc_suspend(struct sys_device *dev, pm_message_t state)
948{ 1210{
949 struct intc_desc_int *d; 1211 struct intc_desc_int *d;
@@ -1003,19 +1265,28 @@ static int __init register_intc_sysdevs(void)
1003 int id = 0; 1265 int id = 0;
1004 1266
1005 error = sysdev_class_register(&intc_sysdev_class); 1267 error = sysdev_class_register(&intc_sysdev_class);
1268#ifdef CONFIG_INTC_USERIMASK
1269 if (!error && uimask)
1270 error = sysdev_class_create_file(&intc_sysdev_class,
1271 &attr_userimask);
1272#endif
1006 if (!error) { 1273 if (!error) {
1007 list_for_each_entry(d, &intc_list, list) { 1274 list_for_each_entry(d, &intc_list, list) {
1008 d->sysdev.id = id; 1275 d->sysdev.id = id;
1009 d->sysdev.cls = &intc_sysdev_class; 1276 d->sysdev.cls = &intc_sysdev_class;
1010 error = sysdev_register(&d->sysdev); 1277 error = sysdev_register(&d->sysdev);
1278 if (error == 0)
1279 error = sysdev_create_file(&d->sysdev,
1280 &attr_name);
1011 if (error) 1281 if (error)
1012 break; 1282 break;
1283
1013 id++; 1284 id++;
1014 } 1285 }
1015 } 1286 }
1016 1287
1017 if (error) 1288 if (error)
1018 pr_warning("intc: sysdev registration error\n"); 1289 pr_err("intc: sysdev registration error\n");
1019 1290
1020 return error; 1291 return error;
1021} 1292}
@@ -1048,7 +1319,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
1048 1319
1049 desc = irq_to_desc_alloc_node(new, node); 1320 desc = irq_to_desc_alloc_node(new, node);
1050 if (unlikely(!desc)) { 1321 if (unlikely(!desc)) {
1051 pr_info("can't get irq_desc for %d\n", new); 1322 pr_err("can't get irq_desc for %d\n", new);
1052 goto out_unlock; 1323 goto out_unlock;
1053 } 1324 }
1054 1325
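The userimask knob added above surfaces as a sysdev class attribute. Assuming the class is named "intc", a userspace sketch for handing levels below default_prio_level to a userspace driver might look like this; the sysfs path is an assumption, not something this patch spells out:

#include <stdio.h>

int main(void)
{
	/* assumed path, derived from the sysdev class attribute above */
	const char *path = "/sys/devices/system/intc/userimask";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* the level must stay below default_prio_level (2), i.e. 0 or 1 */
	fprintf(f, "1\n");
	fclose(f);

	return 0;
}

Writes at or above default_prio_level are rejected with -EINVAL by store_intc_userimask(), so only the special-case low levels can ever be masked from userspace.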
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 36828358a4d8..e76b1afafe07 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -36,8 +36,7 @@
36#include <asm/delay.h> 36#include <asm/delay.h>
37 37
38#include <mach/dma.h> 38#include <mach/dma.h>
39#include <mach/regs-ssp.h> 39#include <plat/ssp.h>
40#include <mach/ssp.h>
41#include <mach/pxa2xx_spi.h> 40#include <mach/pxa2xx_spi.h>
42 41
43MODULE_AUTHOR("Stephen Street"); 42MODULE_AUTHOR("Stephen Street");
@@ -1318,14 +1317,14 @@ static int setup(struct spi_device *spi)
1318 /* NOTE: PXA25x_SSP _could_ use external clocking ... */ 1317 /* NOTE: PXA25x_SSP _could_ use external clocking ... */
1319 if (drv_data->ssp_type != PXA25x_SSP) 1318 if (drv_data->ssp_type != PXA25x_SSP)
1320 dev_dbg(&spi->dev, "%ld Hz actual, %s\n", 1319 dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1321 clk_get_rate(ssp->clk) 1320 clk_get_rate(ssp->clk)
1322 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), 1321 / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
1323 chip->enable_dma ? "DMA" : "PIO"); 1322 chip->enable_dma ? "DMA" : "PIO");
1324 else 1323 else
1325 dev_dbg(&spi->dev, "%ld Hz actual, %s\n", 1324 dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1326 clk_get_rate(ssp->clk) / 2 1325 clk_get_rate(ssp->clk) / 2
1327 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), 1326 / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
1328 chip->enable_dma ? "DMA" : "PIO"); 1327 chip->enable_dma ? "DMA" : "PIO");
1329 1328
1330 if (spi->bits_per_word <= 8) { 1329 if (spi->bits_per_word <= 8) {
1331 chip->n_bytes = 1; 1330 chip->n_bytes = 1;
@@ -1466,7 +1465,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
1466 1465
1467 platform_info = dev->platform_data; 1466 platform_info = dev->platform_data;
1468 1467
1469 ssp = ssp_request(pdev->id, pdev->name); 1468 ssp = pxa_ssp_request(pdev->id, pdev->name);
1470 if (ssp == NULL) { 1469 if (ssp == NULL) {
1471 dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id); 1470 dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id);
1472 return -ENODEV; 1471 return -ENODEV;
@@ -1476,7 +1475,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
1476 master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); 1475 master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
1477 if (!master) { 1476 if (!master) {
1478 dev_err(&pdev->dev, "cannot alloc spi_master\n"); 1477 dev_err(&pdev->dev, "cannot alloc spi_master\n");
1479 ssp_free(ssp); 1478 pxa_ssp_free(ssp);
1480 return -ENOMEM; 1479 return -ENOMEM;
1481 } 1480 }
1482 drv_data = spi_master_get_devdata(master); 1481 drv_data = spi_master_get_devdata(master);
@@ -1558,7 +1557,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
1558 write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) | 1557 write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
1559 SSCR1_TxTresh(TX_THRESH_DFLT), 1558 SSCR1_TxTresh(TX_THRESH_DFLT),
1560 drv_data->ioaddr); 1559 drv_data->ioaddr);
1561 write_SSCR0(SSCR0_SerClkDiv(2) 1560 write_SSCR0(SSCR0_SCR(2)
1562 | SSCR0_Motorola 1561 | SSCR0_Motorola
1563 | SSCR0_DataSize(8), 1562 | SSCR0_DataSize(8),
1564 drv_data->ioaddr); 1563 drv_data->ioaddr);
@@ -1605,7 +1604,7 @@ out_error_irq_alloc:
1605 1604
1606out_error_master_alloc: 1605out_error_master_alloc:
1607 spi_master_put(master); 1606 spi_master_put(master);
1608 ssp_free(ssp); 1607 pxa_ssp_free(ssp);
1609 return status; 1608 return status;
1610} 1609}
1611 1610
@@ -1649,7 +1648,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
1649 free_irq(ssp->irq, drv_data); 1648 free_irq(ssp->irq, drv_data);
1650 1649
1651 /* Release SSP */ 1650 /* Release SSP */
1652 ssp_free(ssp); 1651 pxa_ssp_free(ssp);
1653 1652
1654 /* Disconnect from the SPI framework */ 1653 /* Disconnect from the SPI framework */
1655 spi_unregister_master(drv_data->master); 1654 spi_unregister_master(drv_data->master);
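The dev_dbg() lines above report the effective bit rate derived from the SSCR0.SCR divider field; the relationship is easy to verify standalone (the source clock and SCR value below are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long clk = 13000000;	/* assumed ssp->clk rate in Hz */
	unsigned int scr = 3;		/* assumed SSCR0.SCR field value */

	/* non-PXA25x SSP: bit rate = source clock / (1 + SCR) */
	printf("%lu Hz actual\n", clk / (1 + scr));

	/* PXA25x SSP: an extra divide-by-two stage */
	printf("%lu Hz actual (PXA25x)\n", clk / 2 / (1 + scr));

	return 0;
}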
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 4a6366a42129..111a01a747fc 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -380,6 +380,7 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry)
380 mutex_lock(&inode->i_mutex); 380 mutex_lock(&inode->i_mutex);
381 dentry_unhash(dentry); 381 dentry_unhash(dentry);
382 if (usbfs_empty(dentry)) { 382 if (usbfs_empty(dentry)) {
383 dont_mount(dentry);
383 drop_nlink(dentry->d_inode); 384 drop_nlink(dentry->d_inode);
384 drop_nlink(dentry->d_inode); 385 drop_nlink(dentry->d_inode);
385 dput(dentry); 386 dput(dentry);
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index df1bae9b048e..eaa79c8a9b8c 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -366,6 +366,13 @@ rescan:
366 if (is_done) 366 if (is_done)
367 done(ep, req, 0); 367 done(ep, req, 0);
368 else if (ep->is_pingpong) { 368 else if (ep->is_pingpong) {
369 /*
370 * One dummy read to delay the code because of a HW glitch:
371 * CSR returns bad RXCOUNT when read too soon after updating
372 * RX_DATA_BK flags.
373 */
374 csr = __raw_readl(creg);
375
369 bufferspace -= count; 376 bufferspace -= count;
370 buf += count; 377 buf += count;
371 goto rescan; 378 goto rescan;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e69d238c5af0..49fa953aaf6e 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1035,7 +1035,12 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1035/* This actually signals the guest, using eventfd. */ 1035/* This actually signals the guest, using eventfd. */
1036void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) 1036void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1037{ 1037{
1038 __u16 flags = 0; 1038 __u16 flags;
1039 /* Flush out used index updates. This is paired
1040 * with the barrier that the Guest executes when enabling
1041 * interrupts. */
1042 smp_mb();
1043
1039 if (get_user(flags, &vq->avail->flags)) { 1044 if (get_user(flags, &vq->avail->flags)) {
1040 vq_err(vq, "Failed to get flags"); 1045 vq_err(vq, "Failed to get flags");
1041 return; 1046 return;
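The pairing the new comment describes is the classic store-buffer pattern: each side stores, issues a full barrier, then loads the other side's variable. A userspace schematic of the idea (GCC builtins standing in for smp_mb(); this is not vhost code):

#include <pthread.h>
#include <stdio.h>

/* shared state standing in for used->idx and avail->flags */
static volatile int used_idx;
static volatile int notify_enabled;

static void *host_side(void *arg)
{
	used_idx = 1;			/* publish the used entry */
	__sync_synchronize();		/* the new smp_mb() in vhost_signal() */
	if (notify_enabled)
		puts("host: signalling guest");
	return NULL;
}

static void *guest_side(void *arg)
{
	notify_enabled = 1;		/* guest re-enables notifications */
	__sync_synchronize();		/* the paired barrier in the guest */
	if (used_idx)
		puts("guest: found work without a signal");
	return NULL;
}

int main(void)
{
	pthread_t h, g;

	pthread_create(&h, NULL, host_side, NULL);
	pthread_create(&g, NULL, guest_side, NULL);
	pthread_join(h, NULL);
	pthread_join(g, NULL);
	return 0;
}

With both barriers in place at least one side is guaranteed to observe the other's store, so a wakeup cannot be lost; drop the host-side barrier and both loads may see stale values.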
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index dca48df98444..e5d6b56d4447 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -50,8 +50,9 @@
50#include <linux/fb.h> 50#include <linux/fb.h>
51#include <linux/init.h> 51#include <linux/init.h>
52#include <linux/ioport.h> 52#include <linux/ioport.h>
53 53#include <linux/platform_device.h>
54#include <linux/uaccess.h> 54#include <linux/uaccess.h>
55
55#include <asm/system.h> 56#include <asm/system.h>
56#include <asm/irq.h> 57#include <asm/irq.h>
57#include <asm/amigahw.h> 58#include <asm/amigahw.h>
@@ -1135,7 +1136,7 @@ static int amifb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg
1135 * Interface to the low level console driver 1136 * Interface to the low level console driver
1136 */ 1137 */
1137 1138
1138static void amifb_deinit(void); 1139static void amifb_deinit(struct platform_device *pdev);
1139 1140
1140 /* 1141 /*
1141 * Internal routines 1142 * Internal routines
@@ -2246,7 +2247,7 @@ static inline void chipfree(void)
2246 * Initialisation 2247 * Initialisation
2247 */ 2248 */
2248 2249
2249static int __init amifb_init(void) 2250static int __init amifb_probe(struct platform_device *pdev)
2250{ 2251{
2251 int tag, i, err = 0; 2252 int tag, i, err = 0;
2252 u_long chipptr; 2253 u_long chipptr;
@@ -2261,16 +2262,6 @@ static int __init amifb_init(void)
2261 } 2262 }
2262 amifb_setup(option); 2263 amifb_setup(option);
2263#endif 2264#endif
2264 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_VIDEO))
2265 return -ENODEV;
2266
2267 /*
2268 * We request all registers starting from bplpt[0]
2269 */
2270 if (!request_mem_region(CUSTOM_PHYSADDR+0xe0, 0x120,
2271 "amifb [Denise/Lisa]"))
2272 return -EBUSY;
2273
2274 custom.dmacon = DMAF_ALL | DMAF_MASTER; 2265 custom.dmacon = DMAF_ALL | DMAF_MASTER;
2275 2266
2276 switch (amiga_chipset) { 2267 switch (amiga_chipset) {
@@ -2377,6 +2368,7 @@ default_chipset:
2377 fb_info.fbops = &amifb_ops; 2368 fb_info.fbops = &amifb_ops;
2378 fb_info.par = &currentpar; 2369 fb_info.par = &currentpar;
2379 fb_info.flags = FBINFO_DEFAULT; 2370 fb_info.flags = FBINFO_DEFAULT;
2371 fb_info.device = &pdev->dev;
2380 2372
2381 if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, ami_modedb, 2373 if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, ami_modedb,
2382 NUM_TOTAL_MODES, &ami_modedb[defmode], 4)) { 2374 NUM_TOTAL_MODES, &ami_modedb[defmode], 4)) {
@@ -2451,18 +2443,18 @@ default_chipset:
2451 return 0; 2443 return 0;
2452 2444
2453amifb_error: 2445amifb_error:
2454 amifb_deinit(); 2446 amifb_deinit(pdev);
2455 return err; 2447 return err;
2456} 2448}
2457 2449
2458static void amifb_deinit(void) 2450static void amifb_deinit(struct platform_device *pdev)
2459{ 2451{
2460 if (fb_info.cmap.len) 2452 if (fb_info.cmap.len)
2461 fb_dealloc_cmap(&fb_info.cmap); 2453 fb_dealloc_cmap(&fb_info.cmap);
2454 fb_dealloc_cmap(&fb_info.cmap);
2462 chipfree(); 2455 chipfree();
2463 if (videomemory) 2456 if (videomemory)
2464 iounmap((void*)videomemory); 2457 iounmap((void*)videomemory);
2465 release_mem_region(CUSTOM_PHYSADDR+0xe0, 0x120);
2466 custom.dmacon = DMAF_ALL | DMAF_MASTER; 2458 custom.dmacon = DMAF_ALL | DMAF_MASTER;
2467} 2459}
2468 2460
@@ -3794,14 +3786,35 @@ static void ami_rebuild_copper(void)
3794 } 3786 }
3795} 3787}
3796 3788
3797static void __exit amifb_exit(void) 3789static int __exit amifb_remove(struct platform_device *pdev)
3798{ 3790{
3799 unregister_framebuffer(&fb_info); 3791 unregister_framebuffer(&fb_info);
3800 amifb_deinit(); 3792 amifb_deinit(pdev);
3801 amifb_video_off(); 3793 amifb_video_off();
3794 return 0;
3795}
3796
3797static struct platform_driver amifb_driver = {
3798 .remove = __exit_p(amifb_remove),
3799 .driver = {
3800 .name = "amiga-video",
3801 .owner = THIS_MODULE,
3802 },
3803};
3804
3805static int __init amifb_init(void)
3806{
3807 return platform_driver_probe(&amifb_driver, amifb_probe);
3802} 3808}
3803 3809
3804module_init(amifb_init); 3810module_init(amifb_init);
3811
3812static void __exit amifb_exit(void)
3813{
3814 platform_driver_unregister(&amifb_driver);
3815}
3816
3805module_exit(amifb_exit); 3817module_exit(amifb_exit);
3806 3818
3807MODULE_LICENSE("GPL"); 3819MODULE_LICENSE("GPL");
3820MODULE_ALIAS("platform:amiga-video");
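With the conversion above, amifb only binds once a matching platform device exists. That registration belongs in the Amiga platform setup code; schematically it amounts to something like the following (a sketch, not the actual arch file):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static int __init hypothetical_amiga_video_register(void)
{
	struct platform_device *pdev;

	/* the name must match amifb_driver.driver.name ("amiga-video") */
	pdev = platform_device_register_simple("amiga-video", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return 0;
}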
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 44e49c28b2a7..c2ec3dcd4e91 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -488,9 +488,9 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
 	fbinfo->fbops = &bfin_t350mcqb_fb_ops;
 	fbinfo->flags = FBINFO_FLAG_DEFAULT;
 
-	info->fb_buffer =
-	    dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle,
-			       GFP_KERNEL);
+	info->fb_buffer = dma_alloc_coherent(NULL, fbinfo->fix.smem_len +
+					     ACTIVE_VIDEO_MEM_OFFSET,
+					     &info->dma_handle, GFP_KERNEL);
 
 	if (NULL == info->fb_buffer) {
 		printk(KERN_ERR DRIVER_NAME
@@ -568,8 +568,8 @@ out7:
 out6:
 	fb_dealloc_cmap(&fbinfo->cmap);
 out4:
-	dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
-			  info->dma_handle);
+	dma_free_coherent(NULL, fbinfo->fix.smem_len + ACTIVE_VIDEO_MEM_OFFSET,
+			  info->fb_buffer, info->dma_handle);
 out3:
 	framebuffer_release(fbinfo);
 out2:
@@ -592,8 +592,9 @@ static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev)
 	free_irq(info->irq, info);
 
 	if (info->fb_buffer != NULL)
-		dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
-				  info->dma_handle);
+		dma_free_coherent(NULL, fbinfo->fix.smem_len +
+				  ACTIVE_VIDEO_MEM_OFFSET, info->fb_buffer,
+				  info->dma_handle);
 
 	fb_dealloc_cmap(&fbinfo->cmap);
 
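
The three bfin-t350mcqb-fb hunks restore the dma_alloc_coherent()/dma_free_coherent() size invariant: the buffer is allocated with ACTIVE_VIDEO_MEM_OFFSET extra bytes, so every free must name the same total size. A sketch of one way to keep the two calls from drifting apart; the struct and helpers are hypothetical, not the driver's own:

/*
 * Remembering the size once makes alloc and free impossible to
 * desynchronize (hypothetical wrapper, not the driver's code).
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct fb_dma_buf {
	void		*cpu;
	dma_addr_t	handle;
	size_t		size;
};

static int fb_dma_buf_alloc(struct device *dev, struct fb_dma_buf *b,
			    size_t len, size_t extra)
{
	b->size = len + extra;		/* the one authoritative size */
	b->cpu = dma_alloc_coherent(dev, b->size, &b->handle, GFP_KERNEL);
	return b->cpu ? 0 : -ENOMEM;
}

static void fb_dma_buf_free(struct device *dev, struct fb_dma_buf *b)
{
	if (b->cpu)
		dma_free_coherent(dev, b->size, b->cpu, b->handle);
}
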
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 8d8dfda2f868..6df7c54db0a3 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -299,6 +299,7 @@ static const struct zorro_device_id cirrusfb_zorro_table[] = {
 	},
 	{ 0 }
 };
+MODULE_DEVICE_TABLE(zorro, cirrusfb_zorro_table);
 
 static const struct {
 	zorro_id id2;
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c
index 6c91c61cdb63..1b0feb8e7244 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fm2fb.c
@@ -219,6 +219,7 @@ static struct zorro_device_id fm2fb_devices[] __devinitdata = {
 	{ ZORRO_PROD_HELFRICH_RAINBOW_II },
 	{ 0 }
 };
+MODULE_DEVICE_TABLE(zorro, fm2fb_devices);
 
 static struct zorro_driver fm2fb_driver = {
 	.name = "fm2fb",
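
The single-line cirrusfb and fm2fb additions publish each driver's Zorro ID table to module tools: MODULE_DEVICE_TABLE(zorro, ...) records one alias per table entry in the module image, which udev can match against the modalias string the zorro core exports in the sysfs and uevent hunks further down. A sketch for a hypothetical driver (the ID is borrowed from the fm2fb table above):

/*
 * MODULE_DEVICE_TABLE(zorro, ...) stores one alias per entry (of the
 * form "zorro:i<8-hex-digit id>"), so modprobe can resolve a device's
 * modalias to this module.  Hypothetical driver, real Zorro ID.
 */
#include <linux/module.h>
#include <linux/zorro.h>

static const struct zorro_device_id example_zorro_ids[] = {
	{ ZORRO_PROD_HELFRICH_RAINBOW_II },	/* ID borrowed from fm2fb */
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, example_zorro_ids);
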
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0bf5020d0d32..b87ba23442d2 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -175,7 +175,7 @@ config SA1100_WATCHDOG
 
 config MPCORE_WATCHDOG
 	tristate "MPcore watchdog"
-	depends on ARM_MPCORE_PLATFORM && LOCAL_TIMERS
+	depends on HAVE_ARM_TWD
 	help
 	  Watchdog timer embedded into the MPcore system.
 
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 016c6a791cab..b8ec7aca3c8e 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -31,8 +31,9 @@
 #include <linux/platform_device.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
-#include <asm/hardware/arm_twd.h>
+#include <asm/smp_twd.h>
 
 struct mpcore_wdt {
 	unsigned long timer_alive;
@@ -44,7 +45,7 @@ struct mpcore_wdt {
 };
 
 static struct platform_device *mpcore_wdt_dev;
-extern unsigned int mpcore_timer_rate;
+static DEFINE_SPINLOCK(wdt_lock);
 
 #define TIMER_MARGIN	60
 static int mpcore_margin = TIMER_MARGIN;
@@ -94,13 +95,15 @@ static irqreturn_t mpcore_wdt_fire(int irq, void *arg)
  */
 static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt)
 {
-	unsigned int count;
+	unsigned long count;
 
+	spin_lock(&wdt_lock);
 	/* Assume prescale is set to 256 */
-	count = (mpcore_timer_rate / 256) * mpcore_margin;
+	count = __raw_readl(wdt->base + TWD_WDOG_COUNTER);
+	count = (0xFFFFFFFFU - count) * (HZ / 5);
+	count = (count / 256) * mpcore_margin;
 
 	/* Reload the counter */
-	spin_lock(&wdt_lock);
 	writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD);
 	wdt->perturb = wdt->perturb ? 0 : 1;
 	spin_unlock(&wdt_lock);
@@ -119,7 +122,6 @@ static void mpcore_wdt_start(struct mpcore_wdt *wdt)
 {
 	dev_printk(KERN_INFO, wdt->dev, "enabling watchdog.\n");
 
-	spin_lock(&wdt_lock);
 	/* This loads the count register but does NOT start the count yet */
 	mpcore_wdt_keepalive(wdt);
 
@@ -130,7 +132,6 @@ static void mpcore_wdt_start(struct mpcore_wdt *wdt)
 		/* Enable watchdog - prescale=256, watchdog mode=1, enable=1 */
 		writel(0x0000FF09, wdt->base + TWD_WDOG_CONTROL);
 	}
-	spin_unlock(&wdt_lock);
 }
 
 static int mpcore_wdt_set_heartbeat(int t)
@@ -360,7 +361,7 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev)
 	mpcore_wdt_miscdev.parent = &dev->dev;
 	ret = misc_register(&mpcore_wdt_miscdev);
 	if (ret) {
-		dev_printk(KERN_ERR, _dev,
+		dev_printk(KERN_ERR, wdt->dev,
 			   "cannot register miscdev on minor=%d (err=%d)\n",
 			   WATCHDOG_MINOR, ret);
 		goto err_misc;
@@ -369,13 +370,13 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev)
 	ret = request_irq(wdt->irq, mpcore_wdt_fire, IRQF_DISABLED,
 			  "mpcore_wdt", wdt);
 	if (ret) {
-		dev_printk(KERN_ERR, _dev,
+		dev_printk(KERN_ERR, wdt->dev,
 			   "cannot register IRQ%d for watchdog\n", wdt->irq);
 		goto err_irq;
 	}
 
 	mpcore_wdt_stop(wdt);
-	platform_set_drvdata(&dev->dev, wdt);
+	platform_set_drvdata(dev, wdt);
 	mpcore_wdt_dev = dev;
 
 	return 0;
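
In the mpcore_wdt hunks above, the driver stops relying on the exported mpcore_timer_rate and derives the reload value from the TWD counter itself; at the same time the spinlock moves from the callers into mpcore_wdt_keepalive(), so the whole read-compute-write sequence is serialized in one place. The locking shape, reduced to a standalone sketch with a hypothetical register offset:

/*
 * One file-scope spinlock serializes every reload, so two concurrent
 * keepalives cannot interleave their counter read and LOAD write.
 * The register offset is hypothetical, not the TWD layout.
 */
#include <linux/io.h>
#include <linux/spinlock.h>

#define EXAMPLE_WDOG_LOAD	0x00	/* hypothetical register offset */

static DEFINE_SPINLOCK(example_wdt_lock);

static void example_wdt_reload(void __iomem *base, u32 count)
{
	spin_lock(&example_wdt_lock);
	writel(count, base + EXAMPLE_WDOG_LOAD);
	spin_unlock(&example_wdt_lock);
}
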
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 2ac4440e7b08..8943b8ccee1a 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -80,12 +80,6 @@ static void do_suspend(void)
 
 	shutting_down = SHUTDOWN_SUSPEND;
 
-	err = stop_machine_create();
-	if (err) {
-		printk(KERN_ERR "xen suspend: failed to setup stop_machine %d\n", err);
-		goto out;
-	}
-
 #ifdef CONFIG_PREEMPT
 	/* If the kernel is preemptible, we need to freeze all the processes
 	   to prevent them from being in the middle of a pagetable update
@@ -93,7 +87,7 @@ static void do_suspend(void)
 	err = freeze_processes();
 	if (err) {
 		printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
-		goto out_destroy_sm;
+		goto out;
 	}
 #endif
 
@@ -136,12 +130,8 @@ out_resume:
 out_thaw:
 #ifdef CONFIG_PREEMPT
 	thaw_processes();
-
-out_destroy_sm:
-#endif
-	stop_machine_destroy();
-
 out:
+#endif
 	shutting_down = SHUTDOWN_INVALID;
 }
 #endif /* CONFIG_PM_SLEEP */
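
The xen/manage.c hunks track the removal of stop_machine_create()/stop_machine_destroy() from the core kernel: with nothing left to set up before the freeze step, the out_destroy_sm unwind label collapses into out. What remains is the plain goto-unwind idiom, sketched here with hypothetical stand-in helpers rather than the Xen code itself:

/*
 * Standalone sketch of the simplified unwind (stand-in helpers,
 * not the Xen code).
 */
#include <linux/errno.h>

static int example_freeze(void) { return 0; }		/* freeze_processes() stand-in */
static void example_thaw(void) { }			/* thaw_processes() stand-in */
static int example_enter(void) { return -EBUSY; }	/* the suspend step itself */

static int example_suspend(void)
{
	int err;

	err = example_freeze();
	if (err)
		goto out;	/* nothing set up yet, nothing to undo */

	err = example_enter();
	example_thaw();		/* undo the freeze on success and failure alike */
out:
	return err;
}
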
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index d47c47fc048f..3c7046d79654 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -97,7 +97,7 @@ static void zorro_seq_stop(struct seq_file *m, void *v)
 
 static int zorro_seq_show(struct seq_file *m, void *v)
 {
-	u_int slot = *(loff_t *)v;
+	unsigned int slot = *(loff_t *)v;
 	struct zorro_dev *z = &zorro_autocon[slot];
 
 	seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id,
@@ -129,7 +129,7 @@ static const struct file_operations zorro_devices_proc_fops = {
 
 static struct proc_dir_entry *proc_bus_zorro_dir;
 
-static int __init zorro_proc_attach_device(u_int slot)
+static int __init zorro_proc_attach_device(unsigned int slot)
 {
 	struct proc_dir_entry *entry;
 	char name[4];
@@ -146,7 +146,7 @@ static int __init zorro_proc_attach_device(u_int slot)
 
 static int __init zorro_proc_init(void)
 {
-	u_int slot;
+	unsigned int slot;
 
 	if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) {
 		proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL);
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index 53180a37cc9a..7ee2b6e71786 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -137,10 +137,34 @@ static int zorro_bus_match(struct device *dev, struct device_driver *drv)
 	return 0;
 }
 
+static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+#ifdef CONFIG_HOTPLUG
+	struct zorro_dev *z;
+
+	if (!dev)
+		return -ENODEV;
+
+	z = to_zorro_dev(dev);
+	if (!z)
+		return -ENODEV;
+
+	if (add_uevent_var(env, "ZORRO_ID=%08X", z->id) ||
+	    add_uevent_var(env, "ZORRO_SLOT_NAME=%s", dev_name(dev)) ||
+	    add_uevent_var(env, "ZORRO_SLOT_ADDR=%04X", z->slotaddr) ||
+	    add_uevent_var(env, "MODALIAS=" ZORRO_DEVICE_MODALIAS_FMT, z->id))
+		return -ENOMEM;
+
+	return 0;
+#else /* !CONFIG_HOTPLUG */
+	return -ENODEV;
+#endif /* !CONFIG_HOTPLUG */
+}
 
 struct bus_type zorro_bus_type = {
 	.name	= "zorro",
 	.match	= zorro_bus_match,
+	.uevent	= zorro_uevent,
 	.probe	= zorro_device_probe,
 	.remove	= zorro_device_remove,
 };
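
zorro_uevent() above is the standard bus uevent callback shape: each add_uevent_var() appends one KEY=value pair to the fixed-size hotplug environment, and the first failure is reported as -ENOMEM. Reduced to its core for a hypothetical bus:

/*
 * Same callback shape for a hypothetical "foo" bus; MODALIAS is the
 * variable udev hands to modprobe.
 */
#include <linux/device.h>
#include <linux/errno.h>

static int foo_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "MODALIAS=foo:%s", dev_name(dev)))
		return -ENOMEM;
	return 0;
}
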
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 1d2a772ea14c..eb924e0a64ce 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -77,6 +77,16 @@ static struct bin_attribute zorro_config_attr = {
 	.read = zorro_read_config,
 };
 
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct zorro_dev *z = to_zorro_dev(dev);
+
+	return sprintf(buf, ZORRO_DEVICE_MODALIAS_FMT "\n", z->id);
+}
+
+static DEVICE_ATTR(modalias, S_IRUGO, modalias_show, NULL);
+
 int zorro_create_sysfs_dev_files(struct zorro_dev *z)
 {
 	struct device *dev = &z->dev;
@@ -89,6 +99,7 @@ int zorro_create_sysfs_dev_files(struct zorro_dev *z)
 	    (error = device_create_file(dev, &dev_attr_slotaddr)) ||
 	    (error = device_create_file(dev, &dev_attr_slotsize)) ||
 	    (error = device_create_file(dev, &dev_attr_resource)) ||
+	    (error = device_create_file(dev, &dev_attr_modalias)) ||
 	    (error = sysfs_create_bin_file(&dev->kobj, &zorro_config_attr)))
 		return error;
 
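
Together with zorro_uevent() above, this gives coldplug and hotplug the same identity string: the modalias sysfs attribute and the MODALIAS uevent variable are both generated from ZORRO_DEVICE_MODALIAS_FMT, so a udev trigger at boot and a genuine device event should both hand modprobe the alias that MODULE_DEVICE_TABLE(zorro, ...) recorded in the matching driver.
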
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index d45fb34e2d23..6455f3a244c5 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -15,6 +15,8 @@
 #include <linux/zorro.h>
 #include <linux/bitops.h>
 #include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
 
 #include <asm/setup.h>
 #include <asm/amigahw.h>
@@ -26,24 +28,17 @@
     * Zorro Expansion Devices
     */
 
-u_int zorro_num_autocon = 0;
+unsigned int zorro_num_autocon;
 struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
 
 
     /*
-     *  Single Zorro bus
+     *  Zorro bus
      */
 
-struct zorro_bus zorro_bus = {\
-	.resources = {
-		/* Zorro II regions (on Zorro II/III) */
-		{ .name = "Zorro II exp", .start = 0x00e80000, .end = 0x00efffff },
-		{ .name = "Zorro II mem", .start = 0x00200000, .end = 0x009fffff },
-		/* Zorro III regions (on Zorro III only) */
-		{ .name = "Zorro III exp", .start = 0xff000000, .end = 0xffffffff },
-		{ .name = "Zorro III cfg", .start = 0x40000000, .end = 0x7fffffff }
-	},
-	.name = "Zorro bus"
+struct zorro_bus {
+	struct list_head devices;	/* list of devices on this bus */
+	struct device dev;
 };
 
 
@@ -53,18 +48,19 @@ struct zorro_bus zorro_bus = {\
 
 struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from)
 {
 	struct zorro_dev *z;
 
-	if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO))
+	if (!zorro_num_autocon)
 		return NULL;
 
 	for (z = from ? from+1 : &zorro_autocon[0];
 	     z < zorro_autocon+zorro_num_autocon;
 	     z++)
 		if (id == ZORRO_WILDCARD || id == z->id)
 			return z;
 	return NULL;
 }
+EXPORT_SYMBOL(zorro_find_device);
 
 
     /*
@@ -83,121 +79,138 @@ struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from)
  */
 
 DECLARE_BITMAP(zorro_unused_z2ram, 128);
+EXPORT_SYMBOL(zorro_unused_z2ram);
 
 
 static void __init mark_region(unsigned long start, unsigned long end,
 			       int flag)
 {
-    if (flag)
-	start += Z2RAM_CHUNKMASK;
-    else
-	end += Z2RAM_CHUNKMASK;
-    start &= ~Z2RAM_CHUNKMASK;
-    end &= ~Z2RAM_CHUNKMASK;
-
-    if (end <= Z2RAM_START || start >= Z2RAM_END)
-	return;
-    start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START;
-    end = end > Z2RAM_END ? Z2RAM_SIZE : end-Z2RAM_START;
-    while (start < end) {
-	u32 chunk = start>>Z2RAM_CHUNKSHIFT;
-	if (flag)
-	    set_bit(chunk, zorro_unused_z2ram);
-	else
-	    clear_bit(chunk, zorro_unused_z2ram);
-	start += Z2RAM_CHUNKSIZE;
-    }
+	if (flag)
+		start += Z2RAM_CHUNKMASK;
+	else
+		end += Z2RAM_CHUNKMASK;
+	start &= ~Z2RAM_CHUNKMASK;
+	end &= ~Z2RAM_CHUNKMASK;
+
+	if (end <= Z2RAM_START || start >= Z2RAM_END)
+		return;
+	start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START;
+	end = end > Z2RAM_END ? Z2RAM_SIZE : end-Z2RAM_START;
+	while (start < end) {
+		u32 chunk = start>>Z2RAM_CHUNKSHIFT;
+		if (flag)
+			set_bit(chunk, zorro_unused_z2ram);
+		else
+			clear_bit(chunk, zorro_unused_z2ram);
+		start += Z2RAM_CHUNKSIZE;
+	}
 }
 
 
-static struct resource __init *zorro_find_parent_resource(struct zorro_dev *z)
+static struct resource __init *zorro_find_parent_resource(
+	struct platform_device *bridge, struct zorro_dev *z)
 {
 	int i;
 
-	for (i = 0; i < zorro_bus.num_resources; i++)
-		if (zorro_resource_start(z) >= zorro_bus.resources[i].start &&
-		    zorro_resource_end(z) <= zorro_bus.resources[i].end)
-			return &zorro_bus.resources[i];
-	return &iomem_resource;
+	for (i = 0; i < bridge->num_resources; i++) {
+		struct resource *r = &bridge->resource[i];
+		if (zorro_resource_start(z) >= r->start &&
+		    zorro_resource_end(z) <= r->end)
+			return r;
+	}
+	return &iomem_resource;
 }
 
 
-    /*
-     *  Initialization
-     */
-
-static int __init zorro_init(void)
+static int __init amiga_zorro_probe(struct platform_device *pdev)
 {
-	struct zorro_dev *z;
-	unsigned int i;
-	int error;
-
-	if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO))
-		return 0;
-
-	pr_info("Zorro: Probing AutoConfig expansion devices: %d device%s\n",
-		zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
-
-	/* Initialize the Zorro bus */
-	INIT_LIST_HEAD(&zorro_bus.devices);
-	dev_set_name(&zorro_bus.dev, "zorro");
-	error = device_register(&zorro_bus.dev);
-	if (error) {
-		pr_err("Zorro: Error registering zorro_bus\n");
-		return error;
-	}
-
-	/* Request the resources */
-	zorro_bus.num_resources = AMIGAHW_PRESENT(ZORRO3) ? 4 : 2;
-	for (i = 0; i < zorro_bus.num_resources; i++)
-		request_resource(&iomem_resource, &zorro_bus.resources[i]);
-
-	/* Register all devices */
-	for (i = 0; i < zorro_num_autocon; i++) {
-		z = &zorro_autocon[i];
-		z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
-		if (z->id == ZORRO_PROD_GVP_EPC_BASE) {
-			/* GVP quirk */
-			unsigned long magic = zorro_resource_start(z)+0x8000;
-			z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK;
-		}
-		sprintf(z->name, "Zorro device %08x", z->id);
-		zorro_name_device(z);
-		z->resource.name = z->name;
-		if (request_resource(zorro_find_parent_resource(z), &z->resource))
-			pr_err("Zorro: Address space collision on device %s %pR\n",
-			       z->name, &z->resource);
-		dev_set_name(&z->dev, "%02x", i);
-		z->dev.parent = &zorro_bus.dev;
-		z->dev.bus = &zorro_bus_type;
-		error = device_register(&z->dev);
-		if (error) {
-			pr_err("Zorro: Error registering device %s\n", z->name);
-			continue;
-		}
-		error = zorro_create_sysfs_dev_files(z);
-		if (error)
-			dev_err(&z->dev, "Error creating sysfs files\n");
-	}
-
-	/* Mark all available Zorro II memory */
-	zorro_for_each_dev(z) {
-		if (z->rom.er_Type & ERTF_MEMLIST)
-			mark_region(zorro_resource_start(z), zorro_resource_end(z)+1, 1);
-	}
-
-	/* Unmark all used Zorro II memory */
-	for (i = 0; i < m68k_num_memory; i++)
-		if (m68k_memory[i].addr < 16*1024*1024)
-			mark_region(m68k_memory[i].addr,
-				    m68k_memory[i].addr+m68k_memory[i].size, 0);
-
-	return 0;
+	struct zorro_bus *bus;
+	struct zorro_dev *z;
+	struct resource *r;
+	unsigned int i;
+	int error;
+
+	/* Initialize the Zorro bus */
+	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+	if (!bus)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&bus->devices);
+	bus->dev.parent = &pdev->dev;
+	dev_set_name(&bus->dev, "zorro");
+	error = device_register(&bus->dev);
+	if (error) {
+		pr_err("Zorro: Error registering zorro_bus\n");
+		kfree(bus);
+		return error;
+	}
+	platform_set_drvdata(pdev, bus);
+
+	/* Register all devices */
+	pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
+		zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
+
+	for (i = 0; i < zorro_num_autocon; i++) {
+		z = &zorro_autocon[i];
+		z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
+		if (z->id == ZORRO_PROD_GVP_EPC_BASE) {
+			/* GVP quirk */
+			unsigned long magic = zorro_resource_start(z)+0x8000;
+			z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK;
+		}
+		sprintf(z->name, "Zorro device %08x", z->id);
+		zorro_name_device(z);
+		z->resource.name = z->name;
+		r = zorro_find_parent_resource(pdev, z);
+		error = request_resource(r, &z->resource);
+		if (error)
+			dev_err(&bus->dev,
+				"Address space collision on device %s %pR\n",
+				z->name, &z->resource);
+		dev_set_name(&z->dev, "%02x", i);
+		z->dev.parent = &bus->dev;
+		z->dev.bus = &zorro_bus_type;
+		error = device_register(&z->dev);
+		if (error) {
+			dev_err(&bus->dev, "Error registering device %s\n",
+				z->name);
+			continue;
+		}
+		error = zorro_create_sysfs_dev_files(z);
+		if (error)
+			dev_err(&z->dev, "Error creating sysfs files\n");
+	}
+
+	/* Mark all available Zorro II memory */
+	zorro_for_each_dev(z) {
+		if (z->rom.er_Type & ERTF_MEMLIST)
+			mark_region(zorro_resource_start(z),
+				    zorro_resource_end(z)+1, 1);
+	}
+
+	/* Unmark all used Zorro II memory */
+	for (i = 0; i < m68k_num_memory; i++)
+		if (m68k_memory[i].addr < 16*1024*1024)
+			mark_region(m68k_memory[i].addr,
+				    m68k_memory[i].addr+m68k_memory[i].size,
+				    0);
+
+	return 0;
 }
 
-subsys_initcall(zorro_init);
+static struct platform_driver amiga_zorro_driver = {
+	.driver = {
+		.name	= "amiga-zorro",
+		.owner	= THIS_MODULE,
+	},
+};
 
-EXPORT_SYMBOL(zorro_find_device);
-EXPORT_SYMBOL(zorro_unused_z2ram);
+static int __init amiga_zorro_init(void)
+{
+	return platform_driver_probe(&amiga_zorro_driver, amiga_zorro_probe);
+}
+
+module_init(amiga_zorro_init);
 
 MODULE_LICENSE("GPL");
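
After this conversion the bus object is allocated in amiga_zorro_probe() and hangs off the "amiga-zorro" platform device, while the table of AutoConfig devices is still walked through zorro_find_device(), now exported next to its definition. A usage sketch for the lookup API, with a hypothetical caller:

/*
 * Passing ZORRO_WILDCARD and feeding the previous hit back in walks
 * every registered AutoConfig device (hypothetical caller).
 */
#include <linux/kernel.h>
#include <linux/zorro.h>

static void example_list_boards(void)
{
	struct zorro_dev *z = NULL;

	while ((z = zorro_find_device(ZORRO_WILDCARD, z)))
		pr_info("%s at %pR\n", z->name, &z->resource);
}
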