80 files changed, 16723 insertions, 2548 deletions
diff --git a/Documentation/devices.txt b/Documentation/devices.txt index e6244cde26e9..05c80645e4ee 100644 --- a/Documentation/devices.txt +++ b/Documentation/devices.txt | |||
| @@ -2560,9 +2560,6 @@ Your cooperation is appreciated. | |||
| 2560 | 96 = /dev/usb/hiddev0 1st USB HID device | 2560 | 96 = /dev/usb/hiddev0 1st USB HID device |
| 2561 | ... | 2561 | ... |
| 2562 | 111 = /dev/usb/hiddev15 16th USB HID device | 2562 | 111 = /dev/usb/hiddev15 16th USB HID device |
| 2563 | 112 = /dev/usb/auer0 1st auerswald ISDN device | ||
| 2564 | ... | ||
| 2565 | 127 = /dev/usb/auer15 16th auerswald ISDN device | ||
| 2566 | 128 = /dev/usb/brlvgr0 First Braille Voyager device | 2563 | 128 = /dev/usb/brlvgr0 First Braille Voyager device |
| 2567 | ... | 2564 | ... |
| 2568 | 131 = /dev/usb/brlvgr3 Fourth Braille Voyager device | 2565 | 131 = /dev/usb/brlvgr3 Fourth Braille Voyager device |
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt index 3bb5f466a90d..1c6b545635a2 100644 --- a/Documentation/ioctl-number.txt +++ b/Documentation/ioctl-number.txt | |||
| @@ -105,7 +105,6 @@ Code Seq# Include File Comments | |||
| 105 | 'T' all linux/soundcard.h conflict! | 105 | 'T' all linux/soundcard.h conflict! |
| 106 | 'T' all asm-i386/ioctls.h conflict! | 106 | 'T' all asm-i386/ioctls.h conflict! |
| 107 | 'U' 00-EF linux/drivers/usb/usb.h | 107 | 'U' 00-EF linux/drivers/usb/usb.h |
| 108 | 'U' F0-FF drivers/usb/auerswald.c | ||
| 109 | 'V' all linux/vt.h | 108 | 'V' all linux/vt.h |
| 110 | 'W' 00-1F linux/watchdog.h conflict! | 109 | 'W' 00-1F linux/watchdog.h conflict! |
| 111 | 'W' 00-1F linux/wanrouter.h conflict! | 110 | 'W' 00-1F linux/wanrouter.h conflict! |
diff --git a/Documentation/usb/auerswald.txt b/Documentation/usb/auerswald.txt deleted file mode 100644 index 7ee4d8f69116..000000000000 --- a/Documentation/usb/auerswald.txt +++ /dev/null | |||
| @@ -1,30 +0,0 @@ | |||
| 1 | Auerswald USB kernel driver | ||
| 2 | =========================== | ||
| 3 | |||
| 4 | What is it? What can I do with it? | ||
| 5 | ================================== | ||
| 6 | The auerswald USB kernel driver connects your linux 2.4.x | ||
| 7 | system to the auerswald usb-enabled devices. | ||
| 8 | |||
| 9 | There are two types of auerswald usb devices: | ||
| 10 | a) small PBX systems (ISDN) | ||
| 11 | b) COMfort system telephones (ISDN) | ||
| 12 | |||
| 13 | The driver installation creates the devices | ||
| 14 | /dev/usb/auer0..15. These devices carry a vendor- | ||
| 15 | specific protocol. You may run all auerswald java | ||
| 16 | software on it. The java software needs a native | ||
| 17 | library "libAuerUsbJNINative.so" installed on | ||
| 18 | your system. This library is available from | ||
| 19 | auerswald and shipped as part of the java software. | ||
| 20 | |||
| 21 | You may create the devices with: | ||
| 22 | mknod -m 666 /dev/usb/auer0 c 180 112 | ||
| 23 | ... | ||
| 24 | mknod -m 666 /dev/usb/auer15 c 180 127 | ||
| 25 | |||
| 26 | Future plans | ||
| 27 | ============ | ||
| 28 | - Connection to ISDN4LINUX (the hisax interface) | ||
| 29 | |||
| 30 | The maintainer of this driver is wolfgang@iksw-muees.de | ||
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt index b2fc4d4a9917..9d31140e3f5b 100644 --- a/Documentation/usb/power-management.txt +++ b/Documentation/usb/power-management.txt | |||
| @@ -436,7 +436,12 @@ post_reset; the USB core guarantees that this is true of internal | |||
| 436 | suspend/resume events as well. | 436 | suspend/resume events as well. |
| 437 | 437 | ||
| 438 | If a driver wants to block all suspend/resume calls during some | 438 | If a driver wants to block all suspend/resume calls during some |
| 439 | critical section, it can simply acquire udev->pm_mutex. | 439 | critical section, it can simply acquire udev->pm_mutex. Note that |
| 440 | calls to resume may be triggered indirectly. Block IO due to memory | ||
| 441 | allocations can make the vm subsystem resume a device. Thus while | ||
| 442 | holding this lock you must not allocate memory with GFP_KERNEL or | ||
| 443 | GFP_NOFS. | ||
| 444 | |||
| 440 | Alternatively, if the critical section might call some of the | 445 | Alternatively, if the critical section might call some of the |
| 441 | usb_autopm_* routines, the driver can avoid deadlock by doing: | 446 | usb_autopm_* routines, the driver can avoid deadlock by doing: |
| 442 | 447 | ||
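The paragraph added to power-management.txt above states the constraint but not the usual workaround: while udev->pm_mutex is held, allocations must not be allowed to enter block I/O, which in practice means using GFP_NOIO. A minimal sketch of that pattern follows; the function name and the buffer it allocates are illustrative, not part of the patch.

    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/usb.h>

    /* Sketch: a critical section that blocks suspend/resume by holding
     * udev->pm_mutex, as described above.  GFP_KERNEL or GFP_NOFS could
     * recurse into the VM, which may then try to resume this very device
     * and deadlock; GFP_NOIO avoids the block I/O path entirely.
     */
    static int my_critical_section(struct usb_device *udev, size_t len)
    {
            void *buf;
            int ret = 0;

            mutex_lock(&udev->pm_mutex);    /* no suspend/resume from here on */

            buf = kmalloc(len, GFP_NOIO);   /* not GFP_KERNEL, not GFP_NOFS */
            if (!buf) {
                    ret = -ENOMEM;
                    goto out;
            }

            /* ... work that must not race with suspend/resume ... */

            kfree(buf);
    out:
            mutex_unlock(&udev->pm_mutex);
            return ret;
    }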
diff --git a/MAINTAINERS b/MAINTAINERS index e5610b275b22..4c5e9fe0f7db 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -2928,6 +2928,12 @@ M: jirislaby@gmail.com | |||
| 2928 | L: linux-kernel@vger.kernel.org | 2928 | L: linux-kernel@vger.kernel.org |
| 2929 | S: Maintained | 2929 | S: Maintained |
| 2930 | 2930 | ||
| 2931 | MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER | ||
| 2932 | P: Felipe Balbi | ||
| 2933 | M: felipe.balbi@nokia.com | ||
| 2934 | L: linux-usb@vger.kernel.org | ||
| 2935 | S: Maintained | ||
| 2936 | |||
| 2931 | MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) | 2937 | MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) |
| 2932 | P: Andrew Gallatin | 2938 | P: Andrew Gallatin |
| 2933 | M: gallatin@myri.com | 2939 | M: gallatin@myri.com |
| @@ -4196,12 +4202,6 @@ M: oliver@neukum.name | |||
| 4196 | L: linux-usb@vger.kernel.org | 4202 | L: linux-usb@vger.kernel.org |
| 4197 | S: Maintained | 4203 | S: Maintained |
| 4198 | 4204 | ||
| 4199 | USB AUERSWALD DRIVER | ||
| 4200 | P: Wolfgang Muees | ||
| 4201 | M: wolfgang@iksw-muees.de | ||
| 4202 | L: linux-usb@vger.kernel.org | ||
| 4203 | S: Maintained | ||
| 4204 | |||
| 4205 | USB BLOCK DRIVER (UB ub) | 4205 | USB BLOCK DRIVER (UB ub) |
| 4206 | P: Pete Zaitcev | 4206 | P: Pete Zaitcev |
| 4207 | M: zaitcev@redhat.com | 4207 | M: zaitcev@redhat.com |
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c index 1607c941d95f..10ef464d6be7 100644 --- a/arch/arm/mach-omap2/usb-tusb6010.c +++ b/arch/arm/mach-omap2/usb-tusb6010.c | |||
| @@ -317,7 +317,6 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data, | |||
| 317 | printk(error, 6, status); | 317 | printk(error, 6, status); |
| 318 | return -ENODEV; | 318 | return -ENODEV; |
| 319 | } | 319 | } |
| 320 | data->multipoint = 1; | ||
| 321 | tusb_device.dev.platform_data = data; | 320 | tusb_device.dev.platform_data = data; |
| 322 | 321 | ||
| 323 | /* REVISIT let the driver know what DMA channels work */ | 322 | /* REVISIT let the driver know what DMA channels work */ |
diff --git a/drivers/Makefile b/drivers/Makefile index a280ab3d0833..2735bde73475 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
| @@ -57,6 +57,7 @@ obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/ | |||
| 57 | obj-$(CONFIG_PARIDE) += block/paride/ | 57 | obj-$(CONFIG_PARIDE) += block/paride/ |
| 58 | obj-$(CONFIG_TC) += tc/ | 58 | obj-$(CONFIG_TC) += tc/ |
| 59 | obj-$(CONFIG_USB) += usb/ | 59 | obj-$(CONFIG_USB) += usb/ |
| 60 | obj-$(CONFIG_USB_MUSB_HDRC) += usb/musb/ | ||
| 60 | obj-$(CONFIG_PCI) += usb/ | 61 | obj-$(CONFIG_PCI) += usb/ |
| 61 | obj-$(CONFIG_USB_GADGET) += usb/gadget/ | 62 | obj-$(CONFIG_USB_GADGET) += usb/gadget/ |
| 62 | obj-$(CONFIG_SERIO) += input/serio/ | 63 | obj-$(CONFIG_SERIO) += input/serio/ |
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c index 18355ae2155d..4655b794ebe3 100644 --- a/drivers/i2c/chips/isp1301_omap.c +++ b/drivers/i2c/chips/isp1301_omap.c | |||
| @@ -1593,7 +1593,7 @@ fail1: | |||
| 1593 | if (machine_is_omap_h2()) { | 1593 | if (machine_is_omap_h2()) { |
| 1594 | /* full speed signaling by default */ | 1594 | /* full speed signaling by default */ |
| 1595 | isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, | 1595 | isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, |
| 1596 | MC1_SPEED_REG); | 1596 | MC1_SPEED); |
| 1597 | isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, | 1597 | isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, |
| 1598 | MC2_SPD_SUSP_CTRL); | 1598 | MC2_SPD_SUSP_CTRL); |
| 1599 | 1599 | ||
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 755823cdf62a..bcefbddeba50 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig | |||
| @@ -95,16 +95,18 @@ config USB | |||
| 95 | 95 | ||
| 96 | source "drivers/usb/core/Kconfig" | 96 | source "drivers/usb/core/Kconfig" |
| 97 | 97 | ||
| 98 | source "drivers/usb/mon/Kconfig" | ||
| 99 | |||
| 98 | source "drivers/usb/host/Kconfig" | 100 | source "drivers/usb/host/Kconfig" |
| 99 | 101 | ||
| 102 | source "drivers/usb/musb/Kconfig" | ||
| 103 | |||
| 100 | source "drivers/usb/class/Kconfig" | 104 | source "drivers/usb/class/Kconfig" |
| 101 | 105 | ||
| 102 | source "drivers/usb/storage/Kconfig" | 106 | source "drivers/usb/storage/Kconfig" |
| 103 | 107 | ||
| 104 | source "drivers/usb/image/Kconfig" | 108 | source "drivers/usb/image/Kconfig" |
| 105 | 109 | ||
| 106 | source "drivers/usb/mon/Kconfig" | ||
| 107 | |||
| 108 | comment "USB port drivers" | 110 | comment "USB port drivers" |
| 109 | depends on USB | 111 | depends on USB |
| 110 | 112 | ||
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 507a9bd0d77c..9aea43a8c4ad 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c | |||
| @@ -602,7 +602,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ | |||
| 602 | offd = le32_to_cpu(buf[offb++]); | 602 | offd = le32_to_cpu(buf[offb++]); |
| 603 | if (offd >= size) { | 603 | if (offd >= size) { |
| 604 | if (printk_ratelimit()) | 604 | if (printk_ratelimit()) |
| 605 | usb_err(instance->usbatm, "wrong index #%x in response to cm #%x\n", | 605 | usb_err(instance->usbatm, "wrong index %#x in response to cm %#x\n", |
| 606 | offd, cm); | 606 | offd, cm); |
| 607 | ret = -EIO; | 607 | ret = -EIO; |
| 608 | goto cleanup; | 608 | goto cleanup; |
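The cxacru change above only swaps "#%x" for "%#x", which is easy to misread: "#%x" prints a literal '#' followed by bare hex digits, while "%#x" uses the alternate-form flag and prefixes the value with "0x". A small userspace illustration (printf follows the same conversion rules printk uses here):

    #include <stdio.h>

    int main(void)
    {
            unsigned int offd = 0x1f;

            printf("wrong index #%x\n", offd);   /* old format -> "wrong index #1f"  */
            printf("wrong index %#x\n", offd);   /* new format -> "wrong index 0x1f" */
            return 0;
    }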
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 0725b1871f23..efc4373ededb 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
| @@ -51,6 +51,7 @@ | |||
| 51 | */ | 51 | */ |
| 52 | 52 | ||
| 53 | #undef DEBUG | 53 | #undef DEBUG |
| 54 | #undef VERBOSE_DEBUG | ||
| 54 | 55 | ||
| 55 | #include <linux/kernel.h> | 56 | #include <linux/kernel.h> |
| 56 | #include <linux/errno.h> | 57 | #include <linux/errno.h> |
| @@ -70,6 +71,9 @@ | |||
| 70 | 71 | ||
| 71 | #include "cdc-acm.h" | 72 | #include "cdc-acm.h" |
| 72 | 73 | ||
| 74 | |||
| 75 | #define ACM_CLOSE_TIMEOUT 15 /* seconds to let writes drain */ | ||
| 76 | |||
| 73 | /* | 77 | /* |
| 74 | * Version Information | 78 | * Version Information |
| 75 | */ | 79 | */ |
| @@ -85,6 +89,12 @@ static DEFINE_MUTEX(open_mutex); | |||
| 85 | 89 | ||
| 86 | #define ACM_READY(acm) (acm && acm->dev && acm->used) | 90 | #define ACM_READY(acm) (acm && acm->dev && acm->used) |
| 87 | 91 | ||
| 92 | #ifdef VERBOSE_DEBUG | ||
| 93 | #define verbose 1 | ||
| 94 | #else | ||
| 95 | #define verbose 0 | ||
| 96 | #endif | ||
| 97 | |||
| 88 | /* | 98 | /* |
| 89 | * Functions for ACM control messages. | 99 | * Functions for ACM control messages. |
| 90 | */ | 100 | */ |
| @@ -136,19 +146,17 @@ static int acm_wb_alloc(struct acm *acm) | |||
| 136 | static int acm_wb_is_avail(struct acm *acm) | 146 | static int acm_wb_is_avail(struct acm *acm) |
| 137 | { | 147 | { |
| 138 | int i, n; | 148 | int i, n; |
| 149 | unsigned long flags; | ||
| 139 | 150 | ||
| 140 | n = ACM_NW; | 151 | n = ACM_NW; |
| 152 | spin_lock_irqsave(&acm->write_lock, flags); | ||
| 141 | for (i = 0; i < ACM_NW; i++) { | 153 | for (i = 0; i < ACM_NW; i++) { |
| 142 | n -= acm->wb[i].use; | 154 | n -= acm->wb[i].use; |
| 143 | } | 155 | } |
| 156 | spin_unlock_irqrestore(&acm->write_lock, flags); | ||
| 144 | return n; | 157 | return n; |
| 145 | } | 158 | } |
| 146 | 159 | ||
| 147 | static inline int acm_wb_is_used(struct acm *acm, int wbn) | ||
| 148 | { | ||
| 149 | return acm->wb[wbn].use; | ||
| 150 | } | ||
| 151 | |||
| 152 | /* | 160 | /* |
| 153 | * Finish write. | 161 | * Finish write. |
| 154 | */ | 162 | */ |
| @@ -157,7 +165,6 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb) | |||
| 157 | unsigned long flags; | 165 | unsigned long flags; |
| 158 | 166 | ||
| 159 | spin_lock_irqsave(&acm->write_lock, flags); | 167 | spin_lock_irqsave(&acm->write_lock, flags); |
| 160 | acm->write_ready = 1; | ||
| 161 | wb->use = 0; | 168 | wb->use = 0; |
| 162 | acm->transmitting--; | 169 | acm->transmitting--; |
| 163 | spin_unlock_irqrestore(&acm->write_lock, flags); | 170 | spin_unlock_irqrestore(&acm->write_lock, flags); |
| @@ -190,40 +197,25 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb) | |||
| 190 | static int acm_write_start(struct acm *acm, int wbn) | 197 | static int acm_write_start(struct acm *acm, int wbn) |
| 191 | { | 198 | { |
| 192 | unsigned long flags; | 199 | unsigned long flags; |
| 193 | struct acm_wb *wb; | 200 | struct acm_wb *wb = &acm->wb[wbn]; |
| 194 | int rc; | 201 | int rc; |
| 195 | 202 | ||
| 196 | spin_lock_irqsave(&acm->write_lock, flags); | 203 | spin_lock_irqsave(&acm->write_lock, flags); |
| 197 | if (!acm->dev) { | 204 | if (!acm->dev) { |
| 205 | wb->use = 0; | ||
| 198 | spin_unlock_irqrestore(&acm->write_lock, flags); | 206 | spin_unlock_irqrestore(&acm->write_lock, flags); |
| 199 | return -ENODEV; | 207 | return -ENODEV; |
| 200 | } | 208 | } |
| 201 | 209 | ||
| 202 | if (!acm->write_ready) { | ||
| 203 | spin_unlock_irqrestore(&acm->write_lock, flags); | ||
| 204 | return 0; /* A white lie */ | ||
| 205 | } | ||
| 206 | |||
| 207 | wb = &acm->wb[wbn]; | ||
| 208 | if(acm_wb_is_avail(acm) <= 1) | ||
| 209 | acm->write_ready = 0; | ||
| 210 | |||
| 211 | dbg("%s susp_count: %d", __func__, acm->susp_count); | 210 | dbg("%s susp_count: %d", __func__, acm->susp_count); |
| 212 | if (acm->susp_count) { | 211 | if (acm->susp_count) { |
| 213 | acm->old_ready = acm->write_ready; | ||
| 214 | acm->delayed_wb = wb; | 212 | acm->delayed_wb = wb; |
| 215 | acm->write_ready = 0; | ||
| 216 | schedule_work(&acm->waker); | 213 | schedule_work(&acm->waker); |
| 217 | spin_unlock_irqrestore(&acm->write_lock, flags); | 214 | spin_unlock_irqrestore(&acm->write_lock, flags); |
| 218 | return 0; /* A white lie */ | 215 | return 0; /* A white lie */ |
| 219 | } | 216 | } |
| 220 | usb_mark_last_busy(acm->dev); | 217 | usb_mark_last_busy(acm->dev); |
| 221 | 218 | ||
| 222 | if (!acm_wb_is_used(acm, wbn)) { | ||
| 223 | spin_unlock_irqrestore(&acm->write_lock, flags); | ||
| 224 | return 0; | ||
| 225 | } | ||
| 226 | |||
| 227 | rc = acm_start_wb(acm, wb); | 219 | rc = acm_start_wb(acm, wb); |
| 228 | spin_unlock_irqrestore(&acm->write_lock, flags); | 220 | spin_unlock_irqrestore(&acm->write_lock, flags); |
| 229 | 221 | ||
| @@ -488,22 +480,28 @@ urbs: | |||
| 488 | /* data interface wrote those outgoing bytes */ | 480 | /* data interface wrote those outgoing bytes */ |
| 489 | static void acm_write_bulk(struct urb *urb) | 481 | static void acm_write_bulk(struct urb *urb) |
| 490 | { | 482 | { |
| 491 | struct acm *acm; | ||
| 492 | struct acm_wb *wb = urb->context; | 483 | struct acm_wb *wb = urb->context; |
| 484 | struct acm *acm = wb->instance; | ||
| 493 | 485 | ||
| 494 | dbg("Entering acm_write_bulk with status %d", urb->status); | 486 | if (verbose || urb->status |
| 487 | || (urb->actual_length != urb->transfer_buffer_length)) | ||
| 488 | dev_dbg(&acm->data->dev, "tx %d/%d bytes -- > %d\n", | ||
| 489 | urb->actual_length, | ||
| 490 | urb->transfer_buffer_length, | ||
| 491 | urb->status); | ||
| 495 | 492 | ||
| 496 | acm = wb->instance; | ||
| 497 | acm_write_done(acm, wb); | 493 | acm_write_done(acm, wb); |
| 498 | if (ACM_READY(acm)) | 494 | if (ACM_READY(acm)) |
| 499 | schedule_work(&acm->work); | 495 | schedule_work(&acm->work); |
| 496 | else | ||
| 497 | wake_up_interruptible(&acm->drain_wait); | ||
| 500 | } | 498 | } |
| 501 | 499 | ||
| 502 | static void acm_softint(struct work_struct *work) | 500 | static void acm_softint(struct work_struct *work) |
| 503 | { | 501 | { |
| 504 | struct acm *acm = container_of(work, struct acm, work); | 502 | struct acm *acm = container_of(work, struct acm, work); |
| 505 | dbg("Entering acm_softint."); | 503 | |
| 506 | 504 | dev_vdbg(&acm->data->dev, "tx work\n"); | |
| 507 | if (!ACM_READY(acm)) | 505 | if (!ACM_READY(acm)) |
| 508 | return; | 506 | return; |
| 509 | tty_wakeup(acm->tty); | 507 | tty_wakeup(acm->tty); |
| @@ -512,7 +510,6 @@ static void acm_softint(struct work_struct *work) | |||
| 512 | static void acm_waker(struct work_struct *waker) | 510 | static void acm_waker(struct work_struct *waker) |
| 513 | { | 511 | { |
| 514 | struct acm *acm = container_of(waker, struct acm, waker); | 512 | struct acm *acm = container_of(waker, struct acm, waker); |
| 515 | long flags; | ||
| 516 | int rv; | 513 | int rv; |
| 517 | 514 | ||
| 518 | rv = usb_autopm_get_interface(acm->control); | 515 | rv = usb_autopm_get_interface(acm->control); |
| @@ -524,9 +521,6 @@ static void acm_waker(struct work_struct *waker) | |||
| 524 | acm_start_wb(acm, acm->delayed_wb); | 521 | acm_start_wb(acm, acm->delayed_wb); |
| 525 | acm->delayed_wb = NULL; | 522 | acm->delayed_wb = NULL; |
| 526 | } | 523 | } |
| 527 | spin_lock_irqsave(&acm->write_lock, flags); | ||
| 528 | acm->write_ready = acm->old_ready; | ||
| 529 | spin_unlock_irqrestore(&acm->write_lock, flags); | ||
| 530 | usb_autopm_put_interface(acm->control); | 524 | usb_autopm_put_interface(acm->control); |
| 531 | } | 525 | } |
| 532 | 526 | ||
| @@ -628,6 +622,8 @@ static void acm_tty_unregister(struct acm *acm) | |||
| 628 | kfree(acm); | 622 | kfree(acm); |
| 629 | } | 623 | } |
| 630 | 624 | ||
| 625 | static int acm_tty_chars_in_buffer(struct tty_struct *tty); | ||
| 626 | |||
| 631 | static void acm_tty_close(struct tty_struct *tty, struct file *filp) | 627 | static void acm_tty_close(struct tty_struct *tty, struct file *filp) |
| 632 | { | 628 | { |
| 633 | struct acm *acm = tty->driver_data; | 629 | struct acm *acm = tty->driver_data; |
| @@ -642,6 +638,13 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp) | |||
| 642 | if (acm->dev) { | 638 | if (acm->dev) { |
| 643 | usb_autopm_get_interface(acm->control); | 639 | usb_autopm_get_interface(acm->control); |
| 644 | acm_set_control(acm, acm->ctrlout = 0); | 640 | acm_set_control(acm, acm->ctrlout = 0); |
| 641 | |||
| 642 | /* try letting the last writes drain naturally */ | ||
| 643 | wait_event_interruptible_timeout(acm->drain_wait, | ||
| 644 | (ACM_NW == acm_wb_is_avail(acm)) | ||
| 645 | || !acm->dev, | ||
| 646 | ACM_CLOSE_TIMEOUT * HZ); | ||
| 647 | |||
| 645 | usb_kill_urb(acm->ctrlurb); | 648 | usb_kill_urb(acm->ctrlurb); |
| 646 | for (i = 0; i < ACM_NW; i++) | 649 | for (i = 0; i < ACM_NW; i++) |
| 647 | usb_kill_urb(acm->wb[i].urb); | 650 | usb_kill_urb(acm->wb[i].urb); |
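The close path above now lets pending writes drain before killing URBs: acm_write_bulk() wakes drain_wait when a buffer completes, and acm_tty_close() sleeps until every write buffer is free (ACM_NW == acm_wb_is_avail()) or ACM_CLOSE_TIMEOUT expires. A condensed sketch of that wait/wake pairing, with hypothetical names rather than the driver's own:

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/jiffies.h>

    #define MY_CLOSE_TIMEOUT        15      /* seconds, mirrors ACM_CLOSE_TIMEOUT */

    struct my_port {
            wait_queue_head_t       drain_wait;     /* init_waitqueue_head() at probe */
            int                     writes_pending; /* guarded by the write lock in a real driver */
    };

    static void my_write_done(struct my_port *port)         /* URB completion path */
    {
            port->writes_pending--;
            wake_up_interruptible(&port->drain_wait);
    }

    static void my_close(struct my_port *port)
    {
            /* returns early on a signal; gives up after the timeout */
            wait_event_interruptible_timeout(port->drain_wait,
                                             port->writes_pending == 0,
                                             MY_CLOSE_TIMEOUT * HZ);
            /* only now is it safe to kill any URBs still in flight */
    }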
| @@ -697,7 +700,7 @@ static int acm_tty_write_room(struct tty_struct *tty) | |||
| 697 | * Do not let the line discipline to know that we have a reserve, | 700 | * Do not let the line discipline to know that we have a reserve, |
| 698 | * or it might get too enthusiastic. | 701 | * or it might get too enthusiastic. |
| 699 | */ | 702 | */ |
| 700 | return (acm->write_ready && acm_wb_is_avail(acm)) ? acm->writesize : 0; | 703 | return acm_wb_is_avail(acm) ? acm->writesize : 0; |
| 701 | } | 704 | } |
| 702 | 705 | ||
| 703 | static int acm_tty_chars_in_buffer(struct tty_struct *tty) | 706 | static int acm_tty_chars_in_buffer(struct tty_struct *tty) |
| @@ -1072,11 +1075,11 @@ skip_normal_probe: | |||
| 1072 | acm->urb_task.data = (unsigned long) acm; | 1075 | acm->urb_task.data = (unsigned long) acm; |
| 1073 | INIT_WORK(&acm->work, acm_softint); | 1076 | INIT_WORK(&acm->work, acm_softint); |
| 1074 | INIT_WORK(&acm->waker, acm_waker); | 1077 | INIT_WORK(&acm->waker, acm_waker); |
| 1078 | init_waitqueue_head(&acm->drain_wait); | ||
| 1075 | spin_lock_init(&acm->throttle_lock); | 1079 | spin_lock_init(&acm->throttle_lock); |
| 1076 | spin_lock_init(&acm->write_lock); | 1080 | spin_lock_init(&acm->write_lock); |
| 1077 | spin_lock_init(&acm->read_lock); | 1081 | spin_lock_init(&acm->read_lock); |
| 1078 | mutex_init(&acm->mutex); | 1082 | mutex_init(&acm->mutex); |
| 1079 | acm->write_ready = 1; | ||
| 1080 | acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); | 1083 | acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); |
| 1081 | 1084 | ||
| 1082 | buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma); | 1085 | buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma); |
| @@ -1108,9 +1111,11 @@ skip_normal_probe: | |||
| 1108 | rcv->instance = acm; | 1111 | rcv->instance = acm; |
| 1109 | } | 1112 | } |
| 1110 | for (i = 0; i < num_rx_buf; i++) { | 1113 | for (i = 0; i < num_rx_buf; i++) { |
| 1111 | struct acm_rb *buf = &(acm->rb[i]); | 1114 | struct acm_rb *rb = &(acm->rb[i]); |
| 1112 | 1115 | ||
| 1113 | if (!(buf->base = usb_buffer_alloc(acm->dev, readsize, GFP_KERNEL, &buf->dma))) { | 1116 | rb->base = usb_buffer_alloc(acm->dev, readsize, |
| 1117 | GFP_KERNEL, &rb->dma); | ||
| 1118 | if (!rb->base) { | ||
| 1114 | dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n"); | 1119 | dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n"); |
| 1115 | goto alloc_fail7; | 1120 | goto alloc_fail7; |
| 1116 | } | 1121 | } |
| @@ -1172,6 +1177,7 @@ skip_countries: | |||
| 1172 | acm_set_line(acm, &acm->line); | 1177 | acm_set_line(acm, &acm->line); |
| 1173 | 1178 | ||
| 1174 | usb_driver_claim_interface(&acm_driver, data_interface, acm); | 1179 | usb_driver_claim_interface(&acm_driver, data_interface, acm); |
| 1180 | usb_set_intfdata(data_interface, acm); | ||
| 1175 | 1181 | ||
| 1176 | usb_get_intf(control_interface); | 1182 | usb_get_intf(control_interface); |
| 1177 | tty_register_device(acm_tty_driver, minor, &control_interface->dev); | 1183 | tty_register_device(acm_tty_driver, minor, &control_interface->dev); |
| @@ -1221,11 +1227,11 @@ static void acm_disconnect(struct usb_interface *intf) | |||
| 1221 | struct acm *acm = usb_get_intfdata(intf); | 1227 | struct acm *acm = usb_get_intfdata(intf); |
| 1222 | struct usb_device *usb_dev = interface_to_usbdev(intf); | 1228 | struct usb_device *usb_dev = interface_to_usbdev(intf); |
| 1223 | 1229 | ||
| 1224 | mutex_lock(&open_mutex); | 1230 | /* sibling interface is already cleaning up */ |
| 1225 | if (!acm || !acm->dev) { | 1231 | if (!acm) |
| 1226 | mutex_unlock(&open_mutex); | ||
| 1227 | return; | 1232 | return; |
| 1228 | } | 1233 | |
| 1234 | mutex_lock(&open_mutex); | ||
| 1229 | if (acm->country_codes){ | 1235 | if (acm->country_codes){ |
| 1230 | device_remove_file(&acm->control->dev, | 1236 | device_remove_file(&acm->control->dev, |
| 1231 | &dev_attr_wCountryCodes); | 1237 | &dev_attr_wCountryCodes); |
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index 85c3aaaab7c5..1f95e7aa1b66 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h | |||
| @@ -106,8 +106,6 @@ struct acm { | |||
| 106 | struct list_head spare_read_bufs; | 106 | struct list_head spare_read_bufs; |
| 107 | struct list_head filled_read_bufs; | 107 | struct list_head filled_read_bufs; |
| 108 | int write_used; /* number of non-empty write buffers */ | 108 | int write_used; /* number of non-empty write buffers */ |
| 109 | int write_ready; /* write urb is not running */ | ||
| 110 | int old_ready; | ||
| 111 | int processing; | 109 | int processing; |
| 112 | int transmitting; | 110 | int transmitting; |
| 113 | spinlock_t write_lock; | 111 | spinlock_t write_lock; |
| @@ -115,6 +113,7 @@ struct acm { | |||
| 115 | struct usb_cdc_line_coding line; /* bits, stop, parity */ | 113 | struct usb_cdc_line_coding line; /* bits, stop, parity */ |
| 116 | struct work_struct work; /* work queue entry for line discipline waking up */ | 114 | struct work_struct work; /* work queue entry for line discipline waking up */ |
| 117 | struct work_struct waker; | 115 | struct work_struct waker; |
| 116 | wait_queue_head_t drain_wait; /* close processing */ | ||
| 118 | struct tasklet_struct urb_task; /* rx processing */ | 117 | struct tasklet_struct urb_task; /* rx processing */ |
| 119 | spinlock_t throttle_lock; /* synchronize throtteling and read callback */ | 118 | spinlock_t throttle_lock; /* synchronize throtteling and read callback */ |
| 120 | unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ | 119 | unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ |
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index ddb54e14a5c5..2be37fe466f2 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
| @@ -774,7 +774,6 @@ void usb_deregister(struct usb_driver *driver) | |||
| 774 | } | 774 | } |
| 775 | EXPORT_SYMBOL_GPL(usb_deregister); | 775 | EXPORT_SYMBOL_GPL(usb_deregister); |
| 776 | 776 | ||
| 777 | |||
| 778 | /* Forced unbinding of a USB interface driver, either because | 777 | /* Forced unbinding of a USB interface driver, either because |
| 779 | * it doesn't support pre_reset/post_reset/reset_resume or | 778 | * it doesn't support pre_reset/post_reset/reset_resume or |
| 780 | * because it doesn't support suspend/resume. | 779 | * because it doesn't support suspend/resume. |
| @@ -821,6 +820,8 @@ void usb_rebind_intf(struct usb_interface *intf) | |||
| 821 | dev_warn(&intf->dev, "rebind failed: %d\n", rc); | 820 | dev_warn(&intf->dev, "rebind failed: %d\n", rc); |
| 822 | } | 821 | } |
| 823 | 822 | ||
| 823 | #ifdef CONFIG_PM | ||
| 824 | |||
| 824 | #define DO_UNBIND 0 | 825 | #define DO_UNBIND 0 |
| 825 | #define DO_REBIND 1 | 826 | #define DO_REBIND 1 |
| 826 | 827 | ||
| @@ -872,8 +873,6 @@ static void do_unbind_rebind(struct usb_device *udev, int action) | |||
| 872 | } | 873 | } |
| 873 | } | 874 | } |
| 874 | 875 | ||
| 875 | #ifdef CONFIG_PM | ||
| 876 | |||
| 877 | /* Caller has locked udev's pm_mutex */ | 876 | /* Caller has locked udev's pm_mutex */ |
| 878 | static int usb_suspend_device(struct usb_device *udev, pm_message_t msg) | 877 | static int usb_suspend_device(struct usb_device *udev, pm_message_t msg) |
| 879 | { | 878 | { |
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 586d6f1376cf..286b4431a097 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
| @@ -1091,8 +1091,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) | |||
| 1091 | continue; | 1091 | continue; |
| 1092 | dev_dbg(&dev->dev, "unregistering interface %s\n", | 1092 | dev_dbg(&dev->dev, "unregistering interface %s\n", |
| 1093 | dev_name(&interface->dev)); | 1093 | dev_name(&interface->dev)); |
| 1094 | device_del(&interface->dev); | ||
| 1095 | usb_remove_sysfs_intf_files(interface); | 1094 | usb_remove_sysfs_intf_files(interface); |
| 1095 | device_del(&interface->dev); | ||
| 1096 | } | 1096 | } |
| 1097 | 1097 | ||
| 1098 | /* Now that the interfaces are unbound, nobody should | 1098 | /* Now that the interfaces are unbound, nobody should |
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index c6a8c6b1116a..acc95b2ac6f8 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig | |||
| @@ -284,6 +284,16 @@ config USB_LH7A40X | |||
| 284 | default USB_GADGET | 284 | default USB_GADGET |
| 285 | select USB_GADGET_SELECTED | 285 | select USB_GADGET_SELECTED |
| 286 | 286 | ||
| 287 | # built in ../musb along with host support | ||
| 288 | config USB_GADGET_MUSB_HDRC | ||
| 289 | boolean "Inventra HDRC USB Peripheral (TI, ...)" | ||
| 290 | depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG) | ||
| 291 | select USB_GADGET_DUALSPEED | ||
| 292 | select USB_GADGET_SELECTED | ||
| 293 | help | ||
| 294 | This OTG-capable silicon IP is used in dual designs including | ||
| 295 | the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010. | ||
| 296 | |||
| 287 | config USB_GADGET_OMAP | 297 | config USB_GADGET_OMAP |
| 288 | boolean "OMAP USB Device Controller" | 298 | boolean "OMAP USB Device Controller" |
| 289 | depends on ARCH_OMAP | 299 | depends on ARCH_OMAP |
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c index 21d1406af9ee..7600a0c78753 100644 --- a/drivers/usb/gadget/dummy_hcd.c +++ b/drivers/usb/gadget/dummy_hcd.c | |||
| @@ -542,13 +542,14 @@ dummy_queue (struct usb_ep *_ep, struct usb_request *_req, | |||
| 542 | req->req.context = dum; | 542 | req->req.context = dum; |
| 543 | req->req.complete = fifo_complete; | 543 | req->req.complete = fifo_complete; |
| 544 | 544 | ||
| 545 | list_add_tail(&req->queue, &ep->queue); | ||
| 545 | spin_unlock (&dum->lock); | 546 | spin_unlock (&dum->lock); |
| 546 | _req->actual = _req->length; | 547 | _req->actual = _req->length; |
| 547 | _req->status = 0; | 548 | _req->status = 0; |
| 548 | _req->complete (_ep, _req); | 549 | _req->complete (_ep, _req); |
| 549 | spin_lock (&dum->lock); | 550 | spin_lock (&dum->lock); |
| 550 | } | 551 | } else |
| 551 | list_add_tail (&req->queue, &ep->queue); | 552 | list_add_tail(&req->queue, &ep->queue); |
| 552 | spin_unlock_irqrestore (&dum->lock, flags); | 553 | spin_unlock_irqrestore (&dum->lock, flags); |
| 553 | 554 | ||
| 554 | /* real hardware would likely enable transfers here, in case | 555 | /* real hardware would likely enable transfers here, in case |
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c index d8faccf27895..5ee1590b8e9c 100644 --- a/drivers/usb/gadget/f_acm.c +++ b/drivers/usb/gadget/f_acm.c | |||
| @@ -47,18 +47,37 @@ struct f_acm { | |||
| 47 | u8 ctrl_id, data_id; | 47 | u8 ctrl_id, data_id; |
| 48 | u8 port_num; | 48 | u8 port_num; |
| 49 | 49 | ||
| 50 | struct usb_descriptor_header **fs_function; | 50 | u8 pending; |
| 51 | |||
| 52 | /* lock is mostly for pending and notify_req ... they get accessed | ||
| 53 | * by callbacks both from tty (open/close/break) under its spinlock, | ||
| 54 | * and notify_req.complete() which can't use that lock. | ||
| 55 | */ | ||
| 56 | spinlock_t lock; | ||
| 57 | |||
| 51 | struct acm_ep_descs fs; | 58 | struct acm_ep_descs fs; |
| 52 | struct usb_descriptor_header **hs_function; | ||
| 53 | struct acm_ep_descs hs; | 59 | struct acm_ep_descs hs; |
| 54 | 60 | ||
| 55 | struct usb_ep *notify; | 61 | struct usb_ep *notify; |
| 56 | struct usb_endpoint_descriptor *notify_desc; | 62 | struct usb_endpoint_descriptor *notify_desc; |
| 63 | struct usb_request *notify_req; | ||
| 57 | 64 | ||
| 58 | struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ | 65 | struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ |
| 66 | |||
| 67 | /* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */ | ||
| 59 | u16 port_handshake_bits; | 68 | u16 port_handshake_bits; |
| 60 | #define RS232_RTS (1 << 1) /* unused with full duplex */ | 69 | #define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */ |
| 61 | #define RS232_DTR (1 << 0) /* host is ready for data r/w */ | 70 | #define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */ |
| 71 | |||
| 72 | /* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */ | ||
| 73 | u16 serial_state; | ||
| 74 | #define ACM_CTRL_OVERRUN (1 << 6) | ||
| 75 | #define ACM_CTRL_PARITY (1 << 5) | ||
| 76 | #define ACM_CTRL_FRAMING (1 << 4) | ||
| 77 | #define ACM_CTRL_RI (1 << 3) | ||
| 78 | #define ACM_CTRL_BRK (1 << 2) | ||
| 79 | #define ACM_CTRL_DSR (1 << 1) | ||
| 80 | #define ACM_CTRL_DCD (1 << 0) | ||
| 62 | }; | 81 | }; |
| 63 | 82 | ||
| 64 | static inline struct f_acm *func_to_acm(struct usb_function *f) | 83 | static inline struct f_acm *func_to_acm(struct usb_function *f) |
| @@ -66,12 +85,17 @@ static inline struct f_acm *func_to_acm(struct usb_function *f) | |||
| 66 | return container_of(f, struct f_acm, port.func); | 85 | return container_of(f, struct f_acm, port.func); |
| 67 | } | 86 | } |
| 68 | 87 | ||
| 88 | static inline struct f_acm *port_to_acm(struct gserial *p) | ||
| 89 | { | ||
| 90 | return container_of(p, struct f_acm, port); | ||
| 91 | } | ||
| 92 | |||
| 69 | /*-------------------------------------------------------------------------*/ | 93 | /*-------------------------------------------------------------------------*/ |
| 70 | 94 | ||
| 71 | /* notification endpoint uses smallish and infrequent fixed-size messages */ | 95 | /* notification endpoint uses smallish and infrequent fixed-size messages */ |
| 72 | 96 | ||
| 73 | #define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */ | 97 | #define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */ |
| 74 | #define GS_NOTIFY_MAXPACKET 8 | 98 | #define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */ |
| 75 | 99 | ||
| 76 | /* interface and class descriptors: */ | 100 | /* interface and class descriptors: */ |
| 77 | 101 | ||
| @@ -117,7 +141,7 @@ static struct usb_cdc_acm_descriptor acm_descriptor __initdata = { | |||
| 117 | .bLength = sizeof(acm_descriptor), | 141 | .bLength = sizeof(acm_descriptor), |
| 118 | .bDescriptorType = USB_DT_CS_INTERFACE, | 142 | .bDescriptorType = USB_DT_CS_INTERFACE, |
| 119 | .bDescriptorSubType = USB_CDC_ACM_TYPE, | 143 | .bDescriptorSubType = USB_CDC_ACM_TYPE, |
| 120 | .bmCapabilities = (1 << 1), | 144 | .bmCapabilities = USB_CDC_CAP_LINE, |
| 121 | }; | 145 | }; |
| 122 | 146 | ||
| 123 | static struct usb_cdc_union_desc acm_union_desc __initdata = { | 147 | static struct usb_cdc_union_desc acm_union_desc __initdata = { |
| @@ -277,6 +301,11 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) | |||
| 277 | 301 | ||
| 278 | /* composite driver infrastructure handles everything except | 302 | /* composite driver infrastructure handles everything except |
| 279 | * CDC class messages; interface activation uses set_alt(). | 303 | * CDC class messages; interface activation uses set_alt(). |
| 304 | * | ||
| 305 | * Note CDC spec table 4 lists the ACM request profile. It requires | ||
| 306 | * encapsulated command support ... we don't handle any, and respond | ||
| 307 | * to them by stalling. Options include get/set/clear comm features | ||
| 308 | * (not that useful) and SEND_BREAK. | ||
| 280 | */ | 309 | */ |
| 281 | switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { | 310 | switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { |
| 282 | 311 | ||
| @@ -312,7 +341,7 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) | |||
| 312 | value = 0; | 341 | value = 0; |
| 313 | 342 | ||
| 314 | /* FIXME we should not allow data to flow until the | 343 | /* FIXME we should not allow data to flow until the |
| 315 | * host sets the RS232_DTR bit; and when it clears | 344 | * host sets the ACM_CTRL_DTR bit; and when it clears |
| 316 | * that bit, we should return to that no-flow state. | 345 | * that bit, we should return to that no-flow state. |
| 317 | */ | 346 | */ |
| 318 | acm->port_handshake_bits = w_value; | 347 | acm->port_handshake_bits = w_value; |
| @@ -350,9 +379,6 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) | |||
| 350 | /* we know alt == 0, so this is an activation or a reset */ | 379 | /* we know alt == 0, so this is an activation or a reset */ |
| 351 | 380 | ||
| 352 | if (intf == acm->ctrl_id) { | 381 | if (intf == acm->ctrl_id) { |
| 353 | /* REVISIT this may need more work when we start to | ||
| 354 | * send notifications ... | ||
| 355 | */ | ||
| 356 | if (acm->notify->driver_data) { | 382 | if (acm->notify->driver_data) { |
| 357 | VDBG(cdev, "reset acm control interface %d\n", intf); | 383 | VDBG(cdev, "reset acm control interface %d\n", intf); |
| 358 | usb_ep_disable(acm->notify); | 384 | usb_ep_disable(acm->notify); |
| @@ -397,6 +423,128 @@ static void acm_disable(struct usb_function *f) | |||
| 397 | 423 | ||
| 398 | /*-------------------------------------------------------------------------*/ | 424 | /*-------------------------------------------------------------------------*/ |
| 399 | 425 | ||
| 426 | /** | ||
| 427 | * acm_cdc_notify - issue CDC notification to host | ||
| 428 | * @acm: wraps host to be notified | ||
| 429 | * @type: notification type | ||
| 430 | * @value: Refer to cdc specs, wValue field. | ||
| 431 | * @data: data to be sent | ||
| 432 | * @length: size of data | ||
| 433 | * Context: irqs blocked, acm->lock held, acm_notify_req non-null | ||
| 434 | * | ||
| 435 | * Returns zero on success or a negative errno. | ||
| 436 | * | ||
| 437 | * See section 6.3.5 of the CDC 1.1 specification for information | ||
| 438 | * about the only notification we issue: SerialState change. | ||
| 439 | */ | ||
| 440 | static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value, | ||
| 441 | void *data, unsigned length) | ||
| 442 | { | ||
| 443 | struct usb_ep *ep = acm->notify; | ||
| 444 | struct usb_request *req; | ||
| 445 | struct usb_cdc_notification *notify; | ||
| 446 | const unsigned len = sizeof(*notify) + length; | ||
| 447 | void *buf; | ||
| 448 | int status; | ||
| 449 | |||
| 450 | req = acm->notify_req; | ||
| 451 | acm->notify_req = NULL; | ||
| 452 | acm->pending = false; | ||
| 453 | |||
| 454 | req->length = len; | ||
| 455 | notify = req->buf; | ||
| 456 | buf = notify + 1; | ||
| 457 | |||
| 458 | notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS | ||
| 459 | | USB_RECIP_INTERFACE; | ||
| 460 | notify->bNotificationType = type; | ||
| 461 | notify->wValue = cpu_to_le16(value); | ||
| 462 | notify->wIndex = cpu_to_le16(acm->ctrl_id); | ||
| 463 | notify->wLength = cpu_to_le16(length); | ||
| 464 | memcpy(buf, data, length); | ||
| 465 | |||
| 466 | status = usb_ep_queue(ep, req, GFP_ATOMIC); | ||
| 467 | if (status < 0) { | ||
| 468 | ERROR(acm->port.func.config->cdev, | ||
| 469 | "acm ttyGS%d can't notify serial state, %d\n", | ||
| 470 | acm->port_num, status); | ||
| 471 | acm->notify_req = req; | ||
| 472 | } | ||
| 473 | |||
| 474 | return status; | ||
| 475 | } | ||
| 476 | |||
| 477 | static int acm_notify_serial_state(struct f_acm *acm) | ||
| 478 | { | ||
| 479 | struct usb_composite_dev *cdev = acm->port.func.config->cdev; | ||
| 480 | int status; | ||
| 481 | |||
| 482 | spin_lock(&acm->lock); | ||
| 483 | if (acm->notify_req) { | ||
| 484 | DBG(cdev, "acm ttyGS%d serial state %04x\n", | ||
| 485 | acm->port_num, acm->serial_state); | ||
| 486 | status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, | ||
| 487 | 0, &acm->serial_state, sizeof(acm->serial_state)); | ||
| 488 | } else { | ||
| 489 | acm->pending = true; | ||
| 490 | status = 0; | ||
| 491 | } | ||
| 492 | spin_unlock(&acm->lock); | ||
| 493 | return status; | ||
| 494 | } | ||
| 495 | |||
| 496 | static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req) | ||
| 497 | { | ||
| 498 | struct f_acm *acm = req->context; | ||
| 499 | u8 doit = false; | ||
| 500 | |||
| 501 | /* on this call path we do NOT hold the port spinlock, | ||
| 502 | * which is why ACM needs its own spinlock | ||
| 503 | */ | ||
| 504 | spin_lock(&acm->lock); | ||
| 505 | if (req->status != -ESHUTDOWN) | ||
| 506 | doit = acm->pending; | ||
| 507 | acm->notify_req = req; | ||
| 508 | spin_unlock(&acm->lock); | ||
| 509 | |||
| 510 | if (doit) | ||
| 511 | acm_notify_serial_state(acm); | ||
| 512 | } | ||
| 513 | |||
| 514 | /* connect == the TTY link is open */ | ||
| 515 | |||
| 516 | static void acm_connect(struct gserial *port) | ||
| 517 | { | ||
| 518 | struct f_acm *acm = port_to_acm(port); | ||
| 519 | |||
| 520 | acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD; | ||
| 521 | acm_notify_serial_state(acm); | ||
| 522 | } | ||
| 523 | |||
| 524 | static void acm_disconnect(struct gserial *port) | ||
| 525 | { | ||
| 526 | struct f_acm *acm = port_to_acm(port); | ||
| 527 | |||
| 528 | acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD); | ||
| 529 | acm_notify_serial_state(acm); | ||
| 530 | } | ||
| 531 | |||
| 532 | static int acm_send_break(struct gserial *port, int duration) | ||
| 533 | { | ||
| 534 | struct f_acm *acm = port_to_acm(port); | ||
| 535 | u16 state; | ||
| 536 | |||
| 537 | state = acm->serial_state; | ||
| 538 | state &= ~ACM_CTRL_BRK; | ||
| 539 | if (duration) | ||
| 540 | state |= ACM_CTRL_BRK; | ||
| 541 | |||
| 542 | acm->serial_state = state; | ||
| 543 | return acm_notify_serial_state(acm); | ||
| 544 | } | ||
| 545 | |||
| 546 | /*-------------------------------------------------------------------------*/ | ||
| 547 | |||
| 400 | /* ACM function driver setup/binding */ | 548 | /* ACM function driver setup/binding */ |
| 401 | static int __init | 549 | static int __init |
| 402 | acm_bind(struct usb_configuration *c, struct usb_function *f) | 550 | acm_bind(struct usb_configuration *c, struct usb_function *f) |
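acm_cdc_notify() above packs the only notification this function issues, SERIAL_STATE (CDC 1.1, section 6.3.5): an 8-byte usb_cdc_notification header followed by the 16-bit state bitmap, 10 bytes in total, which is why GS_NOTIFY_MAXPACKET grows to 10 and the request buffer is sizeof(struct usb_cdc_notification) + 2. A sketch of the resulting wire format; the helper name is illustrative only:

    #include <linux/types.h>
    #include <asm/byteorder.h>
    #include <linux/usb/ch9.h>
    #include <linux/usb/cdc.h>

    /* Fill a 10-byte SERIAL_STATE notification into buf (little-endian). */
    static void fill_serial_state(void *buf, u8 ctrl_intf, u16 state)
    {
            struct usb_cdc_notification *notify = buf;
            __le16 *bitmap = (__le16 *)(notify + 1);

            notify->bmRequestType     = USB_DIR_IN | USB_TYPE_CLASS |
                                        USB_RECIP_INTERFACE;          /* 0xA1 */
            notify->bNotificationType = USB_CDC_NOTIFY_SERIAL_STATE;  /* 0x20 */
            notify->wValue            = cpu_to_le16(0);
            notify->wIndex            = cpu_to_le16(ctrl_intf); /* control interface */
            notify->wLength           = cpu_to_le16(2);         /* bitmap length */
            *bitmap = cpu_to_le16(state);  /* e.g. ACM_CTRL_DSR | ACM_CTRL_DCD */
    }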
| @@ -445,8 +593,20 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) | |||
| 445 | acm->notify = ep; | 593 | acm->notify = ep; |
| 446 | ep->driver_data = cdev; /* claim */ | 594 | ep->driver_data = cdev; /* claim */ |
| 447 | 595 | ||
| 596 | /* allocate notification */ | ||
| 597 | acm->notify_req = gs_alloc_req(ep, | ||
| 598 | sizeof(struct usb_cdc_notification) + 2, | ||
| 599 | GFP_KERNEL); | ||
| 600 | if (!acm->notify_req) | ||
| 601 | goto fail; | ||
| 602 | |||
| 603 | acm->notify_req->complete = acm_cdc_notify_complete; | ||
| 604 | acm->notify_req->context = acm; | ||
| 605 | |||
| 448 | /* copy descriptors, and track endpoint copies */ | 606 | /* copy descriptors, and track endpoint copies */ |
| 449 | f->descriptors = usb_copy_descriptors(acm_fs_function); | 607 | f->descriptors = usb_copy_descriptors(acm_fs_function); |
| 608 | if (!f->descriptors) | ||
| 609 | goto fail; | ||
| 450 | 610 | ||
| 451 | acm->fs.in = usb_find_endpoint(acm_fs_function, | 611 | acm->fs.in = usb_find_endpoint(acm_fs_function, |
| 452 | f->descriptors, &acm_fs_in_desc); | 612 | f->descriptors, &acm_fs_in_desc); |
| @@ -478,8 +638,6 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) | |||
| 478 | f->hs_descriptors, &acm_hs_notify_desc); | 638 | f->hs_descriptors, &acm_hs_notify_desc); |
| 479 | } | 639 | } |
| 480 | 640 | ||
| 481 | /* FIXME provide a callback for triggering notifications */ | ||
| 482 | |||
| 483 | DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n", | 641 | DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n", |
| 484 | acm->port_num, | 642 | acm->port_num, |
| 485 | gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", | 643 | gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", |
| @@ -488,6 +646,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) | |||
| 488 | return 0; | 646 | return 0; |
| 489 | 647 | ||
| 490 | fail: | 648 | fail: |
| 649 | if (acm->notify_req) | ||
| 650 | gs_free_req(acm->notify, acm->notify_req); | ||
| 651 | |||
| 491 | /* we might as well release our claims on endpoints */ | 652 | /* we might as well release our claims on endpoints */ |
| 492 | if (acm->notify) | 653 | if (acm->notify) |
| 493 | acm->notify->driver_data = NULL; | 654 | acm->notify->driver_data = NULL; |
| @@ -504,10 +665,13 @@ fail: | |||
| 504 | static void | 665 | static void |
| 505 | acm_unbind(struct usb_configuration *c, struct usb_function *f) | 666 | acm_unbind(struct usb_configuration *c, struct usb_function *f) |
| 506 | { | 667 | { |
| 668 | struct f_acm *acm = func_to_acm(f); | ||
| 669 | |||
| 507 | if (gadget_is_dualspeed(c->cdev->gadget)) | 670 | if (gadget_is_dualspeed(c->cdev->gadget)) |
| 508 | usb_free_descriptors(f->hs_descriptors); | 671 | usb_free_descriptors(f->hs_descriptors); |
| 509 | usb_free_descriptors(f->descriptors); | 672 | usb_free_descriptors(f->descriptors); |
| 510 | kfree(func_to_acm(f)); | 673 | gs_free_req(acm->notify, acm->notify_req); |
| 674 | kfree(acm); | ||
| 511 | } | 675 | } |
| 512 | 676 | ||
| 513 | /* Some controllers can't support CDC ACM ... */ | 677 | /* Some controllers can't support CDC ACM ... */ |
| @@ -571,8 +735,14 @@ int __init acm_bind_config(struct usb_configuration *c, u8 port_num) | |||
| 571 | if (!acm) | 735 | if (!acm) |
| 572 | return -ENOMEM; | 736 | return -ENOMEM; |
| 573 | 737 | ||
| 738 | spin_lock_init(&acm->lock); | ||
| 739 | |||
| 574 | acm->port_num = port_num; | 740 | acm->port_num = port_num; |
| 575 | 741 | ||
| 742 | acm->port.connect = acm_connect; | ||
| 743 | acm->port.disconnect = acm_disconnect; | ||
| 744 | acm->port.send_break = acm_send_break; | ||
| 745 | |||
| 576 | acm->port.func.name = "acm"; | 746 | acm->port.func.name = "acm"; |
| 577 | acm->port.func.strings = acm_strings; | 747 | acm->port.func.strings = acm_strings; |
| 578 | /* descriptors are per-instance copies */ | 748 | /* descriptors are per-instance copies */ |
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c index 0822e9d7693a..a2b5c092bda0 100644 --- a/drivers/usb/gadget/f_ecm.c +++ b/drivers/usb/gadget/f_ecm.c | |||
| @@ -63,9 +63,7 @@ struct f_ecm { | |||
| 63 | 63 | ||
| 64 | char ethaddr[14]; | 64 | char ethaddr[14]; |
| 65 | 65 | ||
| 66 | struct usb_descriptor_header **fs_function; | ||
| 67 | struct ecm_ep_descs fs; | 66 | struct ecm_ep_descs fs; |
| 68 | struct usb_descriptor_header **hs_function; | ||
| 69 | struct ecm_ep_descs hs; | 67 | struct ecm_ep_descs hs; |
| 70 | 68 | ||
| 71 | struct usb_ep *notify; | 69 | struct usb_ep *notify; |
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 61652f0f13fd..659b3d9671c4 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c | |||
| @@ -85,9 +85,7 @@ struct f_rndis { | |||
| 85 | u8 ethaddr[ETH_ALEN]; | 85 | u8 ethaddr[ETH_ALEN]; |
| 86 | int config; | 86 | int config; |
| 87 | 87 | ||
| 88 | struct usb_descriptor_header **fs_function; | ||
| 89 | struct rndis_ep_descs fs; | 88 | struct rndis_ep_descs fs; |
| 90 | struct usb_descriptor_header **hs_function; | ||
| 91 | struct rndis_ep_descs hs; | 89 | struct rndis_ep_descs hs; |
| 92 | 90 | ||
| 93 | struct usb_ep *notify; | 91 | struct usb_ep *notify; |
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c index 1b6bde9aaed5..fe5674db344b 100644 --- a/drivers/usb/gadget/f_serial.c +++ b/drivers/usb/gadget/f_serial.c | |||
| @@ -36,9 +36,7 @@ struct f_gser { | |||
| 36 | u8 data_id; | 36 | u8 data_id; |
| 37 | u8 port_num; | 37 | u8 port_num; |
| 38 | 38 | ||
| 39 | struct usb_descriptor_header **fs_function; | ||
| 40 | struct gser_descs fs; | 39 | struct gser_descs fs; |
| 41 | struct usb_descriptor_header **hs_function; | ||
| 42 | struct gser_descs hs; | 40 | struct gser_descs hs; |
| 43 | }; | 41 | }; |
| 44 | 42 | ||
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c index afeab9a0523f..acb8d233aa1d 100644 --- a/drivers/usb/gadget/f_subset.c +++ b/drivers/usb/gadget/f_subset.c | |||
| @@ -66,9 +66,7 @@ struct f_gether { | |||
| 66 | 66 | ||
| 67 | char ethaddr[14]; | 67 | char ethaddr[14]; |
| 68 | 68 | ||
| 69 | struct usb_descriptor_header **fs_function; | ||
| 70 | struct geth_descs fs; | 69 | struct geth_descs fs; |
| 71 | struct usb_descriptor_header **hs_function; | ||
| 72 | struct geth_descs hs; | 70 | struct geth_descs hs; |
| 73 | }; | 71 | }; |
| 74 | 72 | ||
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h index 5246e8fef2b2..17d9905101b7 100644 --- a/drivers/usb/gadget/gadget_chips.h +++ b/drivers/usb/gadget/gadget_chips.h | |||
| @@ -11,6 +11,10 @@ | |||
| 11 | * Some are available on 2.4 kernels; several are available, but not | 11 | * Some are available on 2.4 kernels; several are available, but not |
| 12 | * yet pushed in the 2.6 mainline tree. | 12 | * yet pushed in the 2.6 mainline tree. |
| 13 | */ | 13 | */ |
| 14 | |||
| 15 | #ifndef __GADGET_CHIPS_H | ||
| 16 | #define __GADGET_CHIPS_H | ||
| 17 | |||
| 14 | #ifdef CONFIG_USB_GADGET_NET2280 | 18 | #ifdef CONFIG_USB_GADGET_NET2280 |
| 15 | #define gadget_is_net2280(g) !strcmp("net2280", (g)->name) | 19 | #define gadget_is_net2280(g) !strcmp("net2280", (g)->name) |
| 16 | #else | 20 | #else |
| @@ -237,3 +241,5 @@ static inline bool gadget_supports_altsettings(struct usb_gadget *gadget) | |||
| 237 | /* Everything else is *presumably* fine ... */ | 241 | /* Everything else is *presumably* fine ... */ |
| 238 | return true; | 242 | return true; |
| 239 | } | 243 | } |
| 244 | |||
| 245 | #endif /* __GADGET_CHIPS_H */ | ||
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c index 376e80c07530..574c53831a05 100644 --- a/drivers/usb/gadget/omap_udc.c +++ b/drivers/usb/gadget/omap_udc.c | |||
| @@ -54,6 +54,7 @@ | |||
| 54 | 54 | ||
| 55 | #include <mach/dma.h> | 55 | #include <mach/dma.h> |
| 56 | #include <mach/usb.h> | 56 | #include <mach/usb.h> |
| 57 | #include <mach/control.h> | ||
| 57 | 58 | ||
| 58 | #include "omap_udc.h" | 59 | #include "omap_udc.h" |
| 59 | 60 | ||
| @@ -2310,10 +2311,10 @@ static int proc_otg_show(struct seq_file *s) | |||
| 2310 | u32 trans; | 2311 | u32 trans; |
| 2311 | char *ctrl_name; | 2312 | char *ctrl_name; |
| 2312 | 2313 | ||
| 2313 | tmp = OTG_REV_REG; | 2314 | tmp = omap_readl(OTG_REV); |
| 2314 | if (cpu_is_omap24xx()) { | 2315 | if (cpu_is_omap24xx()) { |
| 2315 | ctrl_name = "control_devconf"; | 2316 | ctrl_name = "control_devconf"; |
| 2316 | trans = CONTROL_DEVCONF_REG; | 2317 | trans = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0); |
| 2317 | } else { | 2318 | } else { |
| 2318 | ctrl_name = "tranceiver_ctrl"; | 2319 | ctrl_name = "tranceiver_ctrl"; |
| 2319 | trans = omap_readw(USB_TRANSCEIVER_CTRL); | 2320 | trans = omap_readw(USB_TRANSCEIVER_CTRL); |
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c index abf9505d3a75..53d59287f2bc 100644 --- a/drivers/usb/gadget/u_serial.c +++ b/drivers/usb/gadget/u_serial.c | |||
| @@ -52,13 +52,16 @@ | |||
| 52 | * is managed in userspace ... OBEX, PTP, and MTP have been mentioned. | 52 | * is managed in userspace ... OBEX, PTP, and MTP have been mentioned. |
| 53 | */ | 53 | */ |
| 54 | 54 | ||
| 55 | #define PREFIX "ttyGS" | ||
| 56 | |||
| 55 | /* | 57 | /* |
| 56 | * gserial is the lifecycle interface, used by USB functions | 58 | * gserial is the lifecycle interface, used by USB functions |
| 57 | * gs_port is the I/O nexus, used by the tty driver | 59 | * gs_port is the I/O nexus, used by the tty driver |
| 58 | * tty_struct links to the tty/filesystem framework | 60 | * tty_struct links to the tty/filesystem framework |
| 59 | * | 61 | * |
| 60 | * gserial <---> gs_port ... links will be null when the USB link is | 62 | * gserial <---> gs_port ... links will be null when the USB link is |
| 61 | * inactive; managed by gserial_{connect,disconnect}(). | 63 | * inactive; managed by gserial_{connect,disconnect}(). each gserial |
| 64 | * instance can wrap its own USB control protocol. | ||
| 62 | * gserial->ioport == usb_ep->driver_data ... gs_port | 65 | * gserial->ioport == usb_ep->driver_data ... gs_port |
| 63 | * gs_port->port_usb ... gserial | 66 | * gs_port->port_usb ... gserial |
| 64 | * | 67 | * |
| @@ -100,6 +103,8 @@ struct gs_port { | |||
| 100 | wait_queue_head_t close_wait; /* wait for last close */ | 103 | wait_queue_head_t close_wait; /* wait for last close */ |
| 101 | 104 | ||
| 102 | struct list_head read_pool; | 105 | struct list_head read_pool; |
| 106 | struct list_head read_queue; | ||
| 107 | unsigned n_read; | ||
| 103 | struct tasklet_struct push; | 108 | struct tasklet_struct push; |
| 104 | 109 | ||
| 105 | struct list_head write_pool; | 110 | struct list_head write_pool; |
| @@ -177,7 +182,7 @@ static void gs_buf_clear(struct gs_buf *gb) | |||
| 177 | /* | 182 | /* |
| 178 | * gs_buf_data_avail | 183 | * gs_buf_data_avail |
| 179 | * | 184 | * |
| 180 | * Return the number of bytes of data available in the circular | 185 | * Return the number of bytes of data written into the circular |
| 181 | * buffer. | 186 | * buffer. |
| 182 | */ | 187 | */ |
| 183 | static unsigned gs_buf_data_avail(struct gs_buf *gb) | 188 | static unsigned gs_buf_data_avail(struct gs_buf *gb) |
| @@ -278,7 +283,7 @@ gs_buf_get(struct gs_buf *gb, char *buf, unsigned count) | |||
| 278 | * Allocate a usb_request and its buffer. Returns a pointer to the | 283 | * Allocate a usb_request and its buffer. Returns a pointer to the |
| 279 | * usb_request or NULL if there is an error. | 284 | * usb_request or NULL if there is an error. |
| 280 | */ | 285 | */ |
| 281 | static struct usb_request * | 286 | struct usb_request * |
| 282 | gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) | 287 | gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) |
| 283 | { | 288 | { |
| 284 | struct usb_request *req; | 289 | struct usb_request *req; |
| @@ -302,7 +307,7 @@ gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) | |||
| 302 | * | 307 | * |
| 303 | * Free a usb_request and its buffer. | 308 | * Free a usb_request and its buffer. |
| 304 | */ | 309 | */ |
| 305 | static void gs_free_req(struct usb_ep *ep, struct usb_request *req) | 310 | void gs_free_req(struct usb_ep *ep, struct usb_request *req) |
| 306 | { | 311 | { |
| 307 | kfree(req->buf); | 312 | kfree(req->buf); |
| 308 | usb_ep_free_request(ep, req); | 313 | usb_ep_free_request(ep, req); |
| @@ -367,11 +372,9 @@ __acquires(&port->port_lock) | |||
| 367 | req->length = len; | 372 | req->length = len; |
| 368 | list_del(&req->list); | 373 | list_del(&req->list); |
| 369 | 374 | ||
| 370 | #ifdef VERBOSE_DEBUG | 375 | pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n", |
| 371 | pr_debug("%s: %s, len=%d, 0x%02x 0x%02x 0x%02x ...\n", | 376 | port->port_num, len, *((u8 *)req->buf), |
| 372 | __func__, in->name, len, *((u8 *)req->buf), | ||
| 373 | *((u8 *)req->buf+1), *((u8 *)req->buf+2)); | 377 | *((u8 *)req->buf+1), *((u8 *)req->buf+2)); |
| 374 | #endif | ||
| 375 | 378 | ||
| 376 | /* Drop lock while we call out of driver; completions | 379 | /* Drop lock while we call out of driver; completions |
| 377 | * could be issued while we do so. Disconnection may | 380 | * could be issued while we do so. Disconnection may |
| @@ -401,56 +404,6 @@ __acquires(&port->port_lock) | |||
| 401 | return status; | 404 | return status; |
| 402 | } | 405 | } |
| 403 | 406 | ||
| 404 | static void gs_rx_push(unsigned long _port) | ||
| 405 | { | ||
| 406 | struct gs_port *port = (void *)_port; | ||
| 407 | struct tty_struct *tty = port->port_tty; | ||
| 408 | |||
| 409 | /* With low_latency, tty_flip_buffer_push() doesn't put its | ||
| 410 | * real work through a workqueue, so the ldisc has a better | ||
| 411 | * chance to keep up with peak USB data rates. | ||
| 412 | */ | ||
| 413 | if (tty) { | ||
| 414 | tty_flip_buffer_push(tty); | ||
| 415 | wake_up_interruptible(&tty->read_wait); | ||
| 416 | } | ||
| 417 | } | ||
| 418 | |||
| 419 | /* | ||
| 420 | * gs_recv_packet | ||
| 421 | * | ||
| 422 | * Called for each USB packet received. Reads the packet | ||
| 423 | * header and stuffs the data in the appropriate tty buffer. | ||
| 424 | * Returns 0 if successful, or a negative error number. | ||
| 425 | * | ||
| 426 | * Called during USB completion routine, on interrupt time. | ||
| 427 | * With port_lock. | ||
| 428 | */ | ||
| 429 | static int gs_recv_packet(struct gs_port *port, char *packet, unsigned size) | ||
| 430 | { | ||
| 431 | unsigned len; | ||
| 432 | struct tty_struct *tty; | ||
| 433 | |||
| 434 | /* I/O completions can continue for a while after close(), until the | ||
| 435 | * request queue empties. Just discard any data we receive, until | ||
| 436 | * something reopens this TTY ... as if there were no HW flow control. | ||
| 437 | */ | ||
| 438 | tty = port->port_tty; | ||
| 439 | if (tty == NULL) { | ||
| 440 | pr_vdebug("%s: ttyGS%d, after close\n", | ||
| 441 | __func__, port->port_num); | ||
| 442 | return -EIO; | ||
| 443 | } | ||
| 444 | |||
| 445 | len = tty_insert_flip_string(tty, packet, size); | ||
| 446 | if (len > 0) | ||
| 447 | tasklet_schedule(&port->push); | ||
| 448 | if (len < size) | ||
| 449 | pr_debug("%s: ttyGS%d, drop %d bytes\n", | ||
| 450 | __func__, port->port_num, size - len); | ||
| 451 | return 0; | ||
| 452 | } | ||
| 453 | |||
| 454 | /* | 407 | /* |
| 455 | * Context: caller owns port_lock, and port_usb is set | 408 | * Context: caller owns port_lock, and port_usb is set |
| 456 | */ | 409 | */ |
| @@ -469,9 +422,9 @@ __acquires(&port->port_lock) | |||
| 469 | int status; | 422 | int status; |
| 470 | struct tty_struct *tty; | 423 | struct tty_struct *tty; |
| 471 | 424 | ||
| 472 | /* no more rx if closed or throttled */ | 425 | /* no more rx if closed */ |
| 473 | tty = port->port_tty; | 426 | tty = port->port_tty; |
| 474 | if (!tty || test_bit(TTY_THROTTLED, &tty->flags)) | 427 | if (!tty) |
| 475 | break; | 428 | break; |
| 476 | 429 | ||
| 477 | req = list_entry(pool->next, struct usb_request, list); | 430 | req = list_entry(pool->next, struct usb_request, list); |
| @@ -500,36 +453,134 @@ __acquires(&port->port_lock) | |||
| 500 | return started; | 453 | return started; |
| 501 | } | 454 | } |
| 502 | 455 | ||
| 503 | static void gs_read_complete(struct usb_ep *ep, struct usb_request *req) | 456 | /* |
| 457 | * RX tasklet takes data out of the RX queue and hands it up to the TTY | ||
| 458 | * layer until it refuses to take any more data (or is throttled back). | ||
| 459 | * Then it issues reads for any further data. | ||
| 460 | * | ||
| 461 | * If the RX queue becomes full enough that no usb_request is queued, | ||
| 462 | * the OUT endpoint may begin NAKing as soon as its FIFO fills up. | ||
| 463 | * So QUEUE_SIZE packets plus however many the FIFO holds (usually two) | ||
| 464 | * can be buffered before the TTY layer's buffers (currently 64 KB). | ||
| 465 | */ | ||
| 466 | static void gs_rx_push(unsigned long _port) | ||
| 504 | { | 467 | { |
| 505 | int status; | 468 | struct gs_port *port = (void *)_port; |
| 506 | struct gs_port *port = ep->driver_data; | 469 | struct tty_struct *tty; |
| 470 | struct list_head *queue = &port->read_queue; | ||
| 471 | bool disconnect = false; | ||
| 472 | bool do_push = false; | ||
| 507 | 473 | ||
| 508 | spin_lock(&port->port_lock); | 474 | /* hand any queued data to the tty */ |
| 509 | list_add(&req->list, &port->read_pool); | 475 | spin_lock_irq(&port->port_lock); |
| 476 | tty = port->port_tty; | ||
| 477 | while (!list_empty(queue)) { | ||
| 478 | struct usb_request *req; | ||
| 510 | 479 | ||
| 511 | switch (req->status) { | 480 | req = list_first_entry(queue, struct usb_request, list); |
| 512 | case 0: | ||
| 513 | /* normal completion */ | ||
| 514 | status = gs_recv_packet(port, req->buf, req->actual); | ||
| 515 | if (status && status != -EIO) | ||
| 516 | pr_debug("%s: %s %s err %d\n", | ||
| 517 | __func__, "recv", ep->name, status); | ||
| 518 | gs_start_rx(port); | ||
| 519 | break; | ||
| 520 | 481 | ||
| 521 | case -ESHUTDOWN: | 482 | /* discard data if tty was closed */ |
| 522 | /* disconnect */ | 483 | if (!tty) |
| 523 | pr_vdebug("%s: %s shutdown\n", __func__, ep->name); | 484 | goto recycle; |
| 524 | break; | ||
| 525 | 485 | ||
| 526 | default: | 486 | /* leave data queued if tty was rx throttled */ |
| 527 | /* presumably a transient fault */ | 487 | if (test_bit(TTY_THROTTLED, &tty->flags)) |
| 528 | pr_warning("%s: unexpected %s status %d\n", | 488 | break; |
| 529 | __func__, ep->name, req->status); | 489 | |
| 530 | gs_start_rx(port); | 490 | switch (req->status) { |
| 531 | break; | 491 | case -ESHUTDOWN: |
| 492 | disconnect = true; | ||
| 493 | pr_vdebug(PREFIX "%d: shutdown\n", port->port_num); | ||
| 494 | break; | ||
| 495 | |||
| 496 | default: | ||
| 497 | /* presumably a transient fault */ | ||
| 498 | pr_warning(PREFIX "%d: unexpected RX status %d\n", | ||
| 499 | port->port_num, req->status); | ||
| 500 | /* FALLTHROUGH */ | ||
| 501 | case 0: | ||
| 502 | /* normal completion */ | ||
| 503 | break; | ||
| 504 | } | ||
| 505 | |||
| 506 | /* push data to (open) tty */ | ||
| 507 | if (req->actual) { | ||
| 508 | char *packet = req->buf; | ||
| 509 | unsigned size = req->actual; | ||
| 510 | unsigned n; | ||
| 511 | int count; | ||
| 512 | |||
| 513 | /* we may have pushed part of this packet already... */ | ||
| 514 | n = port->n_read; | ||
| 515 | if (n) { | ||
| 516 | packet += n; | ||
| 517 | size -= n; | ||
| 518 | } | ||
| 519 | |||
| 520 | count = tty_insert_flip_string(tty, packet, size); | ||
| 521 | if (count) | ||
| 522 | do_push = true; | ||
| 523 | if (count != size) { | ||
| 524 | /* stop pushing; TTY layer can't handle more */ | ||
| 525 | port->n_read += count; | ||
| 526 | pr_vdebug(PREFIX "%d: rx block %d/%d\n", | ||
| 527 | port->port_num, | ||
| 528 | count, req->actual); | ||
| 529 | break; | ||
| 530 | } | ||
| 531 | port->n_read = 0; | ||
| 532 | } | ||
| 533 | recycle: | ||
| 534 | list_move(&req->list, &port->read_pool); | ||
| 532 | } | 535 | } |
| 536 | |||
| 537 | /* Push from tty to ldisc; this is immediate with low_latency, and | ||
| 538 | * may trigger callbacks to this driver ... so drop the spinlock. | ||
| 539 | */ | ||
| 540 | if (tty && do_push) { | ||
| 541 | spin_unlock_irq(&port->port_lock); | ||
| 542 | tty_flip_buffer_push(tty); | ||
| 543 | wake_up_interruptible(&tty->read_wait); | ||
| 544 | spin_lock_irq(&port->port_lock); | ||
| 545 | |||
| 546 | /* tty may have been closed */ | ||
| 547 | tty = port->port_tty; | ||
| 548 | } | ||
| 549 | |||
| 550 | |||
| 551 | /* We want our data queue to become empty ASAP, keeping data | ||
| 552 | * in the tty and ldisc (not here). If we couldn't push any | ||
| 553 | * this time around, there may be trouble unless there's an | ||
| 554 | * implicit tty_unthrottle() call on its way... | ||
| 555 | * | ||
| 556 | * REVISIT we should probably add a timer to keep the tasklet | ||
| 557 | * from starving ... but it's not clear that case ever happens. | ||
| 558 | */ | ||
| 559 | if (!list_empty(queue) && tty) { | ||
| 560 | if (!test_bit(TTY_THROTTLED, &tty->flags)) { | ||
| 561 | if (do_push) | ||
| 562 | tasklet_schedule(&port->push); | ||
| 563 | else | ||
| 564 | pr_warning(PREFIX "%d: RX not scheduled?\n", | ||
| 565 | port->port_num); | ||
| 566 | } | ||
| 567 | } | ||
| 568 | |||
| 569 | /* If we're still connected, refill the USB RX queue. */ | ||
| 570 | if (!disconnect && port->port_usb) | ||
| 571 | gs_start_rx(port); | ||
| 572 | |||
| 573 | spin_unlock_irq(&port->port_lock); | ||
| 574 | } | ||
| 575 | |||
| 576 | static void gs_read_complete(struct usb_ep *ep, struct usb_request *req) | ||
| 577 | { | ||
| 578 | struct gs_port *port = ep->driver_data; | ||
| 579 | |||
| 580 | /* Queue all received data until the tty layer is ready for it. */ | ||
| 581 | spin_lock(&port->port_lock); | ||
| 582 | list_add_tail(&req->list, &port->read_queue); | ||
| 583 | tasklet_schedule(&port->push); | ||
| 533 | spin_unlock(&port->port_lock); | 584 | spin_unlock(&port->port_lock); |
| 534 | } | 585 | } |
| 535 | 586 | ||
| @@ -625,6 +676,7 @@ static int gs_start_io(struct gs_port *port) | |||
| 625 | } | 676 | } |
| 626 | 677 | ||
| 627 | /* queue read requests */ | 678 | /* queue read requests */ |
| 679 | port->n_read = 0; | ||
| 628 | started = gs_start_rx(port); | 680 | started = gs_start_rx(port); |
| 629 | 681 | ||
| 630 | /* unblock any pending writes into our circular buffer */ | 682 | /* unblock any pending writes into our circular buffer */ |
| @@ -633,9 +685,10 @@ static int gs_start_io(struct gs_port *port) | |||
| 633 | } else { | 685 | } else { |
| 634 | gs_free_requests(ep, head); | 686 | gs_free_requests(ep, head); |
| 635 | gs_free_requests(port->port_usb->in, &port->write_pool); | 687 | gs_free_requests(port->port_usb->in, &port->write_pool); |
| 688 | status = -EIO; | ||
| 636 | } | 689 | } |
| 637 | 690 | ||
| 638 | return started ? 0 : status; | 691 | return status; |
| 639 | } | 692 | } |
| 640 | 693 | ||
| 641 | /*-------------------------------------------------------------------------*/ | 694 | /*-------------------------------------------------------------------------*/ |
| @@ -736,10 +789,13 @@ static int gs_open(struct tty_struct *tty, struct file *file) | |||
| 736 | 789 | ||
| 737 | /* if connected, start the I/O stream */ | 790 | /* if connected, start the I/O stream */ |
| 738 | if (port->port_usb) { | 791 | if (port->port_usb) { |
| 792 | struct gserial *gser = port->port_usb; | ||
| 793 | |||
| 739 | pr_debug("gs_open: start ttyGS%d\n", port->port_num); | 794 | pr_debug("gs_open: start ttyGS%d\n", port->port_num); |
| 740 | gs_start_io(port); | 795 | gs_start_io(port); |
| 741 | 796 | ||
| 742 | /* REVISIT for ACM, issue "network connected" event */ | 797 | if (gser->connect) |
| 798 | gser->connect(gser); | ||
| 743 | } | 799 | } |
| 744 | 800 | ||
| 745 | pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file); | 801 | pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file); |
| @@ -766,6 +822,7 @@ static int gs_writes_finished(struct gs_port *p) | |||
| 766 | static void gs_close(struct tty_struct *tty, struct file *file) | 822 | static void gs_close(struct tty_struct *tty, struct file *file) |
| 767 | { | 823 | { |
| 768 | struct gs_port *port = tty->driver_data; | 824 | struct gs_port *port = tty->driver_data; |
| 825 | struct gserial *gser; | ||
| 769 | 826 | ||
| 770 | spin_lock_irq(&port->port_lock); | 827 | spin_lock_irq(&port->port_lock); |
| 771 | 828 | ||
| @@ -785,32 +842,31 @@ static void gs_close(struct tty_struct *tty, struct file *file) | |||
| 785 | port->openclose = true; | 842 | port->openclose = true; |
| 786 | port->open_count = 0; | 843 | port->open_count = 0; |
| 787 | 844 | ||
| 788 | if (port->port_usb) | 845 | gser = port->port_usb; |
| 789 | /* REVISIT for ACM, issue "network disconnected" event */; | 846 | if (gser && gser->disconnect) |
| 847 | gser->disconnect(gser); | ||
| 790 | 848 | ||
| 791 | /* wait for circular write buffer to drain, disconnect, or at | 849 | /* wait for circular write buffer to drain, disconnect, or at |
| 792 | * most GS_CLOSE_TIMEOUT seconds; then discard the rest | 850 | * most GS_CLOSE_TIMEOUT seconds; then discard the rest |
| 793 | */ | 851 | */ |
| 794 | if (gs_buf_data_avail(&port->port_write_buf) > 0 | 852 | if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) { |
| 795 | && port->port_usb) { | ||
| 796 | spin_unlock_irq(&port->port_lock); | 853 | spin_unlock_irq(&port->port_lock); |
| 797 | wait_event_interruptible_timeout(port->drain_wait, | 854 | wait_event_interruptible_timeout(port->drain_wait, |
| 798 | gs_writes_finished(port), | 855 | gs_writes_finished(port), |
| 799 | GS_CLOSE_TIMEOUT * HZ); | 856 | GS_CLOSE_TIMEOUT * HZ); |
| 800 | spin_lock_irq(&port->port_lock); | 857 | spin_lock_irq(&port->port_lock); |
| 858 | gser = port->port_usb; | ||
| 801 | } | 859 | } |
| 802 | 860 | ||
| 803 | /* Iff we're disconnected, there can be no I/O in flight so it's | 861 | /* Iff we're disconnected, there can be no I/O in flight so it's |
| 804 | * ok to free the circular buffer; else just scrub it. And don't | 862 | * ok to free the circular buffer; else just scrub it. And don't |
| 805 | * let the push tasklet fire again until we're re-opened. | 863 | * let the push tasklet fire again until we're re-opened. |
| 806 | */ | 864 | */ |
| 807 | if (port->port_usb == NULL) | 865 | if (gser == NULL) |
| 808 | gs_buf_free(&port->port_write_buf); | 866 | gs_buf_free(&port->port_write_buf); |
| 809 | else | 867 | else |
| 810 | gs_buf_clear(&port->port_write_buf); | 868 | gs_buf_clear(&port->port_write_buf); |
| 811 | 869 | ||
| 812 | tasklet_kill(&port->push); | ||
| 813 | |||
| 814 | tty->driver_data = NULL; | 870 | tty->driver_data = NULL; |
| 815 | port->port_tty = NULL; | 871 | port->port_tty = NULL; |
| 816 | 872 | ||
| @@ -911,15 +967,35 @@ static void gs_unthrottle(struct tty_struct *tty) | |||
| 911 | { | 967 | { |
| 912 | struct gs_port *port = tty->driver_data; | 968 | struct gs_port *port = tty->driver_data; |
| 913 | unsigned long flags; | 969 | unsigned long flags; |
| 914 | unsigned started = 0; | ||
| 915 | 970 | ||
| 916 | spin_lock_irqsave(&port->port_lock, flags); | 971 | spin_lock_irqsave(&port->port_lock, flags); |
| 917 | if (port->port_usb) | 972 | if (port->port_usb) { |
| 918 | started = gs_start_rx(port); | 973 | /* Kickstart read queue processing. We don't do xon/xoff, |
| 974 | * rts/cts, or other handshaking with the host, but if the | ||
| 975 | * read queue backs up enough we'll be NAKing OUT packets. | ||
| 976 | */ | ||
| 977 | tasklet_schedule(&port->push); | ||
| 978 | pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num); | ||
| 979 | } | ||
| 919 | spin_unlock_irqrestore(&port->port_lock, flags); | 980 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 981 | } | ||
| 982 | |||
| 983 | static int gs_break_ctl(struct tty_struct *tty, int duration) | ||
| 984 | { | ||
| 985 | struct gs_port *port = tty->driver_data; | ||
| 986 | int status = 0; | ||
| 987 | struct gserial *gser; | ||
| 988 | |||
| 989 | pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n", | ||
| 990 | port->port_num, duration); | ||
| 920 | 991 | ||
| 921 | pr_vdebug("gs_unthrottle: ttyGS%d, %d packets\n", | 992 | spin_lock_irq(&port->port_lock); |
| 922 | port->port_num, started); | 993 | gser = port->port_usb; |
| 994 | if (gser && gser->send_break) | ||
| 995 | status = gser->send_break(gser, duration); | ||
| 996 | spin_unlock_irq(&port->port_lock); | ||
| 997 | |||
| 998 | return status; | ||
| 923 | } | 999 | } |
| 924 | 1000 | ||
| 925 | static const struct tty_operations gs_tty_ops = { | 1001 | static const struct tty_operations gs_tty_ops = { |
| @@ -931,6 +1007,7 @@ static const struct tty_operations gs_tty_ops = { | |||
| 931 | .write_room = gs_write_room, | 1007 | .write_room = gs_write_room, |
| 932 | .chars_in_buffer = gs_chars_in_buffer, | 1008 | .chars_in_buffer = gs_chars_in_buffer, |
| 933 | .unthrottle = gs_unthrottle, | 1009 | .unthrottle = gs_unthrottle, |
| 1010 | .break_ctl = gs_break_ctl, | ||
| 934 | }; | 1011 | }; |
| 935 | 1012 | ||
| 936 | /*-------------------------------------------------------------------------*/ | 1013 | /*-------------------------------------------------------------------------*/ |
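With .break_ctl wired into gs_tty_ops, a break requested on the gadget-side TTY now reaches the function driver through the new send_break() hook. A minimal user-space check, assuming the usual /dev/ttyGS0 node name on the gadget (adjust for your setup):

/* illustrative only: tcsendbreak() ends up in the driver's .break_ctl */
#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyGS0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open /dev/ttyGS0");
		return 1;
	}
	if (tcsendbreak(fd, 0) < 0)
		perror("tcsendbreak");
	close(fd);
	return 0;
}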
| @@ -953,6 +1030,7 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding) | |||
| 953 | tasklet_init(&port->push, gs_rx_push, (unsigned long) port); | 1030 | tasklet_init(&port->push, gs_rx_push, (unsigned long) port); |
| 954 | 1031 | ||
| 955 | INIT_LIST_HEAD(&port->read_pool); | 1032 | INIT_LIST_HEAD(&port->read_pool); |
| 1033 | INIT_LIST_HEAD(&port->read_queue); | ||
| 956 | INIT_LIST_HEAD(&port->write_pool); | 1034 | INIT_LIST_HEAD(&port->write_pool); |
| 957 | 1035 | ||
| 958 | port->port_num = port_num; | 1036 | port->port_num = port_num; |
| @@ -997,7 +1075,7 @@ int __init gserial_setup(struct usb_gadget *g, unsigned count) | |||
| 997 | 1075 | ||
| 998 | gs_tty_driver->owner = THIS_MODULE; | 1076 | gs_tty_driver->owner = THIS_MODULE; |
| 999 | gs_tty_driver->driver_name = "g_serial"; | 1077 | gs_tty_driver->driver_name = "g_serial"; |
| 1000 | gs_tty_driver->name = "ttyGS"; | 1078 | gs_tty_driver->name = PREFIX; |
| 1001 | /* uses dynamically assigned dev_t values */ | 1079 | /* uses dynamically assigned dev_t values */ |
| 1002 | 1080 | ||
| 1003 | gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; | 1081 | gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; |
| @@ -1104,6 +1182,8 @@ void gserial_cleanup(void) | |||
| 1104 | ports[i].port = NULL; | 1182 | ports[i].port = NULL; |
| 1105 | mutex_unlock(&ports[i].lock); | 1183 | mutex_unlock(&ports[i].lock); |
| 1106 | 1184 | ||
| 1185 | tasklet_kill(&port->push); | ||
| 1186 | |||
| 1107 | /* wait for old opens to finish */ | 1187 | /* wait for old opens to finish */ |
| 1108 | wait_event(port->close_wait, gs_closed(port)); | 1188 | wait_event(port->close_wait, gs_closed(port)); |
| 1109 | 1189 | ||
| @@ -1175,14 +1255,17 @@ int gserial_connect(struct gserial *gser, u8 port_num) | |||
| 1175 | 1255 | ||
| 1176 | /* REVISIT if waiting on "carrier detect", signal. */ | 1256 | /* REVISIT if waiting on "carrier detect", signal. */ |
| 1177 | 1257 | ||
| 1178 | /* REVISIT for ACM, issue "network connection" status notification: | 1258 | /* if it's already open, start I/O ... and notify the serial |
| 1179 | * connected if open_count, else disconnected. | 1259 | * protocol about open/close status (connect/disconnect). |
| 1180 | */ | 1260 | */ |
| 1181 | |||
| 1182 | /* if it's already open, start I/O */ | ||
| 1183 | if (port->open_count) { | 1261 | if (port->open_count) { |
| 1184 | pr_debug("gserial_connect: start ttyGS%d\n", port->port_num); | 1262 | pr_debug("gserial_connect: start ttyGS%d\n", port->port_num); |
| 1185 | gs_start_io(port); | 1263 | gs_start_io(port); |
| 1264 | if (gser->connect) | ||
| 1265 | gser->connect(gser); | ||
| 1266 | } else { | ||
| 1267 | if (gser->disconnect) | ||
| 1268 | gser->disconnect(gser); | ||
| 1186 | } | 1269 | } |
| 1187 | 1270 | ||
| 1188 | spin_unlock_irqrestore(&port->port_lock, flags); | 1271 | spin_unlock_irqrestore(&port->port_lock, flags); |
| @@ -1241,6 +1324,7 @@ void gserial_disconnect(struct gserial *gser) | |||
| 1241 | if (port->open_count == 0 && !port->openclose) | 1324 | if (port->open_count == 0 && !port->openclose) |
| 1242 | gs_buf_free(&port->port_write_buf); | 1325 | gs_buf_free(&port->port_write_buf); |
| 1243 | gs_free_requests(gser->out, &port->read_pool); | 1326 | gs_free_requests(gser->out, &port->read_pool); |
| 1327 | gs_free_requests(gser->out, &port->read_queue); | ||
| 1244 | gs_free_requests(gser->in, &port->write_pool); | 1328 | gs_free_requests(gser->in, &port->write_pool); |
| 1245 | spin_unlock_irqrestore(&port->port_lock, flags); | 1329 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 1246 | } | 1330 | } |
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h index 7b561138f90e..af3910d01aea 100644 --- a/drivers/usb/gadget/u_serial.h +++ b/drivers/usb/gadget/u_serial.h | |||
| @@ -23,8 +23,7 @@ | |||
| 23 | * style I/O using the USB peripheral endpoints listed here, including | 23 | * style I/O using the USB peripheral endpoints listed here, including |
| 24 | * hookups to sysfs and /dev for each logical "tty" device. | 24 | * hookups to sysfs and /dev for each logical "tty" device. |
| 25 | * | 25 | * |
| 26 | * REVISIT need TTY --> USB event flow too, so ACM can report open/close | 26 | * REVISIT at least ACM could support tiocmget() if needed. |
| 27 | * as carrier detect events. Model after ECM. There's more ACM state too. | ||
| 28 | * | 27 | * |
| 29 | * REVISIT someday, allow multiplexing several TTYs over these endpoints. | 28 | * REVISIT someday, allow multiplexing several TTYs over these endpoints. |
| 30 | */ | 29 | */ |
| @@ -41,8 +40,17 @@ struct gserial { | |||
| 41 | 40 | ||
| 42 | /* REVISIT avoid this CDC-ACM support harder ... */ | 41 | /* REVISIT avoid this CDC-ACM support harder ... */ |
| 43 | struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */ | 42 | struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */ |
| 43 | |||
| 44 | /* notification callbacks */ | ||
| 45 | void (*connect)(struct gserial *p); | ||
| 46 | void (*disconnect)(struct gserial *p); | ||
| 47 | int (*send_break)(struct gserial *p, int duration); | ||
| 44 | }; | 48 | }; |
| 45 | 49 | ||
| 50 | /* utilities to allocate/free request and buffer */ | ||
| 51 | struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags); | ||
| 52 | void gs_free_req(struct usb_ep *, struct usb_request *req); | ||
| 53 | |||
| 46 | /* port setup/teardown is handled by gadget driver */ | 54 | /* port setup/teardown is handled by gadget driver */ |
| 47 | int gserial_setup(struct usb_gadget *g, unsigned n_ports); | 55 | int gserial_setup(struct usb_gadget *g, unsigned n_ports); |
| 48 | void gserial_cleanup(void); | 56 | void gserial_cleanup(void); |
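The three new callbacks in struct gserial give the TTY side a path to report open/close and break events up to the USB function driver. A hedged sketch of how a function driver might fill them in; the my_acm_* names are hypothetical and the real f_acm.c wiring may differ:

#include "u_serial.h"

static void my_acm_connect(struct gserial *p)
{
	/* e.g. raise DCD/DSR in the next CDC SERIAL_STATE notification */
}

static void my_acm_disconnect(struct gserial *p)
{
	/* e.g. drop DCD/DSR so the host sees carrier lost */
}

static int my_acm_send_break(struct gserial *p, int duration)
{
	/* e.g. forward the break state to the host; return 0 on success */
	return 0;
}

static void my_acm_wire_port(struct gserial *port)
{
	port->connect = my_acm_connect;
	port->disconnect = my_acm_disconnect;
	port->send_break = my_acm_send_break;
}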
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index c858f2adb929..d22a84f86a33 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c | |||
| @@ -126,9 +126,8 @@ static void isp1760_writel(const unsigned int val, __u32 __iomem *regs) | |||
| 126 | * doesn't quite work because some people have to enforce 32-bit access | 126 | * doesn't quite work because some people have to enforce 32-bit access |
| 127 | */ | 127 | */ |
| 128 | static void priv_read_copy(struct isp1760_hcd *priv, u32 *src, | 128 | static void priv_read_copy(struct isp1760_hcd *priv, u32 *src, |
| 129 | __u32 __iomem *dst, u32 offset, u32 len) | 129 | __u32 __iomem *dst, u32 len) |
| 130 | { | 130 | { |
| 131 | struct usb_hcd *hcd = priv_to_hcd(priv); | ||
| 132 | u32 val; | 131 | u32 val; |
| 133 | u8 *buff8; | 132 | u8 *buff8; |
| 134 | 133 | ||
| @@ -136,11 +135,6 @@ static void priv_read_copy(struct isp1760_hcd *priv, u32 *src, | |||
| 136 | printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len); | 135 | printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len); |
| 137 | return; | 136 | return; |
| 138 | } | 137 | } |
| 139 | isp1760_writel(offset, hcd->regs + HC_MEMORY_REG); | ||
| 140 | /* XXX | ||
| 141 | * 90nsec delay, the spec says something how this could be avoided. | ||
| 142 | */ | ||
| 143 | mdelay(1); | ||
| 144 | 138 | ||
| 145 | while (len >= 4) { | 139 | while (len >= 4) { |
| 146 | *src = __raw_readl(dst); | 140 | *src = __raw_readl(dst); |
| @@ -987,8 +981,20 @@ static void do_atl_int(struct usb_hcd *usb_hcd) | |||
| 987 | printk(KERN_ERR "qh is 0\n"); | 981 | printk(KERN_ERR "qh is 0\n"); |
| 988 | continue; | 982 | continue; |
| 989 | } | 983 | } |
| 990 | priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs, | 984 | isp1760_writel(atl_regs + ISP_BANK(0), usb_hcd->regs + |
| 991 | atl_regs, sizeof(ptd)); | 985 | HC_MEMORY_REG); |
| 986 | isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + | ||
| 987 | HC_MEMORY_REG); | ||
| 988 | /* | ||
| 989 | * write bank1 address twice to ensure the 90ns delay (time | ||
| 990 | * between BANK0 write and the priv_read_copy() call is at | ||
| 991 | * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns) | ||
| 992 | */ | ||
| 993 | isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + | ||
| 994 | HC_MEMORY_REG); | ||
| 995 | |||
| 996 | priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs + | ||
| 997 | ISP_BANK(0), sizeof(ptd)); | ||
| 992 | 998 | ||
| 993 | dw1 = le32_to_cpu(ptd.dw1); | 999 | dw1 = le32_to_cpu(ptd.dw1); |
| 994 | dw2 = le32_to_cpu(ptd.dw2); | 1000 | dw2 = le32_to_cpu(ptd.dw2); |
| @@ -1091,7 +1097,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd) | |||
| 1091 | case IN_PID: | 1097 | case IN_PID: |
| 1092 | priv_read_copy(priv, | 1098 | priv_read_copy(priv, |
| 1093 | priv->atl_ints[queue_entry].data_buffer, | 1099 | priv->atl_ints[queue_entry].data_buffer, |
| 1094 | usb_hcd->regs + payload, payload, | 1100 | usb_hcd->regs + payload + ISP_BANK(1), |
| 1095 | length); | 1101 | length); |
| 1096 | 1102 | ||
| 1097 | case OUT_PID: | 1103 | case OUT_PID: |
| @@ -1122,11 +1128,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd) | |||
| 1122 | } else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) { | 1128 | } else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) { |
| 1123 | /* short BULK received */ | 1129 | /* short BULK received */ |
| 1124 | 1130 | ||
| 1125 | printk(KERN_ERR "short bulk, %d instead %zu\n", length, | ||
| 1126 | qtd->length); | ||
| 1127 | if (urb->transfer_flags & URB_SHORT_NOT_OK) { | 1131 | if (urb->transfer_flags & URB_SHORT_NOT_OK) { |
| 1128 | urb->status = -EREMOTEIO; | 1132 | urb->status = -EREMOTEIO; |
| 1129 | printk(KERN_ERR "not okey\n"); | 1133 | isp1760_dbg(priv, "short bulk, %d instead %zu " |
| 1134 | "with URB_SHORT_NOT_OK flag.\n", | ||
| 1135 | length, qtd->length); | ||
| 1130 | } | 1136 | } |
| 1131 | 1137 | ||
| 1132 | if (urb->status == -EINPROGRESS) | 1138 | if (urb->status == -EINPROGRESS) |
| @@ -1206,8 +1212,20 @@ static void do_intl_int(struct usb_hcd *usb_hcd) | |||
| 1206 | continue; | 1212 | continue; |
| 1207 | } | 1213 | } |
| 1208 | 1214 | ||
| 1209 | priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs, | 1215 | isp1760_writel(int_regs + ISP_BANK(0), usb_hcd->regs + |
| 1210 | int_regs, sizeof(ptd)); | 1216 | HC_MEMORY_REG); |
| 1217 | isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + | ||
| 1218 | HC_MEMORY_REG); | ||
| 1219 | /* | ||
| 1220 | * write bank1 address twice to ensure the 90ns delay (time | ||
| 1221 | * between BANK0 write and the priv_read_copy() call is at | ||
| 1222 | * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns) | ||
| 1223 | */ | ||
| 1224 | isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + | ||
| 1225 | HC_MEMORY_REG); | ||
| 1226 | |||
| 1227 | priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs + | ||
| 1228 | ISP_BANK(0), sizeof(ptd)); | ||
| 1211 | dw1 = le32_to_cpu(ptd.dw1); | 1229 | dw1 = le32_to_cpu(ptd.dw1); |
| 1212 | dw3 = le32_to_cpu(ptd.dw3); | 1230 | dw3 = le32_to_cpu(ptd.dw3); |
| 1213 | check_int_err_status(le32_to_cpu(ptd.dw4)); | 1231 | check_int_err_status(le32_to_cpu(ptd.dw4)); |
| @@ -1242,7 +1260,7 @@ static void do_intl_int(struct usb_hcd *usb_hcd) | |||
| 1242 | case IN_PID: | 1260 | case IN_PID: |
| 1243 | priv_read_copy(priv, | 1261 | priv_read_copy(priv, |
| 1244 | priv->int_ints[queue_entry].data_buffer, | 1262 | priv->int_ints[queue_entry].data_buffer, |
| 1245 | usb_hcd->regs + payload , payload, | 1263 | usb_hcd->regs + payload + ISP_BANK(1), |
| 1246 | length); | 1264 | length); |
| 1247 | case OUT_PID: | 1265 | case OUT_PID: |
| 1248 | 1266 | ||
| @@ -1615,8 +1633,7 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | |||
| 1615 | return -EPIPE; | 1633 | return -EPIPE; |
| 1616 | } | 1634 | } |
| 1617 | 1635 | ||
| 1618 | isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe); | 1636 | return isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe); |
| 1619 | return 0; | ||
| 1620 | } | 1637 | } |
| 1621 | 1638 | ||
| 1622 | static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, | 1639 | static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, |
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h index 6473dd86993c..4377277667d9 100644 --- a/drivers/usb/host/isp1760-hcd.h +++ b/drivers/usb/host/isp1760-hcd.h | |||
| @@ -54,6 +54,8 @@ void deinit_kmem_cache(void); | |||
| 54 | #define BUFFER_MAP 0x7 | 54 | #define BUFFER_MAP 0x7 |
| 55 | 55 | ||
| 56 | #define HC_MEMORY_REG 0x33c | 56 | #define HC_MEMORY_REG 0x33c |
| 57 | #define ISP_BANK(x) ((x) << 16) | ||
| 58 | |||
| 57 | #define HC_PORT1_CTRL 0x374 | 59 | #define HC_PORT1_CTRL 0x374 |
| 58 | #define PORT1_POWER (3 << 3) | 60 | #define PORT1_POWER (3 << 3) |
| 59 | #define PORT1_INIT1 (1 << 7) | 61 | #define PORT1_INIT1 (1 << 7) |
| @@ -119,6 +121,9 @@ struct inter_packet_info { | |||
| 119 | typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh, | 121 | typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh, |
| 120 | struct isp1760_qtd *qtd); | 122 | struct isp1760_qtd *qtd); |
| 121 | 123 | ||
| 124 | #define isp1760_dbg(priv, fmt, args...) \ | ||
| 125 | dev_dbg(priv_to_hcd(priv)->self.controller, fmt, ##args) | ||
| 126 | |||
| 122 | #define isp1760_info(priv, fmt, args...) \ | 127 | #define isp1760_info(priv, fmt, args...) \ |
| 123 | dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args) | 128 | dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args) |
| 124 | 129 | ||
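ISP_BANK(x) folds the bank number into the upper bits of both the value written to HC_MEMORY_REG and the MMIO offset used for the subsequent copy, so bank 0 can be pointed at the PTD while bank 1 is pointed at its payload. Since the same program-the-banks-then-copy sequence now appears in both do_atl_int() and do_intl_int(), a possible helper (hypothetical, not part of the patch) could group the PTD read:

/* groups the bank-select writes with the PTD copy; the duplicate BANK1
 * write is what supplies the >= 90 ns settling time before the read
 */
static void isp1760_bank_read_ptd(struct usb_hcd *hcd,
				  struct isp1760_hcd *priv, u32 *buf,
				  u32 ptd_offset, u32 payload, u32 len)
{
	isp1760_writel(ptd_offset + ISP_BANK(0), hcd->regs + HC_MEMORY_REG);
	isp1760_writel(payload + ISP_BANK(1), hcd->regs + HC_MEMORY_REG);
	isp1760_writel(payload + ISP_BANK(1), hcd->regs + HC_MEMORY_REG);

	priv_read_copy(priv, buf, hcd->regs + ptd_offset + ISP_BANK(0), len);
}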
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 26bc47941d01..89901962cbfd 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
| @@ -86,6 +86,21 @@ static void ohci_stop (struct usb_hcd *hcd); | |||
| 86 | static int ohci_restart (struct ohci_hcd *ohci); | 86 | static int ohci_restart (struct ohci_hcd *ohci); |
| 87 | #endif | 87 | #endif |
| 88 | 88 | ||
| 89 | #ifdef CONFIG_PCI | ||
| 90 | static void quirk_amd_pll(int state); | ||
| 91 | static void amd_iso_dev_put(void); | ||
| 92 | #else | ||
| 93 | static inline void quirk_amd_pll(int state) | ||
| 94 | { | ||
| 95 | return; | ||
| 96 | } | ||
| 97 | static inline void amd_iso_dev_put(void) | ||
| 98 | { | ||
| 99 | return; | ||
| 100 | } | ||
| 101 | #endif | ||
| 102 | |||
| 103 | |||
| 89 | #include "ohci-hub.c" | 104 | #include "ohci-hub.c" |
| 90 | #include "ohci-dbg.c" | 105 | #include "ohci-dbg.c" |
| 91 | #include "ohci-mem.c" | 106 | #include "ohci-mem.c" |
| @@ -483,6 +498,9 @@ static int ohci_init (struct ohci_hcd *ohci) | |||
| 483 | int ret; | 498 | int ret; |
| 484 | struct usb_hcd *hcd = ohci_to_hcd(ohci); | 499 | struct usb_hcd *hcd = ohci_to_hcd(ohci); |
| 485 | 500 | ||
| 501 | if (distrust_firmware) | ||
| 502 | ohci->flags |= OHCI_QUIRK_HUB_POWER; | ||
| 503 | |||
| 486 | disable (ohci); | 504 | disable (ohci); |
| 487 | ohci->regs = hcd->regs; | 505 | ohci->regs = hcd->regs; |
| 488 | 506 | ||
| @@ -689,7 +707,8 @@ retry: | |||
| 689 | temp |= RH_A_NOCP; | 707 | temp |= RH_A_NOCP; |
| 690 | temp &= ~(RH_A_POTPGT | RH_A_NPS); | 708 | temp &= ~(RH_A_POTPGT | RH_A_NPS); |
| 691 | ohci_writel (ohci, temp, &ohci->regs->roothub.a); | 709 | ohci_writel (ohci, temp, &ohci->regs->roothub.a); |
| 692 | } else if ((ohci->flags & OHCI_QUIRK_AMD756) || distrust_firmware) { | 710 | } else if ((ohci->flags & OHCI_QUIRK_AMD756) || |
| 711 | (ohci->flags & OHCI_QUIRK_HUB_POWER)) { | ||
| 693 | /* hub power always on; required for AMD-756 and some | 712 | /* hub power always on; required for AMD-756 and some |
| 694 | * Mac platforms. ganged overcurrent reporting, if any. | 713 | * Mac platforms. ganged overcurrent reporting, if any. |
| 695 | */ | 714 | */ |
| @@ -882,6 +901,8 @@ static void ohci_stop (struct usb_hcd *hcd) | |||
| 882 | 901 | ||
| 883 | if (quirk_zfmicro(ohci)) | 902 | if (quirk_zfmicro(ohci)) |
| 884 | del_timer(&ohci->unlink_watchdog); | 903 | del_timer(&ohci->unlink_watchdog); |
| 904 | if (quirk_amdiso(ohci)) | ||
| 905 | amd_iso_dev_put(); | ||
| 885 | 906 | ||
| 886 | remove_debug_files (ohci); | 907 | remove_debug_files (ohci); |
| 887 | ohci_mem_cleanup (ohci); | 908 | ohci_mem_cleanup (ohci); |
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index b56739221d11..439beb784f3e 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c | |||
| @@ -483,6 +483,13 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf) | |||
| 483 | length++; | 483 | length++; |
| 484 | } | 484 | } |
| 485 | 485 | ||
| 486 | /* Some broken controllers never turn off RHSC in the interrupt | ||
| 487 | * status register. For their sake we won't re-enable RHSC | ||
| 488 | * interrupts if the flag is already set. | ||
| 489 | */ | ||
| 490 | if (ohci_readl(ohci, &ohci->regs->intrstatus) & OHCI_INTR_RHSC) | ||
| 491 | changed = 1; | ||
| 492 | |||
| 486 | /* look at each port */ | 493 | /* look at each port */ |
| 487 | for (i = 0; i < ohci->num_ports; i++) { | 494 | for (i = 0; i < ohci->num_ports; i++) { |
| 488 | u32 status = roothub_portstatus (ohci, i); | 495 | u32 status = roothub_portstatus (ohci, i); |
| @@ -572,8 +579,6 @@ static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port) | |||
| 572 | return 0; | 579 | return 0; |
| 573 | } | 580 | } |
| 574 | 581 | ||
| 575 | static void start_hnp(struct ohci_hcd *ohci); | ||
| 576 | |||
| 577 | #else | 582 | #else |
| 578 | 583 | ||
| 579 | #define ohci_start_port_reset NULL | 584 | #define ohci_start_port_reset NULL |
| @@ -760,7 +765,7 @@ static int ohci_hub_control ( | |||
| 760 | #ifdef CONFIG_USB_OTG | 765 | #ifdef CONFIG_USB_OTG |
| 761 | if (hcd->self.otg_port == (wIndex + 1) | 766 | if (hcd->self.otg_port == (wIndex + 1) |
| 762 | && hcd->self.b_hnp_enable) | 767 | && hcd->self.b_hnp_enable) |
| 763 | start_hnp(ohci); | 768 | ohci->start_hnp(ohci); |
| 764 | else | 769 | else |
| 765 | #endif | 770 | #endif |
| 766 | ohci_writel (ohci, RH_PS_PSS, | 771 | ohci_writel (ohci, RH_PS_PSS, |
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index 94dfca02f7e1..3d532b709670 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c | |||
| @@ -225,6 +225,7 @@ static int ohci_omap_init(struct usb_hcd *hcd) | |||
| 225 | dev_err(hcd->self.controller, "can't find transceiver\n"); | 225 | dev_err(hcd->self.controller, "can't find transceiver\n"); |
| 226 | return -ENODEV; | 226 | return -ENODEV; |
| 227 | } | 227 | } |
| 228 | ohci->start_hnp = start_hnp; | ||
| 228 | } | 229 | } |
| 229 | #endif | 230 | #endif |
| 230 | 231 | ||
| @@ -260,7 +261,7 @@ static int ohci_omap_init(struct usb_hcd *hcd) | |||
| 260 | omap_cfg_reg(W4_USB_HIGHZ); | 261 | omap_cfg_reg(W4_USB_HIGHZ); |
| 261 | } | 262 | } |
| 262 | ohci_writel(ohci, rh, &ohci->regs->roothub.a); | 263 | ohci_writel(ohci, rh, &ohci->regs->roothub.a); |
| 263 | distrust_firmware = 0; | 264 | ohci->flags &= ~OHCI_QUIRK_HUB_POWER; |
| 264 | } else if (machine_is_nokia770()) { | 265 | } else if (machine_is_nokia770()) { |
| 265 | /* We require a self-powered hub, which should have | 266 | /* We require a self-powered hub, which should have |
| 266 | * plenty of power. */ | 267 | * plenty of power. */ |
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 4696cc912e16..083e8df0a817 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c | |||
| @@ -18,6 +18,28 @@ | |||
| 18 | #error "This file is PCI bus glue. CONFIG_PCI must be defined." | 18 | #error "This file is PCI bus glue. CONFIG_PCI must be defined." |
| 19 | #endif | 19 | #endif |
| 20 | 20 | ||
| 21 | #include <linux/pci.h> | ||
| 22 | #include <linux/io.h> | ||
| 23 | |||
| 24 | |||
| 25 | /* constants used to work around PM-related transfer | ||
| 26 | * glitches in some AMD 700 series southbridges | ||
| 27 | */ | ||
| 28 | #define AB_REG_BAR 0xf0 | ||
| 29 | #define AB_INDX(addr) ((addr) + 0x00) | ||
| 30 | #define AB_DATA(addr) ((addr) + 0x04) | ||
| 31 | #define AX_INDXC 0x30 | ||
| 32 | #define AX_DATAC 0x34 | ||
| 33 | |||
| 34 | #define NB_PCIE_INDX_ADDR 0xe0 | ||
| 35 | #define NB_PCIE_INDX_DATA 0xe4 | ||
| 36 | #define PCIE_P_CNTL 0x10040 | ||
| 37 | #define BIF_NB 0x10002 | ||
| 38 | |||
| 39 | static struct pci_dev *amd_smbus_dev; | ||
| 40 | static struct pci_dev *amd_hb_dev; | ||
| 41 | static int amd_ohci_iso_count; | ||
| 42 | |||
| 21 | /*-------------------------------------------------------------------------*/ | 43 | /*-------------------------------------------------------------------------*/ |
| 22 | 44 | ||
| 23 | static int broken_suspend(struct usb_hcd *hcd) | 45 | static int broken_suspend(struct usb_hcd *hcd) |
| @@ -143,6 +165,103 @@ static int ohci_quirk_nec(struct usb_hcd *hcd) | |||
| 143 | return 0; | 165 | return 0; |
| 144 | } | 166 | } |
| 145 | 167 | ||
| 168 | static int ohci_quirk_amd700(struct usb_hcd *hcd) | ||
| 169 | { | ||
| 170 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); | ||
| 171 | u8 rev = 0; | ||
| 172 | |||
| 173 | if (!amd_smbus_dev) | ||
| 174 | amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, | ||
| 175 | PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); | ||
| 176 | if (!amd_smbus_dev) | ||
| 177 | return 0; | ||
| 178 | |||
| 179 | pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev); | ||
| 180 | if ((rev > 0x3b) || (rev < 0x30)) { | ||
| 181 | pci_dev_put(amd_smbus_dev); | ||
| 182 | amd_smbus_dev = NULL; | ||
| 183 | return 0; | ||
| 184 | } | ||
| 185 | |||
| 186 | amd_ohci_iso_count++; | ||
| 187 | |||
| 188 | if (!amd_hb_dev) | ||
| 189 | amd_hb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9600, NULL); | ||
| 190 | |||
| 191 | ohci->flags |= OHCI_QUIRK_AMD_ISO; | ||
| 192 | ohci_dbg(ohci, "enabled AMD ISO transfers quirk\n"); | ||
| 193 | |||
| 194 | return 0; | ||
| 195 | } | ||
| 196 | |||
| 197 | /* | ||
| 198 | * The hardware normally enables the A-link power management feature, which | ||
| 199 | * lets the system lower the power consumption in idle states. | ||
| 200 | * | ||
| 201 | * Assume the system is configured to have USB 1.1 ISO transfers going | ||
| 202 | * to or from a USB device. Without this quirk, that stream may stutter | ||
| 203 | * or have breaks occasionally. For transfers going to speakers, this | ||
| 204 | * makes a very audible mess... | ||
| 205 | * | ||
| 206 | * That audio playback corruption is due to the audio stream getting | ||
| 207 | * interrupted occasionally when the link goes into a lower power state. | ||
| 208 | * This USB quirk prevents the link from entering that lower power state | ||
| 209 | * during audio playback or other ISO operations. | ||
| 210 | */ | ||
| 211 | static void quirk_amd_pll(int on) | ||
| 212 | { | ||
| 213 | u32 addr; | ||
| 214 | u32 val; | ||
| 215 | u32 bit = (on > 0) ? 1 : 0; | ||
| 216 | |||
| 217 | pci_read_config_dword(amd_smbus_dev, AB_REG_BAR, &addr); | ||
| 218 | |||
| 219 | /* BIT names/meanings are NDA-protected, sorry ... */ | ||
| 220 | |||
| 221 | outl(AX_INDXC, AB_INDX(addr)); | ||
| 222 | outl(0x40, AB_DATA(addr)); | ||
| 223 | outl(AX_DATAC, AB_INDX(addr)); | ||
| 224 | val = inl(AB_DATA(addr)); | ||
| 225 | val &= ~((1 << 3) | (1 << 4) | (1 << 9)); | ||
| 226 | val |= (bit << 3) | ((!bit) << 4) | ((!bit) << 9); | ||
| 227 | outl(val, AB_DATA(addr)); | ||
| 228 | |||
| 229 | if (amd_hb_dev) { | ||
| 230 | addr = PCIE_P_CNTL; | ||
| 231 | pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr); | ||
| 232 | |||
| 233 | pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val); | ||
| 234 | val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12)); | ||
| 235 | val |= bit | (bit << 3) | (bit << 12); | ||
| 236 | val |= ((!bit) << 4) | ((!bit) << 9); | ||
| 237 | pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val); | ||
| 238 | |||
| 239 | addr = BIF_NB; | ||
| 240 | pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr); | ||
| 241 | |||
| 242 | pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val); | ||
| 243 | val &= ~(1 << 8); | ||
| 244 | val |= bit << 8; | ||
| 245 | pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val); | ||
| 246 | } | ||
| 247 | } | ||
| 248 | |||
| 249 | static void amd_iso_dev_put(void) | ||
| 250 | { | ||
| 251 | amd_ohci_iso_count--; | ||
| 252 | if (amd_ohci_iso_count == 0) { | ||
| 253 | if (amd_smbus_dev) { | ||
| 254 | pci_dev_put(amd_smbus_dev); | ||
| 255 | amd_smbus_dev = NULL; | ||
| 256 | } | ||
| 257 | if (amd_hb_dev) { | ||
| 258 | pci_dev_put(amd_hb_dev); | ||
| 259 | amd_hb_dev = NULL; | ||
| 260 | } | ||
| 261 | } | ||
| 262 | |||
| 263 | } | ||
| 264 | |||
| 146 | /* List of quirks for OHCI */ | 265 | /* List of quirks for OHCI */ |
| 147 | static const struct pci_device_id ohci_pci_quirks[] = { | 266 | static const struct pci_device_id ohci_pci_quirks[] = { |
| 148 | { | 267 | { |
| @@ -181,6 +300,19 @@ static const struct pci_device_id ohci_pci_quirks[] = { | |||
| 181 | PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152), | 300 | PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152), |
| 182 | .driver_data = (unsigned long) broken_suspend, | 301 | .driver_data = (unsigned long) broken_suspend, |
| 183 | }, | 302 | }, |
| 303 | { | ||
| 304 | PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4397), | ||
| 305 | .driver_data = (unsigned long)ohci_quirk_amd700, | ||
| 306 | }, | ||
| 307 | { | ||
| 308 | PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4398), | ||
| 309 | .driver_data = (unsigned long)ohci_quirk_amd700, | ||
| 310 | }, | ||
| 311 | { | ||
| 312 | PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), | ||
| 313 | .driver_data = (unsigned long)ohci_quirk_amd700, | ||
| 314 | }, | ||
| 315 | |||
| 184 | /* FIXME for some of the early AMD 760 southbridges, OHCI | 316 | /* FIXME for some of the early AMD 760 southbridges, OHCI |
| 185 | * won't work at all. blacklist them. | 317 | * won't work at all. blacklist them. |
| 186 | */ | 318 | */ |
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index 6a9b4c557953..c2d80f80448b 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c | |||
| @@ -49,6 +49,9 @@ __acquires(ohci->lock) | |||
| 49 | switch (usb_pipetype (urb->pipe)) { | 49 | switch (usb_pipetype (urb->pipe)) { |
| 50 | case PIPE_ISOCHRONOUS: | 50 | case PIPE_ISOCHRONOUS: |
| 51 | ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--; | 51 | ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--; |
| 52 | if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 | ||
| 53 | && quirk_amdiso(ohci)) | ||
| 54 | quirk_amd_pll(1); | ||
| 52 | break; | 55 | break; |
| 53 | case PIPE_INTERRUPT: | 56 | case PIPE_INTERRUPT: |
| 54 | ohci_to_hcd(ohci)->self.bandwidth_int_reqs--; | 57 | ohci_to_hcd(ohci)->self.bandwidth_int_reqs--; |
| @@ -677,6 +680,9 @@ static void td_submit_urb ( | |||
| 677 | data + urb->iso_frame_desc [cnt].offset, | 680 | data + urb->iso_frame_desc [cnt].offset, |
| 678 | urb->iso_frame_desc [cnt].length, urb, cnt); | 681 | urb->iso_frame_desc [cnt].length, urb, cnt); |
| 679 | } | 682 | } |
| 683 | if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 | ||
| 684 | && quirk_amdiso(ohci)) | ||
| 685 | quirk_amd_pll(0); | ||
| 680 | periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0 | 686 | periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0 |
| 681 | && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0; | 687 | && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0; |
| 682 | break; | 688 | break; |
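Note the argument sense here: judging from the two call sites in the ohci-q.c hunks above, quirk_amd_pll(0) switches the A-link power-management feature off when the first ISO URB is queued, and quirk_amd_pll(1) switches it back on once the last ISO URB completes. Purely illustrative wrappers (hypothetical names, not part of the patch) that make that reading explicit:

static inline void my_amd_iso_started(struct ohci_hcd *ohci)
{
	if (quirk_amdiso(ohci))
		quirk_amd_pll(0);	/* keep the A-link PLL running during ISO */
}

static inline void my_amd_iso_finished(struct ohci_hcd *ohci)
{
	if (quirk_amdiso(ohci))
		quirk_amd_pll(1);	/* allow idle power saving again */
}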
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h index dc544ddc7849..faf622eafce7 100644 --- a/drivers/usb/host/ohci.h +++ b/drivers/usb/host/ohci.h | |||
| @@ -371,6 +371,7 @@ struct ohci_hcd { | |||
| 371 | * other external transceivers should be software-transparent | 371 | * other external transceivers should be software-transparent |
| 372 | */ | 372 | */ |
| 373 | struct otg_transceiver *transceiver; | 373 | struct otg_transceiver *transceiver; |
| 374 | void (*start_hnp)(struct ohci_hcd *ohci); | ||
| 374 | 375 | ||
| 375 | /* | 376 | /* |
| 376 | * memory management for queue data structures | 377 | * memory management for queue data structures |
| @@ -399,6 +400,8 @@ struct ohci_hcd { | |||
| 399 | #define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/ | 400 | #define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/ |
| 400 | #define OHCI_QUIRK_NEC 0x40 /* lost interrupts */ | 401 | #define OHCI_QUIRK_NEC 0x40 /* lost interrupts */ |
| 401 | #define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */ | 402 | #define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */ |
| 403 | #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ | ||
| 404 | #define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/ | ||
| 402 | // there are also chip quirks/bugs in init logic | 405 | // there are also chip quirks/bugs in init logic |
| 403 | 406 | ||
| 404 | struct work_struct nec_work; /* Worker for NEC quirk */ | 407 | struct work_struct nec_work; /* Worker for NEC quirk */ |
| @@ -426,6 +429,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci) | |||
| 426 | { | 429 | { |
| 427 | return ohci->flags & OHCI_QUIRK_ZFMICRO; | 430 | return ohci->flags & OHCI_QUIRK_ZFMICRO; |
| 428 | } | 431 | } |
| 432 | static inline int quirk_amdiso(struct ohci_hcd *ohci) | ||
| 433 | { | ||
| 434 | return ohci->flags & OHCI_QUIRK_AMD_ISO; | ||
| 435 | } | ||
| 429 | #else | 436 | #else |
| 430 | static inline int quirk_nec(struct ohci_hcd *ohci) | 437 | static inline int quirk_nec(struct ohci_hcd *ohci) |
| 431 | { | 438 | { |
| @@ -435,6 +442,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci) | |||
| 435 | { | 442 | { |
| 436 | return 0; | 443 | return 0; |
| 437 | } | 444 | } |
| 445 | static inline int quirk_amdiso(struct ohci_hcd *ohci) | ||
| 446 | { | ||
| 447 | return 0; | ||
| 448 | } | ||
| 438 | #endif | 449 | #endif |
| 439 | 450 | ||
| 440 | /* convert between an hcd pointer and the corresponding ohci_hcd */ | 451 | /* convert between an hcd pointer and the corresponding ohci_hcd */ |
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index d5f02dddb120..ea7126f99cab 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c | |||
| @@ -964,11 +964,34 @@ static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum) | |||
| 964 | disable_irq_nrdy(r8a66597, pipenum); | 964 | disable_irq_nrdy(r8a66597, pipenum); |
| 965 | } | 965 | } |
| 966 | 966 | ||
| 967 | static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597) | ||
| 968 | { | ||
| 969 | mod_timer(&r8a66597->rh_timer, | ||
| 970 | jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME)); | ||
| 971 | } | ||
| 972 | |||
| 973 | static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port, | ||
| 974 | int connect) | ||
| 975 | { | ||
| 976 | struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; | ||
| 977 | |||
| 978 | rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST; | ||
| 979 | rh->scount = R8A66597_MAX_SAMPLING; | ||
| 980 | if (connect) | ||
| 981 | rh->port |= 1 << USB_PORT_FEAT_CONNECTION; | ||
| 982 | else | ||
| 983 | rh->port &= ~(1 << USB_PORT_FEAT_CONNECTION); | ||
| 984 | rh->port |= 1 << USB_PORT_FEAT_C_CONNECTION; | ||
| 985 | |||
| 986 | r8a66597_root_hub_start_polling(r8a66597); | ||
| 987 | } | ||
| 988 | |||
| 967 | /* this function must be called with interrupt disabled */ | 989 | /* this function must be called with interrupt disabled */ |
| 968 | static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, | 990 | static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, |
| 969 | u16 syssts) | 991 | u16 syssts) |
| 970 | { | 992 | { |
| 971 | if (syssts == SE0) { | 993 | if (syssts == SE0) { |
| 994 | r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); | ||
| 972 | r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); | 995 | r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); |
| 973 | return; | 996 | return; |
| 974 | } | 997 | } |
| @@ -1002,13 +1025,10 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port) | |||
| 1002 | { | 1025 | { |
| 1003 | struct r8a66597_device *dev = r8a66597->root_hub[port].dev; | 1026 | struct r8a66597_device *dev = r8a66597->root_hub[port].dev; |
| 1004 | 1027 | ||
| 1005 | r8a66597->root_hub[port].port &= ~(1 << USB_PORT_FEAT_CONNECTION); | ||
| 1006 | r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_C_CONNECTION); | ||
| 1007 | |||
| 1008 | disable_r8a66597_pipe_all(r8a66597, dev); | 1028 | disable_r8a66597_pipe_all(r8a66597, dev); |
| 1009 | free_usb_address(r8a66597, dev); | 1029 | free_usb_address(r8a66597, dev); |
| 1010 | 1030 | ||
| 1011 | r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); | 1031 | start_root_hub_sampling(r8a66597, port, 0); |
| 1012 | } | 1032 | } |
| 1013 | 1033 | ||
| 1014 | /* this function must be called with interrupt disabled */ | 1034 | /* this function must be called with interrupt disabled */ |
| @@ -1551,23 +1571,6 @@ static void irq_pipe_nrdy(struct r8a66597 *r8a66597) | |||
| 1551 | } | 1571 | } |
| 1552 | } | 1572 | } |
| 1553 | 1573 | ||
| 1554 | static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597) | ||
| 1555 | { | ||
| 1556 | mod_timer(&r8a66597->rh_timer, | ||
| 1557 | jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME)); | ||
| 1558 | } | ||
| 1559 | |||
| 1560 | static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port) | ||
| 1561 | { | ||
| 1562 | struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; | ||
| 1563 | |||
| 1564 | rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST; | ||
| 1565 | rh->scount = R8A66597_MAX_SAMPLING; | ||
| 1566 | r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_CONNECTION) | ||
| 1567 | | (1 << USB_PORT_FEAT_C_CONNECTION); | ||
| 1568 | r8a66597_root_hub_start_polling(r8a66597); | ||
| 1569 | } | ||
| 1570 | |||
| 1571 | static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) | 1574 | static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) |
| 1572 | { | 1575 | { |
| 1573 | struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd); | 1576 | struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd); |
| @@ -1594,7 +1597,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) | |||
| 1594 | r8a66597_bclr(r8a66597, ATTCHE, INTENB2); | 1597 | r8a66597_bclr(r8a66597, ATTCHE, INTENB2); |
| 1595 | 1598 | ||
| 1596 | /* start usb bus sampling */ | 1599 | /* start usb bus sampling */ |
| 1597 | start_root_hub_sampling(r8a66597, 1); | 1600 | start_root_hub_sampling(r8a66597, 1, 1); |
| 1598 | } | 1601 | } |
| 1599 | if (mask2 & DTCH) { | 1602 | if (mask2 & DTCH) { |
| 1600 | r8a66597_write(r8a66597, ~DTCH, INTSTS2); | 1603 | r8a66597_write(r8a66597, ~DTCH, INTSTS2); |
| @@ -1609,7 +1612,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) | |||
| 1609 | r8a66597_bclr(r8a66597, ATTCHE, INTENB1); | 1612 | r8a66597_bclr(r8a66597, ATTCHE, INTENB1); |
| 1610 | 1613 | ||
| 1611 | /* start usb bus sampling */ | 1614 | /* start usb bus sampling */ |
| 1612 | start_root_hub_sampling(r8a66597, 0); | 1615 | start_root_hub_sampling(r8a66597, 0, 1); |
| 1613 | } | 1616 | } |
| 1614 | if (mask1 & DTCH) { | 1617 | if (mask1 & DTCH) { |
| 1615 | r8a66597_write(r8a66597, ~DTCH, INTSTS1); | 1618 | r8a66597_write(r8a66597, ~DTCH, INTSTS1); |
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 001789c9a11a..4ea50e0abcbb 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig | |||
| @@ -42,16 +42,6 @@ config USB_ADUTUX | |||
| 42 | To compile this driver as a module, choose M here. The module | 42 | To compile this driver as a module, choose M here. The module |
| 43 | will be called adutux. | 43 | will be called adutux. |
| 44 | 44 | ||
| 45 | config USB_AUERSWALD | ||
| 46 | tristate "USB Auerswald ISDN support" | ||
| 47 | depends on USB | ||
| 48 | help | ||
| 49 | Say Y here if you want to connect an Auerswald USB ISDN Device | ||
| 50 | to your computer's USB port. | ||
| 51 | |||
| 52 | To compile this driver as a module, choose M here: the | ||
| 53 | module will be called auerswald. | ||
| 54 | |||
| 55 | config USB_RIO500 | 45 | config USB_RIO500 |
| 56 | tristate "USB Diamond Rio500 support" | 46 | tristate "USB Diamond Rio500 support" |
| 57 | depends on USB | 47 | depends on USB |
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile index aba091cb5ec0..45b4e12afb08 100644 --- a/drivers/usb/misc/Makefile +++ b/drivers/usb/misc/Makefile | |||
| @@ -5,7 +5,6 @@ | |||
| 5 | 5 | ||
| 6 | obj-$(CONFIG_USB_ADUTUX) += adutux.o | 6 | obj-$(CONFIG_USB_ADUTUX) += adutux.o |
| 7 | obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o | 7 | obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o |
| 8 | obj-$(CONFIG_USB_AUERSWALD) += auerswald.o | ||
| 9 | obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o | 8 | obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o |
| 10 | obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o | 9 | obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o |
| 11 | obj-$(CONFIG_USB_CYTHERM) += cytherm.o | 10 | obj-$(CONFIG_USB_CYTHERM) += cytherm.o |
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c deleted file mode 100644 index d2f61d5510e7..000000000000 --- a/drivers/usb/misc/auerswald.c +++ /dev/null | |||
| @@ -1,2152 +0,0 @@ | |||
| 1 | /*****************************************************************************/ | ||
| 2 | /* | ||
| 3 | * auerswald.c -- Auerswald PBX/System Telephone usb driver. | ||
| 4 | * | ||
| 5 | * Copyright (C) 2001 Wolfgang Mües (wolfgang@iksw-muees.de) | ||
| 6 | * | ||
| 7 | * Very much code of this driver is borrowed from dabusb.c (Deti Fliegl) | ||
| 8 | * and from the USB Skeleton driver (Greg Kroah-Hartman). Thank you. | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License as published by | ||
| 12 | * the Free Software Foundation; either version 2 of the License, or | ||
| 13 | * (at your option) any later version. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, | ||
| 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | * GNU General Public License for more details. | ||
| 19 | * | ||
| 20 | * You should have received a copy of the GNU General Public License | ||
| 21 | * along with this program; if not, write to the Free Software | ||
| 22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
| 23 | */ | ||
| 24 | /*****************************************************************************/ | ||
| 25 | |||
| 26 | /* Standard Linux module include files */ | ||
| 27 | #include <asm/uaccess.h> | ||
| 28 | #include <asm/byteorder.h> | ||
| 29 | #include <linux/slab.h> | ||
| 30 | #include <linux/module.h> | ||
| 31 | #include <linux/init.h> | ||
| 32 | #include <linux/wait.h> | ||
| 33 | #include <linux/usb.h> | ||
| 34 | #include <linux/mutex.h> | ||
| 35 | |||
| 36 | /*-------------------------------------------------------------------*/ | ||
| 37 | /* Debug support */ | ||
| 38 | #ifdef DEBUG | ||
| 39 | #define dump( adr, len) \ | ||
| 40 | do { \ | ||
| 41 | unsigned int u; \ | ||
| 42 | printk (KERN_DEBUG); \ | ||
| 43 | for (u = 0; u < len; u++) \ | ||
| 44 | printk (" %02X", adr[u] & 0xFF); \ | ||
| 45 | printk ("\n"); \ | ||
| 46 | } while (0) | ||
| 47 | #else | ||
| 48 | #define dump( adr, len) | ||
| 49 | #endif | ||
| 50 | |||
| 51 | /*-------------------------------------------------------------------*/ | ||
| 52 | /* Version Information */ | ||
| 53 | #define DRIVER_VERSION "0.9.11" | ||
| 54 | #define DRIVER_AUTHOR "Wolfgang Mües <wolfgang@iksw-muees.de>" | ||
| 55 | #define DRIVER_DESC "Auerswald PBX/System Telephone usb driver" | ||
| 56 | |||
| 57 | /*-------------------------------------------------------------------*/ | ||
| 58 | /* Private declarations for Auerswald USB driver */ | ||
| 59 | |||
| 60 | /* Auerswald Vendor ID */ | ||
| 61 | #define ID_AUERSWALD 0x09BF | ||
| 62 | |||
| 63 | #define AUER_MINOR_BASE 112 /* auerswald driver minor number */ | ||
| 64 | |||
| 65 | /* we can have up to this number of device plugged in at once */ | ||
| 66 | #define AUER_MAX_DEVICES 16 | ||
| 67 | |||
| 68 | |||
| 69 | /* Number of read buffers for each device */ | ||
| 70 | #define AU_RBUFFERS 10 | ||
| 71 | |||
| 72 | /* Number of chain elements for each control chain */ | ||
| 73 | #define AUCH_ELEMENTS 20 | ||
| 74 | |||
| 75 | /* Number of retries in communication */ | ||
| 76 | #define AU_RETRIES 10 | ||
| 77 | |||
| 78 | /*-------------------------------------------------------------------*/ | ||
| 79 | /* vendor specific protocol */ | ||
| 80 | /* Header Byte */ | ||
| 81 | #define AUH_INDIRMASK 0x80 /* mask for direct/indirect bit */ | ||
| 82 | #define AUH_DIRECT 0x00 /* data is for USB device */ | ||
| 83 | #define AUH_INDIRECT 0x80 /* USB device is relay */ | ||
| 84 | |||
| 85 | #define AUH_SPLITMASK 0x40 /* mask for split bit */ | ||
| 86 | #define AUH_UNSPLIT 0x00 /* data block is full-size */ | ||
| 87 | #define AUH_SPLIT 0x40 /* data block is part of a larger one, | ||
| 88 | split-byte follows */ | ||
| 89 | |||
| 90 | #define AUH_TYPEMASK 0x3F /* mask for type of data transfer */ | ||
| 91 | #define AUH_TYPESIZE 0x40 /* different types */ | ||
| 92 | #define AUH_DCHANNEL 0x00 /* D channel data */ | ||
| 93 | #define AUH_B1CHANNEL 0x01 /* B1 channel transparent */ | ||
| 94 | #define AUH_B2CHANNEL 0x02 /* B2 channel transparent */ | ||
| 95 | /* 0x03..0x0F reserved for driver internal use */ | ||
| 96 | #define AUH_COMMAND 0x10 /* Command channel */ | ||
| 97 | #define AUH_BPROT 0x11 /* Configuration block protocol */ | ||
| 98 | #define AUH_DPROTANA 0x12 /* D channel protocol analyzer */ | ||
| 99 | #define AUH_TAPI 0x13 /* telephone api data (ATD) */ | ||
| 100 | /* 0x14..0x3F reserved for other protocols */ | ||
| 101 | #define AUH_UNASSIGNED 0xFF /* if char device has no assigned service */ | ||
| 102 | #define AUH_FIRSTUSERCH 0x11 /* first channel which is available for driver users */ | ||
| 103 | |||
| 104 | #define AUH_SIZE 1 /* Size of Header Byte */ | ||
| 105 | |||
| 106 | /* Split Byte. Only present if split bit in header byte set.*/ | ||
| 107 | #define AUS_STARTMASK 0x80 /* mask for first block of splitted frame */ | ||
| 108 | #define AUS_FIRST 0x80 /* first block */ | ||
| 109 | #define AUS_FOLLOW 0x00 /* following block */ | ||
| 110 | |||
| 111 | #define AUS_ENDMASK 0x40 /* mask for last block of splitted frame */ | ||
| 112 | #define AUS_END 0x40 /* last block */ | ||
| 113 | #define AUS_NOEND 0x00 /* not the last block */ | ||
| 114 | |||
| 115 | #define AUS_LENMASK 0x3F /* mask for block length information */ | ||
| 116 | |||
| 117 | /* Request types */ | ||
| 118 | #define AUT_RREQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Read Request */ | ||
| 119 | #define AUT_WREQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Write Request */ | ||
| 120 | |||
| 121 | /* Vendor Requests */ | ||
| 122 | #define AUV_GETINFO 0x00 /* GetDeviceInfo */ | ||
| 123 | #define AUV_WBLOCK 0x01 /* Write Block */ | ||
| 124 | #define AUV_RBLOCK 0x02 /* Read Block */ | ||
| 125 | #define AUV_CHANNELCTL 0x03 /* Channel Control */ | ||
| 126 | #define AUV_DUMMY 0x04 /* Dummy Out for retry */ | ||
| 127 | |||
| 128 | /* Device Info Types */ | ||
| 129 | #define AUDI_NUMBCH 0x0000 /* Number of supported B channels */ | ||
| 130 | #define AUDI_OUTFSIZE 0x0001 /* Size of OUT B channel fifos */ | ||
| 131 | #define AUDI_MBCTRANS 0x0002 /* max. block length of control transfer */ | ||
| 132 | |||
| 133 | /* Interrupt endpoint definitions */ | ||
| 134 | #define AU_IRQENDP 1 /* Endpoint number */ | ||
| 135 | #define AU_IRQCMDID 16 /* Command-block ID */ | ||
| 136 | #define AU_BLOCKRDY 0 /* Command: Block data ready on ctl endpoint */ | ||
| 137 | #define AU_IRQMINSIZE 5 /* Nr. of bytes decoded in this driver */ | ||
| 138 | |||
| 139 | /* Device String Descriptors */ | ||
| 140 | #define AUSI_VENDOR 1 /* "Auerswald GmbH & Co. KG" */ | ||
| 141 | #define AUSI_DEVICE 2 /* Name of the Device */ | ||
| 142 | #define AUSI_SERIALNR 3 /* Serial Number */ | ||
| 143 | #define AUSI_MSN 4 /* "MSN ..." (first) Multiple Subscriber Number */ | ||
| 144 | |||
| 145 | #define AUSI_DLEN 100 /* Max. Length of Device Description */ | ||
| 146 | |||
| 147 | #define AUV_RETRY 0x101 /* First Firmware version which can do control retries */ | ||
| 148 | |||
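The AUSI_* indices are ordinary USB string descriptors. A hedged sketch of how one of them could be fetched into a buffer of AUSI_DLEN bytes; the probe code that actually fills dev_desc is not part of this excerpt, so usbdev here is an assumed struct usb_device handle.

/* Illustrative only: read the device-name string descriptor. */
char desc[AUSI_DLEN];

if (usb_string(usbdev, AUSI_DEVICE, desc, sizeof(desc)) < 0)
	strlcpy(desc, "(unknown device)", sizeof(desc));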
| 149 | /*-------------------------------------------------------------------*/ | ||
| 150 | /* External data structures / Interface */ | ||
| 151 | typedef struct | ||
| 152 | { | ||
| 153 | char __user *buf; /* return buffer for string contents */ | ||
| 154 | unsigned int bsize; /* size of return buffer */ | ||
| 155 | } audevinfo_t,*paudevinfo_t; | ||
| 156 | |||
| 157 | /* IO controls */ | ||
| 158 | #define IOCTL_AU_SLEN _IOR( 'U', 0xF0, int) /* return the max. string descriptor length */ | ||
| 159 | #define IOCTL_AU_DEVINFO _IOWR('U', 0xF1, audevinfo_t) /* get name of a specific device */ | ||
| 160 | #define IOCTL_AU_SERVREQ _IOW( 'U', 0xF2, int) /* request a service channel */ | ||
| 161 | #define IOCTL_AU_BUFLEN _IOR( 'U', 0xF3, int) /* return the max. buffer length for the device */ | ||
| 162 | #define IOCTL_AU_RXAVAIL _IOR( 'U', 0xF4, int) /* return != 0 if Receive Data available */ | ||
| 163 | #define IOCTL_AU_CONNECT _IOR( 'U', 0xF5, int) /* return != 0 if connected to a service channel */ | ||
| 164 | #define IOCTL_AU_TXREADY _IOR( 'U', 0xF6, int) /* return != 0 if Transmit channel ready to send */ | ||
| 165 | /* 'U' 0xF7..0xFF reserved */ | ||
| 166 | |||
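A hedged user-space sketch of the ioctl interface above. The device node name and the availability of these macros and AUH_* constants to user space are assumptions; the driver ships no public header in this excerpt.

/* Illustrative only: bind the char device to a service channel and poll it. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int chan = AUH_TAPI;    /* any channel in AUH_FIRSTUSERCH..AUH_TYPESIZE-1 */
	int avail = 0;
	int fd = open("/dev/usb/auer0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, IOCTL_AU_SERVREQ, &chan) == 0 &&   /* request the service */
	    ioctl(fd, IOCTL_AU_RXAVAIL, &avail) == 0)    /* receive data waiting? */
		printf("receive data available: %d\n", avail);
	close(fd);
	return 0;
}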
| 167 | /*-------------------------------------------------------------------*/ | ||
| 168 | /* Internal data structures */ | ||
| 169 | |||
| 170 | /* ..................................................................*/ | ||
| 171 | /* urb chain element */ | ||
| 172 | struct auerchain; /* forward for circular reference */ | ||
| 173 | typedef struct | ||
| 174 | { | ||
| 175 | struct auerchain *chain; /* pointer to the chain to which this element belongs */ | ||
| 176 | struct urb * urbp; /* pointer to attached urb */ | ||
| 177 | void *context; /* saved URB context */ | ||
| 178 | usb_complete_t complete; /* saved URB completion function */ | ||
| 179 | struct list_head list; /* to include element into a list */ | ||
| 180 | } auerchainelement_t,*pauerchainelement_t; | ||
| 181 | |||
| 182 | /* urb chain */ | ||
| 183 | typedef struct auerchain | ||
| 184 | { | ||
| 185 | pauerchainelement_t active; /* element which is submitted to urb */ | ||
| 186 | spinlock_t lock; /* protection against interrupts */ | ||
| 187 | struct list_head waiting_list; /* list of waiting elements */ | ||
| 188 | struct list_head free_list; /* list of available elements */ | ||
| 189 | } auerchain_t,*pauerchain_t; | ||
| 190 | |||
| 191 | /* urb blocking completion helper struct */ | ||
| 192 | typedef struct | ||
| 193 | { | ||
| 194 | wait_queue_head_t wqh; /* wait for completion */ | ||
| 195 | unsigned int done; /* completion flag */ | ||
| 196 | } auerchain_chs_t,*pauerchain_chs_t; | ||
| 197 | |||
| 198 | /* ...................................................................*/ | ||
| 199 | /* buffer element */ | ||
| 200 | struct auerbufctl; /* forward */ | ||
| 201 | typedef struct | ||
| 202 | { | ||
| 203 | char *bufp; /* reference to allocated data buffer */ | ||
| 204 | unsigned int len; /* number of characters in data buffer */ | ||
| 205 | unsigned int retries; /* for urb retries */ | ||
| 206 | struct usb_ctrlrequest *dr; /* for setup data in control messages */ | ||
| 207 | struct urb * urbp; /* USB urb */ | ||
| 208 | struct auerbufctl *list; /* pointer to list */ | ||
| 209 | struct list_head buff_list; /* reference to next buffer in list */ | ||
| 210 | } auerbuf_t,*pauerbuf_t; | ||
| 211 | |||
| 212 | /* buffer list control block */ | ||
| 213 | typedef struct auerbufctl | ||
| 214 | { | ||
| 215 | spinlock_t lock; /* protection in interrupt */ | ||
| 216 | struct list_head free_buff_list;/* free buffers */ | ||
| 217 | struct list_head rec_buff_list; /* buffers with receive data */ | ||
| 218 | } auerbufctl_t,*pauerbufctl_t; | ||
| 219 | |||
| 220 | /* ...................................................................*/ | ||
| 221 | /* service context */ | ||
| 222 | struct auerscon; /* forward */ | ||
| 223 | typedef void (*auer_dispatch_t)(struct auerscon*, pauerbuf_t); | ||
| 224 | typedef void (*auer_disconn_t) (struct auerscon*); | ||
| 225 | typedef struct auerscon | ||
| 226 | { | ||
| 227 | unsigned int id; /* protocol service id AUH_xxxx */ | ||
| 228 | auer_dispatch_t dispatch; /* dispatch read buffer */ | ||
| 229 | auer_disconn_t disconnect; /* disconnect from device, wake up all char readers */ | ||
| 230 | } auerscon_t,*pauerscon_t; | ||
| 231 | |||
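A service is therefore just a protocol id plus two callbacks. A sketch of how a consumer might fill this descriptor before registering it; my_dispatch/my_disconnect are placeholders, and the character-device code further below does essentially the same thing in auerchar_open().

/* Illustrative only: a listener for one AUH_* protocol channel. */
static void my_dispatch(pauerscon_t scp, pauerbuf_t bp)
{
	/* consume the received buffer referenced by bp */
}

static void my_disconnect(pauerscon_t scp)
{
	/* the device is gone; wake up anyone waiting on this service */
}

static auerscon_t my_service = {
	.id         = AUH_TAPI,
	.dispatch   = my_dispatch,
	.disconnect = my_disconnect,
};
/* later, with a device context cp: auerswald_addservice(cp, &my_service); */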
| 232 | /* ...................................................................*/ | ||
| 233 | /* USB device context */ | ||
| 234 | typedef struct | ||
| 235 | { | ||
| 236 | struct mutex mutex; /* protection in user context */ | ||
| 237 | char name[20]; /* name of the /dev/usb entry */ | ||
| 238 | unsigned int dtindex; /* index in the device table */ | ||
| 239 | struct usb_device * usbdev; /* USB device handle */ | ||
| 240 | int open_count; /* count the number of open character channels */ | ||
| 241 | char dev_desc[AUSI_DLEN];/* for storing a textual description */ | ||
| 242 | unsigned int maxControlLength; /* max. length of a control packet (without header) */ | ||
| 243 | struct urb * inturbp; /* interrupt urb */ | ||
| 244 | char * intbufp; /* data buffer for interrupt urb */ | ||
| 245 | unsigned int irqsize; /* size of interrupt endpoint 1 */ | ||
| 246 | struct auerchain controlchain; /* for chaining of control messages */ | ||
| 247 | auerbufctl_t bufctl; /* Buffer control for control transfers */ | ||
| 248 | pauerscon_t services[AUH_TYPESIZE];/* context pointers for each service */ | ||
| 249 | unsigned int version; /* Version of the device */ | ||
| 250 | wait_queue_head_t bufferwait; /* wait for a control buffer */ | ||
| 251 | } auerswald_t,*pauerswald_t; | ||
| 252 | |||
| 253 | /* ................................................................... */ | ||
| 254 | /* character device context */ | ||
| 255 | typedef struct | ||
| 256 | { | ||
| 257 | struct mutex mutex; /* protection in user context */ | ||
| 258 | pauerswald_t auerdev; /* context pointer of assigned device */ | ||
| 259 | auerbufctl_t bufctl; /* controls the buffer chain */ | ||
| 260 | auerscon_t scontext; /* service context */ | ||
| 261 | wait_queue_head_t readwait; /* for synchronous reading */ | ||
| 262 | struct mutex readmutex; /* protection against multiple reads */ | ||
| 263 | pauerbuf_t readbuf; /* buffer held for partial reading */ | ||
| 264 | unsigned int readoffset; /* current offset in readbuf */ | ||
| 265 | unsigned int removed; /* is != 0 if device is removed */ | ||
| 266 | } auerchar_t,*pauerchar_t; | ||
| 267 | |||
| 268 | |||
| 269 | /*-------------------------------------------------------------------*/ | ||
| 270 | /* Forwards */ | ||
| 271 | static void auerswald_ctrlread_complete (struct urb * urb); | ||
| 272 | static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp); | ||
| 273 | static struct usb_driver auerswald_driver; | ||
| 274 | |||
| 275 | |||
| 276 | /*-------------------------------------------------------------------*/ | ||
| 277 | /* USB chain helper functions */ | ||
| 278 | /* -------------------------- */ | ||
| 279 | |||
| 280 | /* completion function for chained urbs */ | ||
| 281 | static void auerchain_complete (struct urb * urb) | ||
| 282 | { | ||
| 283 | unsigned long flags; | ||
| 284 | int result; | ||
| 285 | |||
| 286 | /* get pointer to element and to chain */ | ||
| 287 | pauerchainelement_t acep = urb->context; | ||
| 288 | pauerchain_t acp = acep->chain; | ||
| 289 | |||
| 290 | /* restore original entries in urb */ | ||
| 291 | urb->context = acep->context; | ||
| 292 | urb->complete = acep->complete; | ||
| 293 | |||
| 294 | dbg ("auerchain_complete called"); | ||
| 295 | |||
| 296 | /* call original completion function | ||
| 297 | NOTE: this function may lead to more urbs submitted into the chain. | ||
| 298 | (no chain lock at calling complete()!) | ||
| 299 | acp->active != NULL is protecting us against recursion.*/ | ||
| 300 | urb->complete (urb); | ||
| 301 | |||
| 302 | /* detach element from chain data structure */ | ||
| 303 | spin_lock_irqsave (&acp->lock, flags); | ||
| 304 | if (acp->active != acep) /* paranoia debug check */ | ||
| 305 | dbg ("auerchain_complete: completion on non-active element called!"); | ||
| 306 | else | ||
| 307 | acp->active = NULL; | ||
| 308 | |||
| 309 | /* add the used chain element to the list of free elements */ | ||
| 310 | list_add_tail (&acep->list, &acp->free_list); | ||
| 311 | acep = NULL; | ||
| 312 | |||
| 313 | /* is there a new element waiting in the chain? */ | ||
| 314 | if (!acp->active && !list_empty (&acp->waiting_list)) { | ||
| 315 | /* yes: get the entry */ | ||
| 316 | struct list_head *tmp = acp->waiting_list.next; | ||
| 317 | list_del (tmp); | ||
| 318 | acep = list_entry (tmp, auerchainelement_t, list); | ||
| 319 | acp->active = acep; | ||
| 320 | } | ||
| 321 | spin_unlock_irqrestore (&acp->lock, flags); | ||
| 322 | |||
| 323 | /* submit the new urb */ | ||
| 324 | if (acep) { | ||
| 325 | urb = acep->urbp; | ||
| 326 | dbg ("auerchain_complete: submitting next urb from chain"); | ||
| 327 | urb->status = 0; /* needed! */ | ||
| 328 | result = usb_submit_urb(urb, GFP_ATOMIC); | ||
| 329 | |||
| 330 | /* check for submit errors */ | ||
| 331 | if (result) { | ||
| 332 | urb->status = result; | ||
| 333 | dbg("auerchain_complete: usb_submit_urb with error code %d", result); | ||
| 334 | /* and do error handling via *this* completion function (recursive) */ | ||
| 335 | auerchain_complete( urb); | ||
| 336 | } | ||
| 337 | } else { | ||
| 338 | /* simple return without submitting a new urb. | ||
| 339 | The empty chain is detected with acp->active == NULL. */ | ||
| 340 | } | ||
| 341 | } | ||
| 342 | |||
| 343 | |||
| 344 | /* submit function for chained urbs | ||
| 345 | this function may be called from completion context or from user space! | ||
| 346 | early = 1 -> submit in front of chain | ||
| 347 | */ | ||
| 348 | static int auerchain_submit_urb_list (pauerchain_t acp, struct urb * urb, int early) | ||
| 349 | { | ||
| 350 | int result; | ||
| 351 | unsigned long flags; | ||
| 352 | pauerchainelement_t acep = NULL; | ||
| 353 | |||
| 354 | dbg ("auerchain_submit_urb called"); | ||
| 355 | |||
| 356 | /* try to get a chain element */ | ||
| 357 | spin_lock_irqsave (&acp->lock, flags); | ||
| 358 | if (!list_empty (&acp->free_list)) { | ||
| 359 | /* yes: get the entry */ | ||
| 360 | struct list_head *tmp = acp->free_list.next; | ||
| 361 | list_del (tmp); | ||
| 362 | acep = list_entry (tmp, auerchainelement_t, list); | ||
| 363 | } | ||
| 364 | spin_unlock_irqrestore (&acp->lock, flags); | ||
| 365 | |||
| 366 | /* if no chain element available: return with error */ | ||
| 367 | if (!acep) { | ||
| 368 | return -ENOMEM; | ||
| 369 | } | ||
| 370 | |||
| 371 | /* fill in the new chain element values */ | ||
| 372 | acep->chain = acp; | ||
| 373 | acep->context = urb->context; | ||
| 374 | acep->complete = urb->complete; | ||
| 375 | acep->urbp = urb; | ||
| 376 | INIT_LIST_HEAD (&acep->list); | ||
| 377 | |||
| 378 | /* modify urb */ | ||
| 379 | urb->context = acep; | ||
| 380 | urb->complete = auerchain_complete; | ||
| 381 | urb->status = -EINPROGRESS; /* usb_submit_urb does this, too */ | ||
| 382 | |||
| 383 | /* add element to chain - or start it immediately */ | ||
| 384 | spin_lock_irqsave (&acp->lock, flags); | ||
| 385 | if (acp->active) { | ||
| 386 | /* there is traffic in the chain, simple add element to chain */ | ||
| 387 | if (early) { | ||
| 388 | dbg ("adding new urb to head of chain"); | ||
| 389 | list_add (&acep->list, &acp->waiting_list); | ||
| 390 | } else { | ||
| 391 | dbg ("adding new urb to end of chain"); | ||
| 392 | list_add_tail (&acep->list, &acp->waiting_list); | ||
| 393 | } | ||
| 394 | acep = NULL; | ||
| 395 | } else { | ||
| 396 | /* the chain is empty. Prepare restart */ | ||
| 397 | acp->active = acep; | ||
| 398 | } | ||
| 399 | /* The spinlock must be released before usb_submit_urb! */ | ||
| 400 | spin_unlock_irqrestore (&acp->lock, flags); | ||
| 401 | |||
| 402 | /* Submit urb if immediate restart */ | ||
| 403 | if (acep) { | ||
| 404 | dbg("submitting urb immediate"); | ||
| 405 | urb->status = 0; /* needed! */ | ||
| 406 | result = usb_submit_urb(urb, GFP_ATOMIC); | ||
| 407 | /* check for submit errors */ | ||
| 408 | if (result) { | ||
| 409 | urb->status = result; | ||
| 410 | dbg("auerchain_submit_urb: usb_submit_urb with error code %d", result); | ||
| 411 | /* and do error handling via completion function */ | ||
| 412 | auerchain_complete( urb); | ||
| 413 | } | ||
| 414 | } | ||
| 415 | |||
| 416 | return 0; | ||
| 417 | } | ||
| 418 | |||
| 419 | /* submit function for chained urbs | ||
| 420 | this function may be called from completion context or from user space! | ||
| 421 | */ | ||
| 422 | static int auerchain_submit_urb (pauerchain_t acp, struct urb * urb) | ||
| 423 | { | ||
| 424 | return auerchain_submit_urb_list (acp, urb, 0); | ||
| 425 | } | ||
| 426 | |||
| 427 | /* cancel an urb which is submitted to the chain | ||
| 428 | the result is 0 if the urb was still waiting and has been cancelled, or the | ||
| 429 | return value of usb_unlink_urb() (typically -EINPROGRESS) for the active urb. | ||
| 430 | */ | ||
| 431 | static int auerchain_unlink_urb (pauerchain_t acp, struct urb * urb) | ||
| 432 | { | ||
| 433 | unsigned long flags; | ||
| 434 | struct urb * urbp; | ||
| 435 | pauerchainelement_t acep; | ||
| 436 | struct list_head *tmp; | ||
| 437 | |||
| 438 | dbg ("auerchain_unlink_urb called"); | ||
| 439 | |||
| 440 | /* search the chain of waiting elements */ | ||
| 441 | spin_lock_irqsave (&acp->lock, flags); | ||
| 442 | list_for_each (tmp, &acp->waiting_list) { | ||
| 443 | acep = list_entry (tmp, auerchainelement_t, list); | ||
| 444 | if (acep->urbp == urb) { | ||
| 445 | list_del (tmp); | ||
| 446 | urb->context = acep->context; | ||
| 447 | urb->complete = acep->complete; | ||
| 448 | list_add_tail (&acep->list, &acp->free_list); | ||
| 449 | spin_unlock_irqrestore (&acp->lock, flags); | ||
| 450 | dbg ("unlink waiting urb"); | ||
| 451 | urb->status = -ENOENT; | ||
| 452 | urb->complete (urb); | ||
| 453 | return 0; | ||
| 454 | } | ||
| 455 | } | ||
| 456 | /* not found. */ | ||
| 457 | spin_unlock_irqrestore (&acp->lock, flags); | ||
| 458 | |||
| 459 | /* get the active urb */ | ||
| 460 | acep = acp->active; | ||
| 461 | if (acep) { | ||
| 462 | urbp = acep->urbp; | ||
| 463 | |||
| 464 | /* check if we have to cancel the active urb */ | ||
| 465 | if (urbp == urb) { | ||
| 466 | /* note that there is a race condition between the check above | ||
| 467 | and the unlink() call because of no lock. This race is harmless, | ||
| 468 | because the usb module will detect the unlink() after completion. | ||
| 469 | We can't use the acp->lock here because the completion function | ||
| 470 | wants to grab it. | ||
| 471 | */ | ||
| 472 | dbg ("unlink active urb"); | ||
| 473 | return usb_unlink_urb (urbp); | ||
| 474 | } | ||
| 475 | } | ||
| 476 | |||
| 477 | /* not found anyway | ||
| 478 | ... is some kind of success | ||
| 479 | */ | ||
| 480 | dbg ("urb to unlink not found in chain"); | ||
| 481 | return 0; | ||
| 482 | } | ||
| 483 | |||
| 484 | /* cancel all urbs which are in the chain. | ||
| 485 | this function must not be called from interrupt or completion handler. | ||
| 486 | */ | ||
| 487 | static void auerchain_unlink_all (pauerchain_t acp) | ||
| 488 | { | ||
| 489 | unsigned long flags; | ||
| 490 | struct urb * urbp; | ||
| 491 | pauerchainelement_t acep; | ||
| 492 | |||
| 493 | dbg ("auerchain_unlink_all called"); | ||
| 494 | |||
| 495 | /* clear the chain of waiting elements */ | ||
| 496 | spin_lock_irqsave (&acp->lock, flags); | ||
| 497 | while (!list_empty (&acp->waiting_list)) { | ||
| 498 | /* get the next entry */ | ||
| 499 | struct list_head *tmp = acp->waiting_list.next; | ||
| 500 | list_del (tmp); | ||
| 501 | acep = list_entry (tmp, auerchainelement_t, list); | ||
| 502 | urbp = acep->urbp; | ||
| 503 | urbp->context = acep->context; | ||
| 504 | urbp->complete = acep->complete; | ||
| 505 | list_add_tail (&acep->list, &acp->free_list); | ||
| 506 | spin_unlock_irqrestore (&acp->lock, flags); | ||
| 507 | dbg ("unlink waiting urb"); | ||
| 508 | urbp->status = -ENOENT; | ||
| 509 | urbp->complete (urbp); | ||
| 510 | spin_lock_irqsave (&acp->lock, flags); | ||
| 511 | } | ||
| 512 | spin_unlock_irqrestore (&acp->lock, flags); | ||
| 513 | |||
| 514 | /* clear the active urb */ | ||
| 515 | acep = acp->active; | ||
| 516 | if (acep) { | ||
| 517 | urbp = acep->urbp; | ||
| 518 | dbg ("unlink active urb"); | ||
| 519 | usb_kill_urb (urbp); | ||
| 520 | } | ||
| 521 | } | ||
| 522 | |||
| 523 | |||
| 524 | /* free the chain. | ||
| 525 | this function must not be called from interrupt or completion handler. | ||
| 526 | */ | ||
| 527 | static void auerchain_free (pauerchain_t acp) | ||
| 528 | { | ||
| 529 | unsigned long flags; | ||
| 530 | pauerchainelement_t acep; | ||
| 531 | |||
| 532 | dbg ("auerchain_free called"); | ||
| 533 | |||
| 534 | /* first, cancel all pending urbs */ | ||
| 535 | auerchain_unlink_all (acp); | ||
| 536 | |||
| 537 | /* free the elements */ | ||
| 538 | spin_lock_irqsave (&acp->lock, flags); | ||
| 539 | while (!list_empty (&acp->free_list)) { | ||
| 540 | /* get the next entry */ | ||
| 541 | struct list_head *tmp = acp->free_list.next; | ||
| 542 | list_del (tmp); | ||
| 543 | spin_unlock_irqrestore (&acp->lock, flags); | ||
| 544 | acep = list_entry (tmp, auerchainelement_t, list); | ||
| 545 | kfree (acep); | ||
| 546 | spin_lock_irqsave (&acp->lock, flags); | ||
| 547 | } | ||
| 548 | spin_unlock_irqrestore (&acp->lock, flags); | ||
| 549 | } | ||
| 550 | |||
| 551 | |||
| 552 | /* Init the chain control structure */ | ||
| 553 | static void auerchain_init (pauerchain_t acp) | ||
| 554 | { | ||
| 555 | /* init the chain data structure */ | ||
| 556 | acp->active = NULL; | ||
| 557 | spin_lock_init (&acp->lock); | ||
| 558 | INIT_LIST_HEAD (&acp->waiting_list); | ||
| 559 | INIT_LIST_HEAD (&acp->free_list); | ||
| 560 | } | ||
| 561 | |||
| 562 | /* setup a chain. | ||
| 563 | It is assumed that there is no concurrency while setting up the chain | ||
| 564 | requirement: auerchain_init() | ||
| 565 | */ | ||
| 566 | static int auerchain_setup (pauerchain_t acp, unsigned int numElements) | ||
| 567 | { | ||
| 568 | pauerchainelement_t acep; | ||
| 569 | |||
| 570 | dbg ("auerchain_setup called with %d elements", numElements); | ||
| 571 | |||
| 572 | /* fill the list of free elements */ | ||
| 573 | for (;numElements; numElements--) { | ||
| 574 | acep = kzalloc(sizeof(auerchainelement_t), GFP_KERNEL); | ||
| 575 | if (!acep) | ||
| 576 | goto ac_fail; | ||
| 577 | INIT_LIST_HEAD (&acep->list); | ||
| 578 | list_add_tail (&acep->list, &acp->free_list); | ||
| 579 | } | ||
| 580 | return 0; | ||
| 581 | |||
| 582 | ac_fail:/* free the elements */ | ||
| 583 | while (!list_empty (&acp->free_list)) { | ||
| 584 | /* get the next entry */ | ||
| 585 | struct list_head *tmp = acp->free_list.next; | ||
| 586 | list_del (tmp); | ||
| 587 | acep = list_entry (tmp, auerchainelement_t, list); | ||
| 588 | kfree (acep); | ||
| 589 | } | ||
| 590 | return -ENOMEM; | ||
| 591 | } | ||
| 592 | |||
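Taken together, the chain acts as a small serializing queue for control URBs: init it, preallocate elements, then submit. A sketch of the call sequence; usbdev, dr, data, len, my_complete and ctx are placeholders and the error handling is trimmed.

/* Illustrative only: push a control urb through a chain. */
auerchain_t chain;
struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

auerchain_init(&chain);                          /* empty lists, nothing active */
if (!urb || auerchain_setup(&chain, AUCH_ELEMENTS))
	goto fail;                               /* preallocation failed */
usb_fill_control_urb(urb, usbdev, usb_sndctrlpipe(usbdev, 0),
		     (unsigned char *)dr, data, len, my_complete, ctx);
auerchain_submit_urb(&chain, urb);               /* runs now, or queues behind the active urb */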
| 593 | |||
| 594 | /* completion handler for synchronous chained URBs */ | ||
| 595 | static void auerchain_blocking_completion (struct urb *urb) | ||
| 596 | { | ||
| 597 | pauerchain_chs_t pchs = urb->context; | ||
| 598 | pchs->done = 1; | ||
| 599 | wmb(); | ||
| 600 | wake_up (&pchs->wqh); | ||
| 601 | } | ||
| 602 | |||
| 603 | |||
| 604 | /* Starts chained urb and waits for completion or timeout */ | ||
| 605 | static int auerchain_start_wait_urb (pauerchain_t acp, struct urb *urb, int timeout, int* actual_length) | ||
| 606 | { | ||
| 607 | auerchain_chs_t chs; | ||
| 608 | int status; | ||
| 609 | |||
| 610 | dbg ("auerchain_start_wait_urb called"); | ||
| 611 | init_waitqueue_head (&chs.wqh); | ||
| 612 | chs.done = 0; | ||
| 613 | |||
| 614 | urb->context = &chs; | ||
| 615 | status = auerchain_submit_urb (acp, urb); | ||
| 616 | if (status) | ||
| 617 | /* something went wrong */ | ||
| 618 | return status; | ||
| 619 | |||
| 620 | timeout = wait_event_timeout(chs.wqh, chs.done, timeout); | ||
| 621 | |||
| 622 | if (!timeout && !chs.done) { | ||
| 623 | if (urb->status != -EINPROGRESS) { /* No callback?!! */ | ||
| 624 | dbg ("auerchain_start_wait_urb: raced timeout"); | ||
| 625 | status = urb->status; | ||
| 626 | } else { | ||
| 627 | dbg ("auerchain_start_wait_urb: timeout"); | ||
| 628 | auerchain_unlink_urb (acp, urb); /* remove urb safely */ | ||
| 629 | status = -ETIMEDOUT; | ||
| 630 | } | ||
| 631 | } else | ||
| 632 | status = urb->status; | ||
| 633 | |||
| 634 | if (status >= 0) | ||
| 635 | *actual_length = urb->actual_length; | ||
| 636 | |||
| 637 | return status; | ||
| 638 | } | ||
| 639 | |||
| 640 | |||
| 641 | /* auerchain_control_msg - Builds a control urb, sends it off and waits for completion | ||
| 642 | acp: pointer to the auerchain | ||
| 643 | dev: pointer to the usb device to send the message to | ||
| 644 | pipe: endpoint "pipe" to send the message to | ||
| 645 | request: USB message request value | ||
| 646 | requesttype: USB message request type value | ||
| 647 | value: USB message value | ||
| 648 | index: USB message index value | ||
| 649 | data: pointer to the data to send | ||
| 650 | size: length in bytes of the data to send | ||
| 651 | timeout: time to wait for the message to complete before timing out (if 0 the wait is forever) | ||
| 652 | |||
| 653 | This function sends a simple control message to a specified endpoint | ||
| 654 | and waits for the message to complete, or timeout. | ||
| 655 | |||
| 656 | If successful, it returns the transferred length, otherwise a negative error number. | ||
| 657 | |||
| 658 | Don't use this function from within an interrupt context, like a | ||
| 659 | bottom half handler. If you need an asynchronous message, or need to send | ||
| 660 | a message from within interrupt context, use auerchain_submit_urb() | ||
| 661 | */ | ||
| 662 | static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, | ||
| 663 | __u16 value, __u16 index, void *data, __u16 size, int timeout) | ||
| 664 | { | ||
| 665 | int ret; | ||
| 666 | struct usb_ctrlrequest *dr; | ||
| 667 | struct urb *urb; | ||
| 668 | int uninitialized_var(length); | ||
| 669 | |||
| 670 | dbg ("auerchain_control_msg"); | ||
| 671 | dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL); | ||
| 672 | if (!dr) | ||
| 673 | return -ENOMEM; | ||
| 674 | urb = usb_alloc_urb (0, GFP_KERNEL); | ||
| 675 | if (!urb) { | ||
| 676 | kfree (dr); | ||
| 677 | return -ENOMEM; | ||
| 678 | } | ||
| 679 | |||
| 680 | dr->bRequestType = requesttype; | ||
| 681 | dr->bRequest = request; | ||
| 682 | dr->wValue = cpu_to_le16 (value); | ||
| 683 | dr->wIndex = cpu_to_le16 (index); | ||
| 684 | dr->wLength = cpu_to_le16 (size); | ||
| 685 | |||
| 686 | usb_fill_control_urb (urb, dev, pipe, (unsigned char*)dr, data, size, /* build urb */ | ||
| 687 | auerchain_blocking_completion, NULL); | ||
| 688 | ret = auerchain_start_wait_urb (acp, urb, timeout, &length); | ||
| 689 | |||
| 690 | usb_free_urb (urb); | ||
| 691 | kfree (dr); | ||
| 692 | |||
| 693 | if (ret < 0) | ||
| 694 | return ret; | ||
| 695 | else | ||
| 696 | return length; | ||
| 697 | } | ||
| 698 | |||
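A hedged usage sketch, modelled on the AUV_GETINFO/AUDI_* requests defined earlier; the wValue/wIndex layout is an assumption, and the probe-time call that actually queries the device is not part of this excerpt.

/* Illustrative only: ask the device for its maximum control block length. */
unsigned char *pbuf = kmalloc(2, GFP_KERNEL);

if (pbuf) {
	int len = auerchain_control_msg(&cp->controlchain, cp->usbdev,
			usb_rcvctrlpipe(cp->usbdev, 0),
			AUV_GETINFO, AUT_RREQ,
			0, AUDI_MBCTRANS,          /* wValue, wIndex (assumed layout) */
			pbuf, 2, HZ * 2);
	if (len == 2)
		cp->maxControlLength = le16_to_cpu(*(__le16 *)pbuf);
	kfree(pbuf);
}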
| 699 | |||
| 700 | /*-------------------------------------------------------------------*/ | ||
| 701 | /* Buffer List helper functions */ | ||
| 702 | |||
| 703 | /* free a single auerbuf */ | ||
| 704 | static void auerbuf_free (pauerbuf_t bp) | ||
| 705 | { | ||
| 706 | kfree(bp->bufp); | ||
| 707 | kfree(bp->dr); | ||
| 708 | usb_free_urb(bp->urbp); | ||
| 709 | kfree(bp); | ||
| 710 | } | ||
| 711 | |||
| 712 | /* free the buffers from an auerbuf list */ | ||
| 713 | static void auerbuf_free_list (struct list_head *q) | ||
| 714 | { | ||
| 715 | struct list_head *tmp; | ||
| 716 | struct list_head *p; | ||
| 717 | pauerbuf_t bp; | ||
| 718 | |||
| 719 | dbg ("auerbuf_free_list"); | ||
| 720 | for (p = q->next; p != q;) { | ||
| 721 | bp = list_entry (p, auerbuf_t, buff_list); | ||
| 722 | tmp = p->next; | ||
| 723 | list_del (p); | ||
| 724 | p = tmp; | ||
| 725 | auerbuf_free (bp); | ||
| 726 | } | ||
| 727 | } | ||
| 728 | |||
| 729 | /* init the members of a list control block */ | ||
| 730 | static void auerbuf_init (pauerbufctl_t bcp) | ||
| 731 | { | ||
| 732 | dbg ("auerbuf_init"); | ||
| 733 | spin_lock_init (&bcp->lock); | ||
| 734 | INIT_LIST_HEAD (&bcp->free_buff_list); | ||
| 735 | INIT_LIST_HEAD (&bcp->rec_buff_list); | ||
| 736 | } | ||
| 737 | |||
| 738 | /* free all buffers from an auerbuf chain */ | ||
| 739 | static void auerbuf_free_buffers (pauerbufctl_t bcp) | ||
| 740 | { | ||
| 741 | unsigned long flags; | ||
| 742 | dbg ("auerbuf_free_buffers"); | ||
| 743 | |||
| 744 | spin_lock_irqsave (&bcp->lock, flags); | ||
| 745 | |||
| 746 | auerbuf_free_list (&bcp->free_buff_list); | ||
| 747 | auerbuf_free_list (&bcp->rec_buff_list); | ||
| 748 | |||
| 749 | spin_unlock_irqrestore (&bcp->lock, flags); | ||
| 750 | } | ||
| 751 | |||
| 752 | /* setup a list of buffers */ | ||
| 753 | /* requirement: auerbuf_init() */ | ||
| 754 | static int auerbuf_setup (pauerbufctl_t bcp, unsigned int numElements, unsigned int bufsize) | ||
| 755 | { | ||
| 756 | pauerbuf_t bep = NULL; | ||
| 757 | |||
| 758 | dbg ("auerbuf_setup called with %d elements of %d bytes", numElements, bufsize); | ||
| 759 | |||
| 760 | /* fill the list of free elements */ | ||
| 761 | for (;numElements; numElements--) { | ||
| 762 | bep = kzalloc(sizeof(auerbuf_t), GFP_KERNEL); | ||
| 763 | if (!bep) | ||
| 764 | goto bl_fail; | ||
| 765 | bep->list = bcp; | ||
| 766 | INIT_LIST_HEAD (&bep->buff_list); | ||
| 767 | bep->bufp = kmalloc (bufsize, GFP_KERNEL); | ||
| 768 | if (!bep->bufp) | ||
| 769 | goto bl_fail; | ||
| 770 | bep->dr = kmalloc(sizeof (struct usb_ctrlrequest), GFP_KERNEL); | ||
| 771 | if (!bep->dr) | ||
| 772 | goto bl_fail; | ||
| 773 | bep->urbp = usb_alloc_urb (0, GFP_KERNEL); | ||
| 774 | if (!bep->urbp) | ||
| 775 | goto bl_fail; | ||
| 776 | list_add_tail (&bep->buff_list, &bcp->free_buff_list); | ||
| 777 | } | ||
| 778 | return 0; | ||
| 779 | |||
| 780 | bl_fail:/* not enough memory. Free allocated elements */ | ||
| 781 | dbg ("auerbuf_setup: no more memory"); | ||
| 782 | auerbuf_free(bep); | ||
| 783 | auerbuf_free_buffers (bcp); | ||
| 784 | return -ENOMEM; | ||
| 785 | } | ||
| 786 | |||
| 787 | /* insert a used buffer into the free list */ | ||
| 788 | static void auerbuf_releasebuf( pauerbuf_t bp) | ||
| 789 | { | ||
| 790 | unsigned long flags; | ||
| 791 | pauerbufctl_t bcp = bp->list; | ||
| 792 | bp->retries = 0; | ||
| 793 | |||
| 794 | dbg ("auerbuf_releasebuf called"); | ||
| 795 | spin_lock_irqsave (&bcp->lock, flags); | ||
| 796 | list_add_tail (&bp->buff_list, &bcp->free_buff_list); | ||
| 797 | spin_unlock_irqrestore (&bcp->lock, flags); | ||
| 798 | } | ||
| 799 | |||
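The two lists form a simple buffer pool: take an element from free_buff_list under the lock, use it, and hand it back with auerbuf_releasebuf(). The same take/return pattern recurs throughout the driver; a condensed sketch, with bcp standing for an already initialized auerbufctl_t:

/* Illustrative only: the take/return cycle of the buffer pool. */
unsigned long flags;
pauerbuf_t bp = NULL;

spin_lock_irqsave(&bcp->lock, flags);
if (!list_empty(&bcp->free_buff_list)) {
	struct list_head *tmp = bcp->free_buff_list.next;
	list_del(tmp);
	bp = list_entry(tmp, auerbuf_t, buff_list);
}
spin_unlock_irqrestore(&bcp->lock, flags);

if (bp) {
	/* ... fill bp->dr / bp->bufp and submit bp->urbp ... */
	auerbuf_releasebuf(bp);          /* back onto free_buff_list */
}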
| 800 | |||
| 801 | /*-------------------------------------------------------------------*/ | ||
| 802 | /* Completion handlers */ | ||
| 803 | |||
| 804 | /* Values of urb->status or results of usb_submit_urb(): | ||
| 805 | 0 Initial, OK | ||
| 806 | -EINPROGRESS during submission until end | ||
| 807 | -ENOENT if urb is unlinked | ||
| 808 | -ETIME Device did not respond | ||
| 809 | -ENOMEM Memory Overflow | ||
| 810 | -ENODEV Specified USB-device or bus doesn't exist | ||
| 811 | -ENXIO URB already queued | ||
| 812 | -EINVAL a) Invalid transfer type specified (or not supported) | ||
| 813 | b) Invalid interrupt interval (0 < n < 256) | ||
| 814 | -EAGAIN a) Specified ISO start frame too early | ||
| 815 | b) (using ISO-ASAP) Too much scheduled for the future; wait some time and try again. | ||
| 816 | -EFBIG Too many ISO frames requested (currently uhci900) | ||
| 817 | -EPIPE Specified pipe-handle/Endpoint is already stalled | ||
| 818 | -EMSGSIZE Endpoint message size is zero, do interface/alternate setting | ||
| 819 | -EPROTO a) Bitstuff error | ||
| 820 | b) Unknown USB error | ||
| 821 | -EILSEQ CRC mismatch | ||
| 822 | -ENOSR Buffer error | ||
| 823 | -EREMOTEIO Short packet detected | ||
| 824 | -EXDEV ISO transfer only partially completed; look at individual frame status for details | ||
| 825 | -EINVAL ISO madness, if this happens: Log off and go home | ||
| 826 | -EOVERFLOW babble | ||
| 827 | */ | ||
| 828 | |||
| 829 | /* check if a status code allows a retry */ | ||
| 830 | static int auerswald_status_retry (int status) | ||
| 831 | { | ||
| 832 | switch (status) { | ||
| 833 | case 0: | ||
| 834 | case -ETIME: | ||
| 835 | case -EOVERFLOW: | ||
| 836 | case -EAGAIN: | ||
| 837 | case -EPIPE: | ||
| 838 | case -EPROTO: | ||
| 839 | case -EILSEQ: | ||
| 840 | case -ENOSR: | ||
| 841 | case -EREMOTEIO: | ||
| 842 | return 1; /* do a retry */ | ||
| 843 | } | ||
| 844 | return 0; /* no retry possible */ | ||
| 845 | } | ||
| 846 | |||
| 847 | /* Completion of asynchronous write block */ | ||
| 848 | static void auerchar_ctrlwrite_complete (struct urb * urb) | ||
| 849 | { | ||
| 850 | pauerbuf_t bp = urb->context; | ||
| 851 | pauerswald_t cp = container_of(bp->list, auerswald_t, bufctl); | ||
| 852 | dbg ("auerchar_ctrlwrite_complete called"); | ||
| 853 | |||
| 854 | /* reuse the buffer */ | ||
| 855 | auerbuf_releasebuf (bp); | ||
| 856 | /* Wake up all processes waiting for a buffer */ | ||
| 857 | wake_up (&cp->bufferwait); | ||
| 858 | } | ||
| 859 | |||
| 860 | /* Completion handler for dummy retry packet */ | ||
| 861 | static void auerswald_ctrlread_wretcomplete (struct urb * urb) | ||
| 862 | { | ||
| 863 | pauerbuf_t bp = urb->context; | ||
| 864 | pauerswald_t cp; | ||
| 865 | int ret; | ||
| 866 | int status = urb->status; | ||
| 867 | |||
| 868 | dbg ("auerswald_ctrlread_wretcomplete called"); | ||
| 869 | dbg ("complete with status: %d", status); | ||
| 870 | cp = container_of(bp->list, auerswald_t, bufctl); | ||
| 871 | |||
| 872 | /* check if it is possible to advance */ | ||
| 873 | if (!auerswald_status_retry(status) || !cp->usbdev) { | ||
| 874 | /* reuse the buffer */ | ||
| 875 | err ("control dummy: transmission error %d, can not retry", status); | ||
| 876 | auerbuf_releasebuf (bp); | ||
| 877 | /* Wake up all processes waiting for a buffer */ | ||
| 878 | wake_up (&cp->bufferwait); | ||
| 879 | return; | ||
| 880 | } | ||
| 881 | |||
| 882 | /* fill the control message */ | ||
| 883 | bp->dr->bRequestType = AUT_RREQ; | ||
| 884 | bp->dr->bRequest = AUV_RBLOCK; | ||
| 885 | bp->dr->wLength = bp->dr->wValue; /* temporarily stored */ | ||
| 886 | bp->dr->wValue = cpu_to_le16 (1); /* Retry Flag */ | ||
| 887 | /* bp->dr->wIndex = channel id; remains unchanged */ | ||
| 888 | usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0), | ||
| 889 | (unsigned char*)bp->dr, bp->bufp, le16_to_cpu (bp->dr->wLength), | ||
| 890 | auerswald_ctrlread_complete,bp); | ||
| 891 | |||
| 892 | /* submit the control msg as next packet */ | ||
| 893 | ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1); | ||
| 894 | if (ret) { | ||
| 895 | dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret); | ||
| 896 | bp->urbp->status = ret; | ||
| 897 | auerswald_ctrlread_complete (bp->urbp); | ||
| 898 | } | ||
| 899 | } | ||
| 900 | |||
| 901 | /* completion handler for receiving of control messages */ | ||
| 902 | static void auerswald_ctrlread_complete (struct urb * urb) | ||
| 903 | { | ||
| 904 | unsigned int serviceid; | ||
| 905 | pauerswald_t cp; | ||
| 906 | pauerscon_t scp; | ||
| 907 | pauerbuf_t bp = urb->context; | ||
| 908 | int status = urb->status; | ||
| 909 | int ret; | ||
| 910 | |||
| 911 | dbg ("auerswald_ctrlread_complete called"); | ||
| 912 | |||
| 913 | cp = container_of(bp->list, auerswald_t, bufctl); | ||
| 914 | |||
| 915 | /* check if there is valid data in this urb */ | ||
| 916 | if (status) { | ||
| 917 | dbg ("complete with non-zero status: %d", status); | ||
| 918 | /* should we do a retry? */ | ||
| 919 | if (!auerswald_status_retry(status) | ||
| 920 | || !cp->usbdev | ||
| 921 | || (cp->version < AUV_RETRY) | ||
| 922 | || (bp->retries >= AU_RETRIES)) { | ||
| 923 | /* reuse the buffer */ | ||
| 924 | err ("control read: transmission error %d, can not retry", status); | ||
| 925 | auerbuf_releasebuf (bp); | ||
| 926 | /* Wake up all processes waiting for a buffer */ | ||
| 927 | wake_up (&cp->bufferwait); | ||
| 928 | return; | ||
| 929 | } | ||
| 930 | bp->retries++; | ||
| 931 | dbg ("Retry count = %d", bp->retries); | ||
| 932 | /* send a long dummy control-write-message to allow device firmware to react */ | ||
| 933 | bp->dr->bRequestType = AUT_WREQ; | ||
| 934 | bp->dr->bRequest = AUV_DUMMY; | ||
| 935 | bp->dr->wValue = bp->dr->wLength; /* temporary storage */ | ||
| 936 | // bp->dr->wIndex channel ID remains | ||
| 937 | bp->dr->wLength = cpu_to_le16 (32); /* >= 8 bytes */ | ||
| 938 | usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0), | ||
| 939 | (unsigned char*)bp->dr, bp->bufp, 32, | ||
| 940 | auerswald_ctrlread_wretcomplete,bp); | ||
| 941 | |||
| 942 | /* submit the control msg as next packet */ | ||
| 943 | ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1); | ||
| 944 | if (ret) { | ||
| 945 | dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret); | ||
| 946 | bp->urbp->status = ret; | ||
| 947 | auerswald_ctrlread_wretcomplete (bp->urbp); | ||
| 948 | } | ||
| 949 | return; | ||
| 950 | } | ||
| 951 | |||
| 952 | /* get the actual byte count (incl. header byte) */ | ||
| 953 | bp->len = urb->actual_length; | ||
| 954 | serviceid = bp->bufp[0] & AUH_TYPEMASK; | ||
| 955 | dbg ("Packet with serviceid %d and %d bytes received", serviceid, bp->len); | ||
| 956 | |||
| 957 | /* dispatch the packet */ | ||
| 958 | scp = cp->services[serviceid]; | ||
| 959 | if (scp) { | ||
| 960 | /* look, Ma, a listener! */ | ||
| 961 | scp->dispatch (scp, bp); | ||
| 962 | } | ||
| 963 | |||
| 964 | /* release the packet */ | ||
| 965 | auerbuf_releasebuf (bp); | ||
| 966 | /* Wake up all processes waiting for a buffer */ | ||
| 967 | wake_up (&cp->bufferwait); | ||
| 968 | } | ||
| 969 | |||
| 970 | /*-------------------------------------------------------------------*/ | ||
| 971 | /* Handling of Interrupt Endpoint */ | ||
| 972 | /* This interrupt Endpoint is used to inform the host about waiting | ||
| 973 | messages from the USB device. | ||
| 974 | */ | ||
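For reference, the message format the handler below expects, as decoded by auerswald_int_complete() on the AU_IRQMINSIZE-byte interrupt data:

/* Layout of the interrupt message (at least AU_IRQMINSIZE = 5 bytes):
     intbufp[0]   AU_IRQCMDID      command-block id
     intbufp[1]   AU_BLOCKRDY      "block data ready on control endpoint"
     intbufp[2]   channel id       AUH_* service the waiting block belongs to
     intbufp[3]   byte count, low  length of the waiting block including
     intbufp[4]   byte count, high the header byte (little-endian)
*/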
| 975 | /* int completion handler. */ | ||
| 976 | static void auerswald_int_complete (struct urb * urb) | ||
| 977 | { | ||
| 978 | unsigned long flags; | ||
| 979 | unsigned int channelid; | ||
| 980 | unsigned int bytecount; | ||
| 981 | int ret; | ||
| 982 | int status = urb->status; | ||
| 983 | pauerbuf_t bp = NULL; | ||
| 984 | pauerswald_t cp = urb->context; | ||
| 985 | |||
| 986 | dbg ("%s called", __func__); | ||
| 987 | |||
| 988 | switch (status) { | ||
| 989 | case 0: | ||
| 990 | /* success */ | ||
| 991 | break; | ||
| 992 | case -ECONNRESET: | ||
| 993 | case -ENOENT: | ||
| 994 | case -ESHUTDOWN: | ||
| 995 | /* this urb is terminated, clean up */ | ||
| 996 | dbg("%s - urb shutting down with status: %d", __func__, status); | ||
| 997 | return; | ||
| 998 | default: | ||
| 999 | dbg("%s - nonzero urb status received: %d", __func__, status); | ||
| 1000 | goto exit; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | /* check if all needed data was received */ | ||
| 1004 | if (urb->actual_length < AU_IRQMINSIZE) { | ||
| 1005 | dbg ("invalid data length received: %d bytes", urb->actual_length); | ||
| 1006 | goto exit; | ||
| 1007 | } | ||
| 1008 | |||
| 1009 | /* check the command code */ | ||
| 1010 | if (cp->intbufp[0] != AU_IRQCMDID) { | ||
| 1011 | dbg ("invalid command received: %d", cp->intbufp[0]); | ||
| 1012 | goto exit; | ||
| 1013 | } | ||
| 1014 | |||
| 1015 | /* check the command type */ | ||
| 1016 | if (cp->intbufp[1] != AU_BLOCKRDY) { | ||
| 1017 | dbg ("invalid command type received: %d", cp->intbufp[1]); | ||
| 1018 | goto exit; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | /* now extract the information */ | ||
| 1022 | channelid = cp->intbufp[2]; | ||
| 1023 | bytecount = (unsigned char)cp->intbufp[3]; | ||
| 1024 | bytecount |= (unsigned char)cp->intbufp[4] << 8; | ||
| 1025 | |||
| 1026 | /* check the channel id */ | ||
| 1027 | if (channelid >= AUH_TYPESIZE) { | ||
| 1028 | dbg ("invalid channel id received: %d", channelid); | ||
| 1029 | goto exit; | ||
| 1030 | } | ||
| 1031 | |||
| 1032 | /* check the byte count */ | ||
| 1033 | if (bytecount > (cp->maxControlLength+AUH_SIZE)) { | ||
| 1034 | dbg ("invalid byte count received: %d", bytecount); | ||
| 1035 | goto exit; | ||
| 1036 | } | ||
| 1037 | dbg ("Service Channel = %d", channelid); | ||
| 1038 | dbg ("Byte Count = %d", bytecount); | ||
| 1039 | |||
| 1040 | /* get a buffer for the next data packet */ | ||
| 1041 | spin_lock_irqsave (&cp->bufctl.lock, flags); | ||
| 1042 | if (!list_empty (&cp->bufctl.free_buff_list)) { | ||
| 1043 | /* yes: get the entry */ | ||
| 1044 | struct list_head *tmp = cp->bufctl.free_buff_list.next; | ||
| 1045 | list_del (tmp); | ||
| 1046 | bp = list_entry (tmp, auerbuf_t, buff_list); | ||
| 1047 | } | ||
| 1048 | spin_unlock_irqrestore (&cp->bufctl.lock, flags); | ||
| 1049 | |||
| 1050 | /* if no buffer available: skip it */ | ||
| 1051 | if (!bp) { | ||
| 1052 | dbg ("auerswald_int_complete: no data buffer available"); | ||
| 1053 | /* can we do something more? | ||
| 1054 | This is a big problem: if this int packet is ignored, the | ||
| 1055 | device will wait forever and not signal any more data. | ||
| 1056 | The only real solution is: having enough buffers! | ||
| 1057 | Or perhaps temporarily disabling the int endpoint? | ||
| 1058 | */ | ||
| 1059 | goto exit; | ||
| 1060 | } | ||
| 1061 | |||
| 1062 | /* fill the control message */ | ||
| 1063 | bp->dr->bRequestType = AUT_RREQ; | ||
| 1064 | bp->dr->bRequest = AUV_RBLOCK; | ||
| 1065 | bp->dr->wValue = cpu_to_le16 (0); | ||
| 1066 | bp->dr->wIndex = cpu_to_le16 (channelid | AUH_DIRECT | AUH_UNSPLIT); | ||
| 1067 | bp->dr->wLength = cpu_to_le16 (bytecount); | ||
| 1068 | usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0), | ||
| 1069 | (unsigned char*)bp->dr, bp->bufp, bytecount, | ||
| 1070 | auerswald_ctrlread_complete,bp); | ||
| 1071 | |||
| 1072 | /* submit the control msg */ | ||
| 1073 | ret = auerchain_submit_urb (&cp->controlchain, bp->urbp); | ||
| 1074 | if (ret) { | ||
| 1075 | dbg ("auerswald_int_complete: nonzero result of auerchain_submit_urb %d", ret); | ||
| 1076 | bp->urbp->status = ret; | ||
| 1077 | auerswald_ctrlread_complete( bp->urbp); | ||
| 1078 | /* here applies the same problem as above: device locking! */ | ||
| 1079 | } | ||
| 1080 | exit: | ||
| 1081 | ret = usb_submit_urb (urb, GFP_ATOMIC); | ||
| 1082 | if (ret) | ||
| 1083 | err ("%s - usb_submit_urb failed with result %d", | ||
| 1084 | __func__, ret); | ||
| 1085 | } | ||
| 1086 | |||
| 1087 | /* int memory deallocation | ||
| 1088 | NOTE: no mutex please! | ||
| 1089 | */ | ||
| 1090 | static void auerswald_int_free (pauerswald_t cp) | ||
| 1091 | { | ||
| 1092 | if (cp->inturbp) { | ||
| 1093 | usb_free_urb(cp->inturbp); | ||
| 1094 | cp->inturbp = NULL; | ||
| 1095 | } | ||
| 1096 | kfree(cp->intbufp); | ||
| 1097 | cp->intbufp = NULL; | ||
| 1098 | } | ||
| 1099 | |||
| 1100 | /* This function is called to activate the interrupt | ||
| 1101 | endpoint. This function returns 0 if successful or an error code. | ||
| 1102 | NOTE: no mutex please! | ||
| 1103 | */ | ||
| 1104 | static int auerswald_int_open (pauerswald_t cp) | ||
| 1105 | { | ||
| 1106 | int ret; | ||
| 1107 | struct usb_host_endpoint *ep; | ||
| 1108 | int irqsize; | ||
| 1109 | dbg ("auerswald_int_open"); | ||
| 1110 | |||
| 1111 | ep = cp->usbdev->ep_in[AU_IRQENDP]; | ||
| 1112 | if (!ep) { | ||
| 1113 | ret = -EFAULT; | ||
| 1114 | goto intoend; | ||
| 1115 | } | ||
| 1116 | irqsize = le16_to_cpu(ep->desc.wMaxPacketSize); | ||
| 1117 | cp->irqsize = irqsize; | ||
| 1118 | |||
| 1119 | /* allocate the urb and data buffer */ | ||
| 1120 | if (!cp->inturbp) { | ||
| 1121 | cp->inturbp = usb_alloc_urb (0, GFP_KERNEL); | ||
| 1122 | if (!cp->inturbp) { | ||
| 1123 | ret = -ENOMEM; | ||
| 1124 | goto intoend; | ||
| 1125 | } | ||
| 1126 | } | ||
| 1127 | if (!cp->intbufp) { | ||
| 1128 | cp->intbufp = kmalloc (irqsize, GFP_KERNEL); | ||
| 1129 | if (!cp->intbufp) { | ||
| 1130 | ret = -ENOMEM; | ||
| 1131 | goto intoend; | ||
| 1132 | } | ||
| 1133 | } | ||
| 1134 | /* setup urb */ | ||
| 1135 | usb_fill_int_urb (cp->inturbp, cp->usbdev, | ||
| 1136 | usb_rcvintpipe (cp->usbdev,AU_IRQENDP), cp->intbufp, | ||
| 1137 | irqsize, auerswald_int_complete, cp, ep->desc.bInterval); | ||
| 1138 | /* start the urb */ | ||
| 1139 | cp->inturbp->status = 0; /* needed! */ | ||
| 1140 | ret = usb_submit_urb (cp->inturbp, GFP_KERNEL); | ||
| 1141 | |||
| 1142 | intoend: | ||
| 1143 | if (ret < 0) { | ||
| 1144 | /* activation of interrupt endpoint has failed. Now clean up. */ | ||
| 1145 | dbg ("auerswald_int_open: activation of int endpoint failed"); | ||
| 1146 | |||
| 1147 | /* deallocate memory */ | ||
| 1148 | auerswald_int_free (cp); | ||
| 1149 | } | ||
| 1150 | return ret; | ||
| 1151 | } | ||
| 1152 | |||
| 1153 | /* This function is called to deactivate the interrupt | ||
| 1154 | endpoint. This function returns 0 if successful or an error code. | ||
| 1155 | NOTE: no mutex please! | ||
| 1156 | */ | ||
| 1157 | static void auerswald_int_release (pauerswald_t cp) | ||
| 1158 | { | ||
| 1159 | dbg ("auerswald_int_release"); | ||
| 1160 | |||
| 1161 | /* stop the int endpoint */ | ||
| 1162 | usb_kill_urb (cp->inturbp); | ||
| 1163 | |||
| 1164 | /* deallocate memory */ | ||
| 1165 | auerswald_int_free (cp); | ||
| 1166 | } | ||
| 1167 | |||
| 1168 | /* --------------------------------------------------------------------- */ | ||
| 1169 | /* Helper functions */ | ||
| 1170 | |||
| 1171 | /* wake up waiting readers */ | ||
| 1172 | static void auerchar_disconnect (pauerscon_t scp) | ||
| 1173 | { | ||
| 1174 | pauerchar_t ccp = container_of(scp, auerchar_t, scontext); | ||
| 1175 | dbg ("auerchar_disconnect called"); | ||
| 1176 | ccp->removed = 1; | ||
| 1177 | wake_up (&ccp->readwait); | ||
| 1178 | } | ||
| 1179 | |||
| 1180 | |||
| 1181 | /* dispatch a read packet to a waiting character device */ | ||
| 1182 | static void auerchar_ctrlread_dispatch (pauerscon_t scp, pauerbuf_t bp) | ||
| 1183 | { | ||
| 1184 | unsigned long flags; | ||
| 1185 | pauerchar_t ccp; | ||
| 1186 | pauerbuf_t newbp = NULL; | ||
| 1187 | char * charp; | ||
| 1188 | dbg ("auerchar_ctrlread_dispatch called"); | ||
| 1189 | ccp = container_of(scp, auerchar_t, scontext); | ||
| 1190 | |||
| 1191 | /* get a read buffer from character device context */ | ||
| 1192 | spin_lock_irqsave (&ccp->bufctl.lock, flags); | ||
| 1193 | if (!list_empty (&ccp->bufctl.free_buff_list)) { | ||
| 1194 | /* yes: get the entry */ | ||
| 1195 | struct list_head *tmp = ccp->bufctl.free_buff_list.next; | ||
| 1196 | list_del (tmp); | ||
| 1197 | newbp = list_entry (tmp, auerbuf_t, buff_list); | ||
| 1198 | } | ||
| 1199 | spin_unlock_irqrestore (&ccp->bufctl.lock, flags); | ||
| 1200 | |||
| 1201 | if (!newbp) { | ||
| 1202 | dbg ("No read buffer available, discard paket!"); | ||
| 1203 | return; /* no buffer, no dispatch */ | ||
| 1204 | } | ||
| 1205 | |||
| 1206 | /* copy information to new buffer element | ||
| 1207 | (all buffers have the same length) */ | ||
| 1208 | charp = newbp->bufp; | ||
| 1209 | newbp->bufp = bp->bufp; | ||
| 1210 | bp->bufp = charp; | ||
| 1211 | newbp->len = bp->len; | ||
| 1212 | |||
| 1213 | /* insert new buffer in read list */ | ||
| 1214 | spin_lock_irqsave (&ccp->bufctl.lock, flags); | ||
| 1215 | list_add_tail (&newbp->buff_list, &ccp->bufctl.rec_buff_list); | ||
| 1216 | spin_unlock_irqrestore (&ccp->bufctl.lock, flags); | ||
| 1217 | dbg ("read buffer appended to rec_list"); | ||
| 1218 | |||
| 1219 | /* wake up pending synchronous reads */ | ||
| 1220 | wake_up (&ccp->readwait); | ||
| 1221 | } | ||
| 1222 | |||
| 1223 | |||
| 1224 | /* Delete an auerswald driver context */ | ||
| 1225 | static void auerswald_delete( pauerswald_t cp) | ||
| 1226 | { | ||
| 1227 | dbg( "auerswald_delete"); | ||
| 1228 | if (cp == NULL) | ||
| 1229 | return; | ||
| 1230 | |||
| 1231 | /* Wake up all processes waiting for a buffer */ | ||
| 1232 | wake_up (&cp->bufferwait); | ||
| 1233 | |||
| 1234 | /* Cleaning up */ | ||
| 1235 | auerswald_int_release (cp); | ||
| 1236 | auerchain_free (&cp->controlchain); | ||
| 1237 | auerbuf_free_buffers (&cp->bufctl); | ||
| 1238 | |||
| 1239 | /* release the memory */ | ||
| 1240 | kfree( cp); | ||
| 1241 | } | ||
| 1242 | |||
| 1243 | |||
| 1244 | /* Delete an auerswald character context */ | ||
| 1245 | static void auerchar_delete( pauerchar_t ccp) | ||
| 1246 | { | ||
| 1247 | dbg ("auerchar_delete"); | ||
| 1248 | if (ccp == NULL) | ||
| 1249 | return; | ||
| 1250 | |||
| 1251 | /* wake up pending synchronous reads */ | ||
| 1252 | ccp->removed = 1; | ||
| 1253 | wake_up (&ccp->readwait); | ||
| 1254 | |||
| 1255 | /* remove the read buffer */ | ||
| 1256 | if (ccp->readbuf) { | ||
| 1257 | auerbuf_releasebuf (ccp->readbuf); | ||
| 1258 | ccp->readbuf = NULL; | ||
| 1259 | } | ||
| 1260 | |||
| 1261 | /* remove the character buffers */ | ||
| 1262 | auerbuf_free_buffers (&ccp->bufctl); | ||
| 1263 | |||
| 1264 | /* release the memory */ | ||
| 1265 | kfree( ccp); | ||
| 1266 | } | ||
| 1267 | |||
| 1268 | |||
| 1269 | /* add a new service to the device | ||
| 1270 | scp->id must be set! | ||
| 1271 | return: 0 if OK, else error code | ||
| 1272 | */ | ||
| 1273 | static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp) | ||
| 1274 | { | ||
| 1275 | int ret; | ||
| 1276 | |||
| 1277 | /* is the device available? */ | ||
| 1278 | if (!cp->usbdev) { | ||
| 1279 | dbg ("usbdev == NULL"); | ||
| 1280 | return -EIO; /*no: can not add a service, sorry*/ | ||
| 1281 | } | ||
| 1282 | |||
| 1283 | /* is the service available? */ | ||
| 1284 | if (cp->services[scp->id]) { | ||
| 1285 | dbg ("service is busy"); | ||
| 1286 | return -EBUSY; | ||
| 1287 | } | ||
| 1288 | |||
| 1289 | /* device is available, service is free */ | ||
| 1290 | cp->services[scp->id] = scp; | ||
| 1291 | |||
| 1292 | /* register service in device */ | ||
| 1293 | ret = auerchain_control_msg( | ||
| 1294 | &cp->controlchain, /* pointer to control chain */ | ||
| 1295 | cp->usbdev, /* pointer to device */ | ||
| 1296 | usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */ | ||
| 1297 | AUV_CHANNELCTL, /* USB message request value */ | ||
| 1298 | AUT_WREQ, /* USB message request type value */ | ||
| 1299 | 0x01, /* open USB message value */ | ||
| 1300 | scp->id, /* USB message index value */ | ||
| 1301 | NULL, /* pointer to the data to send */ | ||
| 1302 | 0, /* length in bytes of the data to send */ | ||
| 1303 | HZ * 2); /* time to wait for the message to complete before timing out */ | ||
| 1304 | if (ret < 0) { | ||
| 1305 | dbg ("auerswald_addservice: auerchain_control_msg returned error code %d", ret); | ||
| 1306 | /* undo above actions */ | ||
| 1307 | cp->services[scp->id] = NULL; | ||
| 1308 | return ret; | ||
| 1309 | } | ||
| 1310 | |||
| 1311 | dbg ("auerswald_addservice: channel open OK"); | ||
| 1312 | return 0; | ||
| 1313 | } | ||
| 1314 | |||
| 1315 | |||
| 1316 | /* remove a service from the device | ||
| 1317 | scp->id must be set! */ | ||
| 1318 | static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp) | ||
| 1319 | { | ||
| 1320 | dbg ("auerswald_removeservice called"); | ||
| 1321 | |||
| 1322 | /* check if we have a service allocated */ | ||
| 1323 | if (scp->id == AUH_UNASSIGNED) | ||
| 1324 | return; | ||
| 1325 | |||
| 1326 | /* If there is a device: close the channel */ | ||
| 1327 | if (cp->usbdev) { | ||
| 1328 | /* Close the service channel inside the device */ | ||
| 1329 | int ret = auerchain_control_msg( | ||
| 1330 | &cp->controlchain, /* pointer to control chain */ | ||
| 1331 | cp->usbdev, /* pointer to device */ | ||
| 1332 | usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */ | ||
| 1333 | AUV_CHANNELCTL, /* USB message request value */ | ||
| 1334 | AUT_WREQ, /* USB message request type value */ | ||
| 1335 | 0x00, // close /* USB message value */ | ||
| 1336 | scp->id, /* USB message index value */ | ||
| 1337 | NULL, /* pointer to the data to send */ | ||
| 1338 | 0, /* length in bytes of the data to send */ | ||
| 1339 | HZ * 2); /* time to wait for the message to complete before timing out */ | ||
| 1340 | if (ret < 0) { | ||
| 1341 | dbg ("auerswald_removeservice: auerchain_control_msg returned error code %d", ret); | ||
| 1342 | } | ||
| 1343 | else { | ||
| 1344 | dbg ("auerswald_removeservice: channel close OK"); | ||
| 1345 | } | ||
| 1346 | } | ||
| 1347 | |||
| 1348 | /* remove the service from the device */ | ||
| 1349 | cp->services[scp->id] = NULL; | ||
| 1350 | scp->id = AUH_UNASSIGNED; | ||
| 1351 | } | ||
| 1352 | |||
| 1353 | |||
| 1354 | /* --------------------------------------------------------------------- */ | ||
| 1355 | /* Char device functions */ | ||
| 1356 | |||
| 1357 | /* Open a new character device */ | ||
| 1358 | static int auerchar_open (struct inode *inode, struct file *file) | ||
| 1359 | { | ||
| 1360 | int dtindex = iminor(inode); | ||
| 1361 | pauerswald_t cp = NULL; | ||
| 1362 | pauerchar_t ccp = NULL; | ||
| 1363 | struct usb_interface *intf; | ||
| 1364 | int ret; | ||
| 1365 | |||
| 1366 | /* minor number in range? */ | ||
| 1367 | if (dtindex < 0) { | ||
| 1368 | return -ENODEV; | ||
| 1369 | } | ||
| 1370 | intf = usb_find_interface(&auerswald_driver, dtindex); | ||
| 1371 | if (!intf) { | ||
| 1372 | return -ENODEV; | ||
| 1373 | } | ||
| 1374 | |||
| 1375 | /* usb device available? */ | ||
| 1376 | cp = usb_get_intfdata (intf); | ||
| 1377 | if (cp == NULL) { | ||
| 1378 | return -ENODEV; | ||
| 1379 | } | ||
| 1380 | if (mutex_lock_interruptible(&cp->mutex)) { | ||
| 1381 | return -ERESTARTSYS; | ||
| 1382 | } | ||
| 1383 | |||
| 1384 | /* we have access to the device. Now let's allocate memory */ | ||
| 1385 | ccp = kzalloc(sizeof(auerchar_t), GFP_KERNEL); | ||
| 1386 | if (ccp == NULL) { | ||
| 1387 | err ("out of memory"); | ||
| 1388 | ret = -ENOMEM; | ||
| 1389 | goto ofail; | ||
| 1390 | } | ||
| 1391 | |||
| 1392 | /* Initialize device descriptor */ | ||
| 1393 | mutex_init(&ccp->mutex); | ||
| 1394 | mutex_init(&ccp->readmutex); | ||
| 1395 | auerbuf_init (&ccp->bufctl); | ||
| 1396 | ccp->scontext.id = AUH_UNASSIGNED; | ||
| 1397 | ccp->scontext.dispatch = auerchar_ctrlread_dispatch; | ||
| 1398 | ccp->scontext.disconnect = auerchar_disconnect; | ||
| 1399 | init_waitqueue_head (&ccp->readwait); | ||
| 1400 | |||
| 1401 | ret = auerbuf_setup (&ccp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE); | ||
| 1402 | if (ret) { | ||
| 1403 | goto ofail; | ||
| 1404 | } | ||
| 1405 | |||
| 1406 | cp->open_count++; | ||
| 1407 | ccp->auerdev = cp; | ||
| 1408 | dbg("open %s as /dev/%s", cp->dev_desc, cp->name); | ||
| 1409 | mutex_unlock(&cp->mutex); | ||
| 1410 | |||
| 1411 | /* file IO stuff */ | ||
| 1412 | file->f_pos = 0; | ||
| 1413 | file->private_data = ccp; | ||
| 1414 | return nonseekable_open(inode, file); | ||
| 1415 | |||
| 1416 | /* Error exit */ | ||
| 1417 | ofail: mutex_unlock(&cp->mutex); | ||
| 1418 | auerchar_delete (ccp); | ||
| 1419 | return ret; | ||
| 1420 | } | ||
| 1421 | |||
| 1422 | |||
| 1423 | /* IOCTL functions */ | ||
| 1424 | static long auerchar_ioctl(struct file *file, unsigned int cmd, | ||
| 1425 | unsigned long arg) | ||
| 1426 | { | ||
| 1427 | pauerchar_t ccp = (pauerchar_t) file->private_data; | ||
| 1428 | int ret = 0; | ||
| 1429 | audevinfo_t devinfo; | ||
| 1430 | pauerswald_t cp = NULL; | ||
| 1431 | unsigned int u; | ||
| 1432 | unsigned int __user *user_arg = (unsigned int __user *)arg; | ||
| 1433 | |||
| 1434 | dbg ("ioctl"); | ||
| 1435 | |||
| 1436 | /* get the mutexes */ | ||
| 1437 | if (mutex_lock_interruptible(&ccp->mutex)) { | ||
| 1438 | return -ERESTARTSYS; | ||
| 1439 | } | ||
| 1440 | cp = ccp->auerdev; | ||
| 1441 | if (!cp) { | ||
| 1442 | mutex_unlock(&ccp->mutex); | ||
| 1443 | return -ENODEV; | ||
| 1444 | } | ||
| 1445 | if (mutex_lock_interruptible(&cp->mutex)) { | ||
| 1446 | mutex_unlock(&ccp->mutex); | ||
| 1447 | return -ERESTARTSYS; | ||
| 1448 | } | ||
| 1449 | |||
| 1450 | /* Check for removal */ | ||
| 1451 | if (!cp->usbdev) { | ||
| 1452 | mutex_unlock(&cp->mutex); | ||
| 1453 | mutex_unlock(&ccp->mutex); | ||
| 1454 | return -ENODEV; | ||
| 1455 | } | ||
| 1456 | lock_kernel(); | ||
| 1457 | switch (cmd) { | ||
| 1458 | |||
| 1459 | /* return != 0 if Transmit channel ready to send */ | ||
| 1460 | case IOCTL_AU_TXREADY: | ||
| 1461 | dbg ("IOCTL_AU_TXREADY"); | ||
| 1462 | u = ccp->auerdev | ||
| 1463 | && (ccp->scontext.id != AUH_UNASSIGNED) | ||
| 1464 | && !list_empty (&cp->bufctl.free_buff_list); | ||
| 1465 | ret = put_user (u, user_arg); | ||
| 1466 | break; | ||
| 1467 | |||
| 1468 | /* return != 0 if connected to a service channel */ | ||
| 1469 | case IOCTL_AU_CONNECT: | ||
| 1470 | dbg ("IOCTL_AU_CONNECT"); | ||
| 1471 | u = (ccp->scontext.id != AUH_UNASSIGNED); | ||
| 1472 | ret = put_user (u, user_arg); | ||
| 1473 | break; | ||
| 1474 | |||
| 1475 | /* return != 0 if Receive Data available */ | ||
| 1476 | case IOCTL_AU_RXAVAIL: | ||
| 1477 | dbg ("IOCTL_AU_RXAVAIL"); | ||
| 1478 | if (ccp->scontext.id == AUH_UNASSIGNED) { | ||
| 1479 | ret = -EIO; | ||
| 1480 | break; | ||
| 1481 | } | ||
| 1482 | u = 0; /* no data */ | ||
| 1483 | if (ccp->readbuf) { | ||
| 1484 | int restlen = ccp->readbuf->len - ccp->readoffset; | ||
| 1485 | if (restlen > 0) | ||
| 1486 | u = 1; | ||
| 1487 | } | ||
| 1488 | if (!u) { | ||
| 1489 | if (!list_empty (&ccp->bufctl.rec_buff_list)) { | ||
| 1490 | u = 1; | ||
| 1491 | } | ||
| 1492 | } | ||
| 1493 | ret = put_user (u, user_arg); | ||
| 1494 | break; | ||
| 1495 | |||
| 1496 | /* return the max. buffer length for the device */ | ||
| 1497 | case IOCTL_AU_BUFLEN: | ||
| 1498 | dbg ("IOCTL_AU_BUFLEN"); | ||
| 1499 | u = cp->maxControlLength; | ||
| 1500 | ret = put_user (u, user_arg); | ||
| 1501 | break; | ||
| 1502 | |||
| 1503 | /* requesting a service channel */ | ||
| 1504 | case IOCTL_AU_SERVREQ: | ||
| 1505 | dbg ("IOCTL_AU_SERVREQ"); | ||
| 1506 | /* requesting a service means: release the previous one first */ | ||
| 1507 | auerswald_removeservice (cp, &ccp->scontext); | ||
| 1508 | /* get the channel number */ | ||
| 1509 | ret = get_user (u, user_arg); | ||
| 1510 | if (ret) { | ||
| 1511 | break; | ||
| 1512 | } | ||
| 1513 | if ((u < AUH_FIRSTUSERCH) || (u >= AUH_TYPESIZE)) { | ||
| 1514 | ret = -EIO; | ||
| 1515 | break; | ||
| 1516 | } | ||
| 1517 | dbg ("auerchar service request parameters are ok"); | ||
| 1518 | ccp->scontext.id = u; | ||
| 1519 | |||
| 1520 | /* request the service now */ | ||
| 1521 | ret = auerswald_addservice (cp, &ccp->scontext); | ||
| 1522 | if (ret) { | ||
| 1523 | /* no: revert service entry */ | ||
| 1524 | ccp->scontext.id = AUH_UNASSIGNED; | ||
| 1525 | } | ||
| 1526 | break; | ||
| 1527 | |||
| 1528 | /* get a string descriptor for the device */ | ||
| 1529 | case IOCTL_AU_DEVINFO: | ||
| 1530 | dbg ("IOCTL_AU_DEVINFO"); | ||
| 1531 | if (copy_from_user (&devinfo, (void __user *) arg, sizeof (audevinfo_t))) { | ||
| 1532 | ret = -EFAULT; | ||
| 1533 | break; | ||
| 1534 | } | ||
| 1535 | u = strlen(cp->dev_desc)+1; | ||
| 1536 | if (u > devinfo.bsize) { | ||
| 1537 | u = devinfo.bsize; | ||
| 1538 | } | ||
| 1539 | ret = copy_to_user(devinfo.buf, cp->dev_desc, u) ? -EFAULT : 0; | ||
| 1540 | break; | ||
| 1541 | |||
| 1542 | /* get the max. string descriptor length */ | ||
| 1543 | case IOCTL_AU_SLEN: | ||
| 1544 | dbg ("IOCTL_AU_SLEN"); | ||
| 1545 | u = AUSI_DLEN; | ||
| 1546 | ret = put_user (u, user_arg); | ||
| 1547 | break; | ||
| 1548 | |||
| 1549 | default: | ||
| 1550 | dbg ("IOCTL_AU_UNKNOWN"); | ||
| 1551 | ret = -ENOTTY; | ||
| 1552 | break; | ||
| 1553 | } | ||
| 1554 | unlock_kernel(); | ||
| 1555 | /* release the mutexes */ | ||
| 1556 | mutex_unlock(&cp->mutex); | ||
| 1557 | mutex_unlock(&ccp->mutex); | ||
| 1558 | return ret; | ||
| 1559 | } | ||
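The ioctl handler above is the whole userspace control surface of this driver: an open file is bound to a service channel with IOCTL_AU_SERVREQ, and the remaining requests are simple status/parameter queries answered through put_user(). A minimal userspace sketch follows; the IOCTL_AU_* request codes and the audevinfo_t layout live in the driver's private header (not part of this hunk), so the include and the service-channel number used here are assumptions for illustration only.

    /* Sketch: drive /dev/usb/auer0 through the driver's vendor ioctls.
     * Must be built against the driver's private header that defines the
     * IOCTL_AU_* codes (hypothetical include below); the service id 3 is
     * purely illustrative.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    /* #include "auerswald_ioctl.h" -- hypothetical userspace copy of the codes */

    int main(void)
    {
            unsigned int service = 3, buflen = 0, txready = 0;
            int fd = open("/dev/usb/auer0", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* bind this open file to a service channel before read()/write() */
            if (ioctl(fd, IOCTL_AU_SERVREQ, &service) < 0)
                    perror("IOCTL_AU_SERVREQ");
            /* the handler fills plain unsigned ints via put_user() */
            ioctl(fd, IOCTL_AU_BUFLEN, &buflen);
            ioctl(fd, IOCTL_AU_TXREADY, &txready);
            printf("max transfer %u bytes, tx %s\n",
                   buflen, txready ? "ready" : "busy");
            close(fd);
            return 0;
    }

Once a service channel is bound, read() and write() on the same descriptor carry the vendor protocol payload; the header byte seen in auerchar_write() is added by the driver, not by userspace.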
| 1560 | |||
| 1561 | /* Read data from the device */ | ||
| 1562 | static ssize_t auerchar_read (struct file *file, char __user *buf, size_t count, loff_t * ppos) | ||
| 1563 | { | ||
| 1564 | unsigned long flags; | ||
| 1565 | pauerchar_t ccp = (pauerchar_t) file->private_data; | ||
| 1566 | pauerbuf_t bp = NULL; | ||
| 1567 | wait_queue_t wait; | ||
| 1568 | |||
| 1569 | dbg ("auerchar_read"); | ||
| 1570 | |||
| 1571 | /* Error checking */ | ||
| 1572 | if (!ccp) | ||
| 1573 | return -EIO; | ||
| 1574 | if (*ppos) | ||
| 1575 | return -ESPIPE; | ||
| 1576 | if (count == 0) | ||
| 1577 | return 0; | ||
| 1578 | |||
| 1579 | /* get the mutex */ | ||
| 1580 | if (mutex_lock_interruptible(&ccp->mutex)) | ||
| 1581 | return -ERESTARTSYS; | ||
| 1582 | |||
| 1583 | /* Can we expect to read something? */ | ||
| 1584 | if (ccp->scontext.id == AUH_UNASSIGNED) { | ||
| 1585 | mutex_unlock(&ccp->mutex); | ||
| 1586 | return -EIO; | ||
| 1587 | } | ||
| 1588 | |||
| 1589 | /* only one reader per device allowed */ | ||
| 1590 | if (mutex_lock_interruptible(&ccp->readmutex)) { | ||
| 1591 | mutex_unlock(&ccp->mutex); | ||
| 1592 | return -ERESTARTSYS; | ||
| 1593 | } | ||
| 1594 | |||
| 1595 | /* read data from readbuf, if available */ | ||
| 1596 | doreadbuf: | ||
| 1597 | bp = ccp->readbuf; | ||
| 1598 | if (bp) { | ||
| 1599 | /* read the maximum bytes */ | ||
| 1600 | int restlen = bp->len - ccp->readoffset; | ||
| 1601 | if (restlen < 0) | ||
| 1602 | restlen = 0; | ||
| 1603 | if (count > restlen) | ||
| 1604 | count = restlen; | ||
| 1605 | if (count) { | ||
| 1606 | if (copy_to_user (buf, bp->bufp+ccp->readoffset, count)) { | ||
| 1607 | dbg ("auerswald_read: copy_to_user failed"); | ||
| 1608 | mutex_unlock(&ccp->readmutex); | ||
| 1609 | mutex_unlock(&ccp->mutex); | ||
| 1610 | return -EFAULT; | ||
| 1611 | } | ||
| 1612 | } | ||
| 1613 | /* advance the read offset */ | ||
| 1614 | ccp->readoffset += count; | ||
| 1615 | restlen -= count; | ||
| 1616 | /* reuse the read buffer */ | ||
| 1617 | if (restlen <= 0) { | ||
| 1618 | auerbuf_releasebuf (bp); | ||
| 1619 | ccp->readbuf = NULL; | ||
| 1620 | } | ||
| 1621 | /* return with number of bytes read */ | ||
| 1622 | if (count) { | ||
| 1623 | mutex_unlock(&ccp->readmutex); | ||
| 1624 | mutex_unlock(&ccp->mutex); | ||
| 1625 | return count; | ||
| 1626 | } | ||
| 1627 | } | ||
| 1628 | |||
| 1629 | /* a read buffer is not available. Try to get the next data block. */ | ||
| 1630 | doreadlist: | ||
| 1631 | /* Preparing for sleep */ | ||
| 1632 | init_waitqueue_entry (&wait, current); | ||
| 1633 | set_current_state (TASK_INTERRUPTIBLE); | ||
| 1634 | add_wait_queue (&ccp->readwait, &wait); | ||
| 1635 | |||
| 1636 | bp = NULL; | ||
| 1637 | spin_lock_irqsave (&ccp->bufctl.lock, flags); | ||
| 1638 | if (!list_empty (&ccp->bufctl.rec_buff_list)) { | ||
| 1639 | /* yes: get the entry */ | ||
| 1640 | struct list_head *tmp = ccp->bufctl.rec_buff_list.next; | ||
| 1641 | list_del (tmp); | ||
| 1642 | bp = list_entry (tmp, auerbuf_t, buff_list); | ||
| 1643 | } | ||
| 1644 | spin_unlock_irqrestore (&ccp->bufctl.lock, flags); | ||
| 1645 | |||
| 1646 | /* have we got data? */ | ||
| 1647 | if (bp) { | ||
| 1648 | ccp->readbuf = bp; | ||
| 1649 | ccp->readoffset = AUH_SIZE; /* for headerbyte */ | ||
| 1650 | set_current_state (TASK_RUNNING); | ||
| 1651 | remove_wait_queue (&ccp->readwait, &wait); | ||
| 1652 | goto doreadbuf; /* now we can read! */ | ||
| 1653 | } | ||
| 1654 | |||
| 1655 | /* no data available. Should we wait? */ | ||
| 1656 | if (file->f_flags & O_NONBLOCK) { | ||
| 1657 | dbg ("No read buffer available, returning -EAGAIN"); | ||
| 1658 | set_current_state (TASK_RUNNING); | ||
| 1659 | remove_wait_queue (&ccp->readwait, &wait); | ||
| 1660 | mutex_unlock(&ccp->readmutex); | ||
| 1661 | mutex_unlock(&ccp->mutex); | ||
| 1662 | return -EAGAIN; /* nonblocking, no data available */ | ||
| 1663 | } | ||
| 1664 | |||
| 1665 | /* yes, we should wait! */ | ||
| 1666 | mutex_unlock(&ccp->mutex); /* allow other operations while we wait */ | ||
| 1667 | schedule(); | ||
| 1668 | remove_wait_queue (&ccp->readwait, &wait); | ||
| 1669 | if (signal_pending (current)) { | ||
| 1670 | /* woken up by a signal */ | ||
| 1671 | mutex_unlock(&ccp->readmutex); | ||
| 1672 | return -ERESTARTSYS; | ||
| 1673 | } | ||
| 1674 | |||
| 1675 | /* Anything left to read? */ | ||
| 1676 | if ((ccp->scontext.id == AUH_UNASSIGNED) || ccp->removed) { | ||
| 1677 | mutex_unlock(&ccp->readmutex); | ||
| 1678 | return -EIO; | ||
| 1679 | } | ||
| 1680 | |||
| 1681 | if (mutex_lock_interruptible(&ccp->mutex)) { | ||
| 1682 | mutex_unlock(&ccp->readmutex); | ||
| 1683 | return -ERESTARTSYS; | ||
| 1684 | } | ||
| 1685 | |||
| 1686 | /* try to read the incoming data again */ | ||
| 1687 | goto doreadlist; | ||
| 1688 | } | ||
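auerchar_read() spells out the sleep by hand: init_waitqueue_entry(), set_current_state(TASK_INTERRUPTIBLE), add_wait_queue(), schedule(), then a signal check. The same behaviour is normally written with the wait_event_interruptible() helper; a minimal sketch of the equivalent blocking step, keeping the driver's own fields and using a hypothetical data_available() helper for the "readbuf or rec_buff_list non-empty" test:

    /* Sketch only: wait_event_interruptible() form of the open-coded wait
     * above.  data_available() is a hypothetical helper; ccp->readwait and
     * ccp->removed are the driver's own fields.
     */
    static int auer_wait_for_data(pauerchar_t ccp)
    {
            if (wait_event_interruptible(ccp->readwait,
                                         data_available(ccp) || ccp->removed))
                    return -ERESTARTSYS;    /* interrupted by a signal */
            return ccp->removed ? -EIO : 0;
    }

Note that the original also drops ccp->mutex before sleeping so writers and the completion path can make progress; a helper-based version would have to preserve that unlock/relock dance around the wait.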
| 1689 | |||
| 1690 | |||
| 1691 | /* Write a data block into the right service channel of the device */ | ||
| 1692 | static ssize_t auerchar_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos) | ||
| 1693 | { | ||
| 1694 | pauerchar_t ccp = (pauerchar_t) file->private_data; | ||
| 1695 | pauerswald_t cp = NULL; | ||
| 1696 | pauerbuf_t bp; | ||
| 1697 | unsigned long flags; | ||
| 1698 | int ret; | ||
| 1699 | wait_queue_t wait; | ||
| 1700 | |||
| 1701 | dbg ("auerchar_write %zd bytes", len); | ||
| 1702 | |||
| 1703 | /* Error checking */ | ||
| 1704 | if (!ccp) | ||
| 1705 | return -EIO; | ||
| 1706 | if (*ppos) | ||
| 1707 | return -ESPIPE; | ||
| 1708 | if (len == 0) | ||
| 1709 | return 0; | ||
| 1710 | |||
| 1711 | write_again: | ||
| 1712 | /* get the mutex */ | ||
| 1713 | if (mutex_lock_interruptible(&ccp->mutex)) | ||
| 1714 | return -ERESTARTSYS; | ||
| 1715 | |||
| 1716 | /* Can we expect to write something? */ | ||
| 1717 | if (ccp->scontext.id == AUH_UNASSIGNED) { | ||
| 1718 | mutex_unlock(&ccp->mutex); | ||
| 1719 | return -EIO; | ||
| 1720 | } | ||
| 1721 | |||
| 1722 | cp = ccp->auerdev; | ||
| 1723 | if (!cp) { | ||
| 1724 | mutex_unlock(&ccp->mutex); | ||
| 1725 | return -ERESTARTSYS; | ||
| 1726 | } | ||
| 1727 | if (mutex_lock_interruptible(&cp->mutex)) { | ||
| 1728 | mutex_unlock(&ccp->mutex); | ||
| 1729 | return -ERESTARTSYS; | ||
| 1730 | } | ||
| 1731 | if (!cp->usbdev) { | ||
| 1732 | mutex_unlock(&cp->mutex); | ||
| 1733 | mutex_unlock(&ccp->mutex); | ||
| 1734 | return -EIO; | ||
| 1735 | } | ||
| 1736 | /* Prepare for sleep */ | ||
| 1737 | init_waitqueue_entry (&wait, current); | ||
| 1738 | set_current_state (TASK_INTERRUPTIBLE); | ||
| 1739 | add_wait_queue (&cp->bufferwait, &wait); | ||
| 1740 | |||
| 1741 | /* Try to get a buffer from the device pool. | ||
| 1742 | We can't use a buffer from ccp->bufctl because the write | ||
| 1743 | command will last beyond a release() */ | ||
| 1744 | bp = NULL; | ||
| 1745 | spin_lock_irqsave (&cp->bufctl.lock, flags); | ||
| 1746 | if (!list_empty (&cp->bufctl.free_buff_list)) { | ||
| 1747 | /* yes: get the entry */ | ||
| 1748 | struct list_head *tmp = cp->bufctl.free_buff_list.next; | ||
| 1749 | list_del (tmp); | ||
| 1750 | bp = list_entry (tmp, auerbuf_t, buff_list); | ||
| 1751 | } | ||
| 1752 | spin_unlock_irqrestore (&cp->bufctl.lock, flags); | ||
| 1753 | |||
| 1754 | /* are there any buffers left? */ | ||
| 1755 | if (!bp) { | ||
| 1756 | mutex_unlock(&cp->mutex); | ||
| 1757 | mutex_unlock(&ccp->mutex); | ||
| 1758 | |||
| 1759 | /* NONBLOCK: don't wait */ | ||
| 1760 | if (file->f_flags & O_NONBLOCK) { | ||
| 1761 | set_current_state (TASK_RUNNING); | ||
| 1762 | remove_wait_queue (&cp->bufferwait, &wait); | ||
| 1763 | return -EAGAIN; | ||
| 1764 | } | ||
| 1765 | |||
| 1766 | /* BLOCKING: wait */ | ||
| 1767 | schedule(); | ||
| 1768 | remove_wait_queue (&cp->bufferwait, &wait); | ||
| 1769 | if (signal_pending (current)) { | ||
| 1770 | /* woken up by a signal */ | ||
| 1771 | return -ERESTARTSYS; | ||
| 1772 | } | ||
| 1773 | goto write_again; | ||
| 1774 | } else { | ||
| 1775 | set_current_state (TASK_RUNNING); | ||
| 1776 | remove_wait_queue (&cp->bufferwait, &wait); | ||
| 1777 | } | ||
| 1778 | |||
| 1779 | /* protect against too big write requests */ | ||
| 1780 | if (len > cp->maxControlLength) | ||
| 1781 | len = cp->maxControlLength; | ||
| 1782 | |||
| 1783 | /* Fill the buffer */ | ||
| 1784 | if (copy_from_user ( bp->bufp+AUH_SIZE, buf, len)) { | ||
| 1785 | dbg ("copy_from_user failed"); | ||
| 1786 | auerbuf_releasebuf (bp); | ||
| 1787 | /* Wake up all processes waiting for a buffer */ | ||
| 1788 | wake_up (&cp->bufferwait); | ||
| 1789 | mutex_unlock(&cp->mutex); | ||
| 1790 | mutex_unlock(&ccp->mutex); | ||
| 1791 | return -EFAULT; | ||
| 1792 | } | ||
| 1793 | |||
| 1794 | /* set the header byte */ | ||
| 1795 | *(bp->bufp) = ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT; | ||
| 1796 | |||
| 1797 | /* Set the transfer Parameters */ | ||
| 1798 | bp->len = len+AUH_SIZE; | ||
| 1799 | bp->dr->bRequestType = AUT_WREQ; | ||
| 1800 | bp->dr->bRequest = AUV_WBLOCK; | ||
| 1801 | bp->dr->wValue = cpu_to_le16 (0); | ||
| 1802 | bp->dr->wIndex = cpu_to_le16 (ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT); | ||
| 1803 | bp->dr->wLength = cpu_to_le16 (len+AUH_SIZE); | ||
| 1804 | usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0), | ||
| 1805 | (unsigned char*)bp->dr, bp->bufp, len+AUH_SIZE, | ||
| 1806 | auerchar_ctrlwrite_complete, bp); | ||
| 1807 | /* up we go */ | ||
| 1808 | ret = auerchain_submit_urb (&cp->controlchain, bp->urbp); | ||
| 1809 | mutex_unlock(&cp->mutex); | ||
| 1810 | if (ret) { | ||
| 1811 | dbg ("auerchar_write: nonzero result of auerchain_submit_urb %d", ret); | ||
| 1812 | auerbuf_releasebuf (bp); | ||
| 1813 | /* Wake up all processes waiting for a buffer */ | ||
| 1814 | wake_up (&cp->bufferwait); | ||
| 1815 | mutex_unlock(&ccp->mutex); | ||
| 1816 | return -EIO; | ||
| 1817 | } | ||
| 1818 | else { | ||
| 1819 | dbg ("auerchar_write: Write OK"); | ||
| 1820 | mutex_unlock(&ccp->mutex); | ||
| 1821 | return len; | ||
| 1822 | } | ||
| 1823 | } | ||
| 1824 | |||
| 1825 | |||
| 1826 | /* Close a character device */ | ||
| 1827 | static int auerchar_release (struct inode *inode, struct file *file) | ||
| 1828 | { | ||
| 1829 | pauerchar_t ccp = (pauerchar_t) file->private_data; | ||
| 1830 | pauerswald_t cp; | ||
| 1831 | dbg("release"); | ||
| 1832 | |||
| 1833 | mutex_lock(&ccp->mutex); | ||
| 1834 | cp = ccp->auerdev; | ||
| 1835 | if (cp) { | ||
| 1836 | mutex_lock(&cp->mutex); | ||
| 1837 | /* remove an open service */ | ||
| 1838 | auerswald_removeservice (cp, &ccp->scontext); | ||
| 1839 | /* detach from device */ | ||
| 1840 | if ((--cp->open_count <= 0) && (cp->usbdev == NULL)) { | ||
| 1841 | /* usb device waits for removal */ | ||
| 1842 | mutex_unlock(&cp->mutex); | ||
| 1843 | auerswald_delete (cp); | ||
| 1844 | } else { | ||
| 1845 | mutex_unlock(&cp->mutex); | ||
| 1846 | } | ||
| 1847 | cp = NULL; | ||
| 1848 | ccp->auerdev = NULL; | ||
| 1849 | } | ||
| 1850 | mutex_unlock(&ccp->mutex); | ||
| 1851 | auerchar_delete (ccp); | ||
| 1852 | |||
| 1853 | return 0; | ||
| 1854 | } | ||
| 1855 | |||
| 1856 | |||
| 1857 | /*----------------------------------------------------------------------*/ | ||
| 1858 | /* File operation structure */ | ||
| 1859 | static const struct file_operations auerswald_fops = | ||
| 1860 | { | ||
| 1861 | .owner = THIS_MODULE, | ||
| 1862 | .llseek = no_llseek, | ||
| 1863 | .read = auerchar_read, | ||
| 1864 | .write = auerchar_write, | ||
| 1865 | .unlocked_ioctl = auerchar_ioctl, | ||
| 1866 | .open = auerchar_open, | ||
| 1867 | .release = auerchar_release, | ||
| 1868 | }; | ||
| 1869 | |||
| 1870 | static struct usb_class_driver auerswald_class = { | ||
| 1871 | .name = "auer%d", | ||
| 1872 | .fops = &auerswald_fops, | ||
| 1873 | .minor_base = AUER_MINOR_BASE, | ||
| 1874 | }; | ||
| 1875 | |||
| 1876 | |||
| 1877 | /* --------------------------------------------------------------------- */ | ||
| 1878 | /* Special USB driver functions */ | ||
| 1879 | |||
| 1880 | /* Probe if this driver wants to serve a USB device | ||
| 1881 | |||
| 1882 | This entry point is called whenever a new device is attached to the bus. | ||
| 1883 | Then the device driver has to create a new instance of its internal data | ||
| 1884 | structures for the new device. | ||
| 1885 | |||
| 1886 | The dev argument specifies the device context, which contains pointers | ||
| 1887 | to all USB descriptors. The interface argument specifies the interface | ||
| 1888 | number. If a USB driver wants to bind itself to a particular device and | ||
| 1889 | interface, it has to return a pointer. This pointer normally references | ||
| 1890 | the device driver's context structure. | ||
| 1891 | |||
| 1892 | Probing is normally done by checking the vendor and product identifications | ||
| 1893 | or the class and subclass definitions. If they match, the interface number | ||
| 1894 | is compared with the ones supported by the driver. When probing is done | ||
| 1895 | class-based, it may be necessary to parse additional USB descriptors because | ||
| 1896 | the device properties can vary over a wide range. | ||
| 1897 | */ | ||
| 1898 | static int auerswald_probe (struct usb_interface *intf, | ||
| 1899 | const struct usb_device_id *id) | ||
| 1900 | { | ||
| 1901 | struct usb_device *usbdev = interface_to_usbdev(intf); | ||
| 1902 | pauerswald_t cp = NULL; | ||
| 1903 | unsigned int u = 0; | ||
| 1904 | __le16 *pbuf; | ||
| 1905 | int ret; | ||
| 1906 | |||
| 1907 | dbg ("probe: vendor id 0x%x, device id 0x%x", | ||
| 1908 | le16_to_cpu(usbdev->descriptor.idVendor), | ||
| 1909 | le16_to_cpu(usbdev->descriptor.idProduct)); | ||
| 1910 | |||
| 1911 | /* we use only the first -and only- interface */ | ||
| 1912 | if (intf->altsetting->desc.bInterfaceNumber != 0) | ||
| 1913 | return -ENODEV; | ||
| 1914 | |||
| 1915 | /* allocate memory for our device and initialize it */ | ||
| 1916 | cp = kzalloc (sizeof(auerswald_t), GFP_KERNEL); | ||
| 1917 | if (cp == NULL) { | ||
| 1918 | err ("out of memory"); | ||
| 1919 | goto pfail; | ||
| 1920 | } | ||
| 1921 | |||
| 1922 | /* Initialize device descriptor */ | ||
| 1923 | mutex_init(&cp->mutex); | ||
| 1924 | cp->usbdev = usbdev; | ||
| 1925 | auerchain_init (&cp->controlchain); | ||
| 1926 | auerbuf_init (&cp->bufctl); | ||
| 1927 | init_waitqueue_head (&cp->bufferwait); | ||
| 1928 | |||
| 1929 | ret = usb_register_dev(intf, &auerswald_class); | ||
| 1930 | if (ret) { | ||
| 1931 | err ("Not able to get a minor for this device."); | ||
| 1932 | goto pfail; | ||
| 1933 | } | ||
| 1934 | |||
| 1935 | /* Give the device a name */ | ||
| 1936 | sprintf (cp->name, "usb/auer%d", intf->minor); | ||
| 1937 | |||
| 1938 | /* Store the index */ | ||
| 1939 | cp->dtindex = intf->minor; | ||
| 1940 | |||
| 1941 | /* Get the usb version of the device */ | ||
| 1942 | cp->version = le16_to_cpu(cp->usbdev->descriptor.bcdDevice); | ||
| 1943 | dbg ("Version is %X", cp->version); | ||
| 1944 | |||
| 1945 | /* allow some time for the device to settle */ | ||
| 1946 | msleep(334); | ||
| 1947 | |||
| 1948 | /* Try to get a suitable textual description of the device */ | ||
| 1949 | /* Device name:*/ | ||
| 1950 | ret = usb_string( cp->usbdev, AUSI_DEVICE, cp->dev_desc, AUSI_DLEN-1); | ||
| 1951 | if (ret >= 0) { | ||
| 1952 | u += ret; | ||
| 1953 | /* Append Serial Number */ | ||
| 1954 | memcpy(&cp->dev_desc[u], ",Ser# ", 6); | ||
| 1955 | u += 6; | ||
| 1956 | ret = usb_string( cp->usbdev, AUSI_SERIALNR, &cp->dev_desc[u], AUSI_DLEN-u-1); | ||
| 1957 | if (ret >= 0) { | ||
| 1958 | u += ret; | ||
| 1959 | /* Append subscriber number */ | ||
| 1960 | memcpy(&cp->dev_desc[u], ", ", 2); | ||
| 1961 | u += 2; | ||
| 1962 | ret = usb_string( cp->usbdev, AUSI_MSN, &cp->dev_desc[u], AUSI_DLEN-u-1); | ||
| 1963 | if (ret >= 0) { | ||
| 1964 | u += ret; | ||
| 1965 | } | ||
| 1966 | } | ||
| 1967 | } | ||
| 1968 | cp->dev_desc[u] = '\0'; | ||
| 1969 | info("device is a %s", cp->dev_desc); | ||
| 1970 | |||
| 1971 | /* get the maximum allowed control transfer length */ | ||
| 1972 | pbuf = kmalloc(2, GFP_KERNEL); /* use an allocated buffer because of urb target */ | ||
| 1973 | if (!pbuf) { | ||
| 1974 | err( "out of memory"); | ||
| 1975 | goto pfail; | ||
| 1976 | } | ||
| 1977 | ret = usb_control_msg(cp->usbdev, /* pointer to device */ | ||
| 1978 | usb_rcvctrlpipe( cp->usbdev, 0 ), /* pipe to control endpoint */ | ||
| 1979 | AUV_GETINFO, /* USB message request value */ | ||
| 1980 | AUT_RREQ, /* USB message request type value */ | ||
| 1981 | 0, /* USB message value */ | ||
| 1982 | AUDI_MBCTRANS, /* USB message index value */ | ||
| 1983 | pbuf, /* pointer to the receive buffer */ | ||
| 1984 | 2, /* length of the buffer */ | ||
| 1985 | 2000); /* time to wait for the message to complete before timing out */ | ||
| 1986 | if (ret == 2) { | ||
| 1987 | cp->maxControlLength = le16_to_cpup(pbuf); | ||
| 1988 | kfree(pbuf); | ||
| 1989 | dbg("setup: max. allowed control transfersize is %d bytes", cp->maxControlLength); | ||
| 1990 | } else { | ||
| 1991 | kfree(pbuf); | ||
| 1992 | err("setup: getting max. allowed control transfer length failed with error %d", ret); | ||
| 1993 | goto pfail; | ||
| 1994 | } | ||
| 1995 | |||
| 1996 | /* allocate a chain for the control messages */ | ||
| 1997 | if (auerchain_setup (&cp->controlchain, AUCH_ELEMENTS)) { | ||
| 1998 | err ("out of memory"); | ||
| 1999 | goto pfail; | ||
| 2000 | } | ||
| 2001 | |||
| 2002 | /* allocate buffers for control messages */ | ||
| 2003 | if (auerbuf_setup (&cp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE)) { | ||
| 2004 | err ("out of memory"); | ||
| 2005 | goto pfail; | ||
| 2006 | } | ||
| 2007 | |||
| 2008 | /* start the interrupt endpoint */ | ||
| 2009 | if (auerswald_int_open (cp)) { | ||
| 2010 | err ("int endpoint failed"); | ||
| 2011 | goto pfail; | ||
| 2012 | } | ||
| 2013 | |||
| 2014 | /* all OK */ | ||
| 2015 | usb_set_intfdata (intf, cp); | ||
| 2016 | return 0; | ||
| 2017 | |||
| 2018 | /* Error exit: clean up the memory */ | ||
| 2019 | pfail: auerswald_delete (cp); | ||
| 2020 | return -EIO; | ||
| 2021 | } | ||
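One detail worth calling out in the probe path: the two-byte buffer for the AUDI_MBCTRANS query is kmalloc'd rather than taken from the stack because it ends up as a URB transfer buffer inside usb_control_msg(), and USB transfer buffers must be DMA-capable (stack memory is not). A small sketch of that pattern in isolation; the auer_get_u16() wrapper is hypothetical, while AUV_GETINFO and AUT_RREQ are the driver's own constants:

    /* Sketch: read a little-endian 16-bit device parameter over EP0.
     * The buffer is heap-allocated because it becomes a URB transfer
     * buffer and therefore must be DMA-capable.
     */
    static int auer_get_u16(struct usb_device *udev, u16 index, u16 *out)
    {
            __le16 *buf = kmalloc(sizeof(*buf), GFP_KERNEL);
            int ret;

            if (!buf)
                    return -ENOMEM;
            ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                  AUV_GETINFO, AUT_RREQ, 0, index,
                                  buf, sizeof(*buf), 2000 /* ms */);
            if (ret == sizeof(*buf))
                    *out = le16_to_cpup(buf);
            kfree(buf);
            if (ret == sizeof(*buf))
                    return 0;
            return ret < 0 ? ret : -EIO;
    }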
| 2022 | |||
| 2023 | |||
| 2024 | /* Disconnect driver from a served device | ||
| 2025 | |||
| 2026 | This function is called whenever a device which was served by this driver | ||
| 2027 | is disconnected. | ||
| 2028 | |||
| 2029 | The argument dev specifies the device context and the driver_context | ||
| 2030 | returns a pointer to the previously registered driver_context of the | ||
| 2031 | probe function. After returning from the disconnect function the USB | ||
| 2032 | framework completely deallocates all data structures associated with | ||
| 2033 | this device. So especially the usb_device structure must not be used | ||
| 2034 | any longer by the usb driver. | ||
| 2035 | */ | ||
| 2036 | static void auerswald_disconnect (struct usb_interface *intf) | ||
| 2037 | { | ||
| 2038 | pauerswald_t cp = usb_get_intfdata (intf); | ||
| 2039 | unsigned int u; | ||
| 2040 | |||
| 2041 | usb_set_intfdata (intf, NULL); | ||
| 2042 | if (!cp) | ||
| 2043 | return; | ||
| 2044 | |||
| 2045 | /* give back our USB minor number */ | ||
| 2046 | usb_deregister_dev(intf, &auerswald_class); | ||
| 2047 | |||
| 2048 | mutex_lock(&cp->mutex); | ||
| 2049 | info ("device /dev/%s now disconnecting", cp->name); | ||
| 2050 | |||
| 2051 | /* Stop the interrupt endpoint */ | ||
| 2052 | auerswald_int_release (cp); | ||
| 2053 | |||
| 2054 | /* remove the control chain allocated in auerswald_probe. | ||
| 2055 | This has the benefit that | ||
| 2056 | a) all pending (a)synchronous urbs are unlinked | ||
| 2057 | b) all buffers dealing with urbs are reclaimed | ||
| 2058 | */ | ||
| 2059 | auerchain_free (&cp->controlchain); | ||
| 2060 | |||
| 2061 | if (cp->open_count == 0) { | ||
| 2062 | /* nobody is using this device. So we can clean up now */ | ||
| 2063 | mutex_unlock(&cp->mutex); | ||
| 2064 | /* mutex_unlock() is possible here because no other task | ||
| 2065 | can open the device (see above). I don't want | ||
| 2066 | to kfree() a locked mutex. */ | ||
| 2067 | |||
| 2068 | auerswald_delete (cp); | ||
| 2069 | } else { | ||
| 2070 | /* device is used. Remove the pointer to the | ||
| 2071 | usb device (it's not valid any more). The last | ||
| 2072 | release() will do the clean up */ | ||
| 2073 | cp->usbdev = NULL; | ||
| 2074 | mutex_unlock(&cp->mutex); | ||
| 2075 | /* Terminate waiting writers */ | ||
| 2076 | wake_up (&cp->bufferwait); | ||
| 2077 | /* Inform all waiting readers */ | ||
| 2078 | for ( u = 0; u < AUH_TYPESIZE; u++) { | ||
| 2079 | pauerscon_t scp = cp->services[u]; | ||
| 2080 | if (scp) | ||
| 2081 | scp->disconnect( scp); | ||
| 2082 | } | ||
| 2083 | } | ||
| 2084 | } | ||
| 2085 | |||
| 2086 | /* Descriptor for the devices which are served by this driver. | ||
| 2087 | NOTE: this struct is parsed by the usbmanager install scripts. | ||
| 2088 | Change it only with caution! | ||
| 2089 | */ | ||
| 2090 | static struct usb_device_id auerswald_ids [] = { | ||
| 2091 | { USB_DEVICE (ID_AUERSWALD, 0x00C0) }, /* COMpact 2104 USB */ | ||
| 2092 | { USB_DEVICE (ID_AUERSWALD, 0x00DB) }, /* COMpact 4410/2206 USB */ | ||
| 2093 | { USB_DEVICE (ID_AUERSWALD, 0x00DC) }, /* COMpact 4406 DSL */ | ||
| 2094 | { USB_DEVICE (ID_AUERSWALD, 0x00DD) }, /* COMpact 2204 USB */ | ||
| 2095 | { USB_DEVICE (ID_AUERSWALD, 0x00F1) }, /* Comfort 2000 System Telephone */ | ||
| 2096 | { USB_DEVICE (ID_AUERSWALD, 0x00F2) }, /* Comfort 1200 System Telephone */ | ||
| 2097 | { } /* Terminating entry */ | ||
| 2098 | }; | ||
| 2099 | |||
| 2100 | /* Standard module device table */ | ||
| 2101 | MODULE_DEVICE_TABLE (usb, auerswald_ids); | ||
| 2102 | |||
| 2103 | /* Standard usb driver struct */ | ||
| 2104 | static struct usb_driver auerswald_driver = { | ||
| 2105 | .name = "auerswald", | ||
| 2106 | .probe = auerswald_probe, | ||
| 2107 | .disconnect = auerswald_disconnect, | ||
| 2108 | .id_table = auerswald_ids, | ||
| 2109 | }; | ||
| 2110 | |||
| 2111 | |||
| 2112 | /* --------------------------------------------------------------------- */ | ||
| 2113 | /* Module loading/unloading */ | ||
| 2114 | |||
| 2115 | /* Driver initialisation. Called after module loading. | ||
| 2116 | NOTE: there is no concurrency at _init | ||
| 2117 | */ | ||
| 2118 | static int __init auerswald_init (void) | ||
| 2119 | { | ||
| 2120 | int result; | ||
| 2121 | dbg ("init"); | ||
| 2122 | |||
| 2123 | /* register driver at the USB subsystem */ | ||
| 2124 | result = usb_register (&auerswald_driver); | ||
| 2125 | if (result < 0) { | ||
| 2126 | err ("driver could not be registered"); | ||
| 2127 | return -1; | ||
| 2128 | } | ||
| 2129 | return 0; | ||
| 2130 | } | ||
| 2131 | |||
| 2132 | /* Driver deinit. Called before module removal. | ||
| 2133 | NOTE: there is no concurrency at _cleanup | ||
| 2134 | */ | ||
| 2135 | static void __exit auerswald_cleanup (void) | ||
| 2136 | { | ||
| 2137 | dbg ("cleanup"); | ||
| 2138 | usb_deregister (&auerswald_driver); | ||
| 2139 | } | ||
| 2140 | |||
| 2141 | /* --------------------------------------------------------------------- */ | ||
| 2142 | /* Linux device driver module description */ | ||
| 2143 | |||
| 2144 | MODULE_AUTHOR (DRIVER_AUTHOR); | ||
| 2145 | MODULE_DESCRIPTION (DRIVER_DESC); | ||
| 2146 | MODULE_LICENSE ("GPL"); | ||
| 2147 | |||
| 2148 | module_init (auerswald_init); | ||
| 2149 | module_exit (auerswald_cleanup); | ||
| 2150 | |||
| 2151 | /* --------------------------------------------------------------------- */ | ||
| 2152 | |||
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig new file mode 100644 index 000000000000..faca4333f27a --- /dev/null +++ b/drivers/usb/musb/Kconfig | |||
| @@ -0,0 +1,176 @@ | |||
| 1 | # | ||
| 2 | # USB Dual Role (OTG-ready) Controller Drivers | ||
| 3 | # for silicon based on Mentor Graphics INVENTRA designs | ||
| 4 | # | ||
| 5 | |||
| 6 | comment "Enable Host or Gadget support to see Inventra options" | ||
| 7 | depends on !USB && USB_GADGET=n | ||
| 8 | |||
| 9 | # (M)HDRC = (Multipoint) Highspeed Dual-Role Controller | ||
| 10 | config USB_MUSB_HDRC | ||
| 11 | depends on (USB || USB_GADGET) && HAVE_CLK | ||
| 12 | select TWL4030_USB if MACH_OMAP_3430SDP | ||
| 13 | tristate 'Inventra Highspeed Dual Role Controller (TI, ...)' | ||
| 14 | help | ||
| 15 | Say Y here if your system has a dual role high speed USB | ||
| 16 | controller based on the Mentor Graphics silicon IP. Then | ||
| 17 | configure options to match your silicon and the board | ||
| 18 | it's being used with, including the USB peripheral role, | ||
| 19 | or the USB host role, or both. | ||
| 20 | |||
| 21 | Texas Instruments parts using this IP include DaVinci 644x, | ||
| 22 | OMAP 243x, OMAP 343x, and TUSB 6010. | ||
| 23 | |||
| 24 | If you do not know what this is, please say N. | ||
| 25 | |||
| 26 | To compile this driver as a module, choose M here; the | ||
| 27 | module will be called "musb_hdrc". | ||
| 28 | |||
| 29 | config USB_MUSB_SOC | ||
| 30 | boolean | ||
| 31 | depends on USB_MUSB_HDRC | ||
| 32 | default y if ARCH_DAVINCI | ||
| 33 | default y if ARCH_OMAP2430 | ||
| 34 | default y if ARCH_OMAP34XX | ||
| 35 | help | ||
| 36 | Use a static <asm/arch/hdrc_cnf.h> file to describe how the | ||
| 37 | controller is configured (endpoints, mechanisms, etc) on the | ||
| 38 | current iteration of a given system-on-chip. | ||
| 39 | |||
| 40 | comment "DaVinci 644x USB support" | ||
| 41 | depends on USB_MUSB_HDRC && ARCH_DAVINCI | ||
| 42 | |||
| 43 | comment "OMAP 243x high speed USB support" | ||
| 44 | depends on USB_MUSB_HDRC && ARCH_OMAP2430 | ||
| 45 | |||
| 46 | comment "OMAP 343x high speed USB support" | ||
| 47 | depends on USB_MUSB_HDRC && ARCH_OMAP34XX | ||
| 48 | |||
| 49 | config USB_TUSB6010 | ||
| 50 | boolean "TUSB 6010 support" | ||
| 51 | depends on USB_MUSB_HDRC && !USB_MUSB_SOC | ||
| 52 | default y | ||
| 53 | help | ||
| 54 | The TUSB 6010 chip, from Texas Instruments, connects a discrete | ||
| 55 | HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ | ||
| 56 | (a high speed serial link). It can use system-specific external | ||
| 57 | DMA controllers. | ||
| 58 | |||
| 59 | choice | ||
| 60 | prompt "Driver Mode" | ||
| 61 | depends on USB_MUSB_HDRC | ||
| 62 | help | ||
| 63 | Dual-Role devices can support both host and peripheral roles, | ||
| 64 | as well as the special "OTG Device" role, which can switch | ||
| 65 | between both roles as needed. | ||
| 66 | |||
| 67 | # use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support; | ||
| 68 | # OTG needs both roles, not just USB_MUSB_HOST. | ||
| 69 | config USB_MUSB_HOST | ||
| 70 | depends on USB | ||
| 71 | bool "USB Host" | ||
| 72 | help | ||
| 73 | Say Y here if your system supports the USB host role. | ||
| 74 | If it has a USB "A" (rectangular), "Mini-A" (uncommon), | ||
| 75 | or "Mini-AB" connector, it supports the host role. | ||
| 76 | (With a "Mini-AB" connector, you should enable USB OTG.) | ||
| 77 | |||
| 78 | # use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral | ||
| 79 | # side support ... OTG needs both roles | ||
| 80 | config USB_MUSB_PERIPHERAL | ||
| 81 | depends on USB_GADGET | ||
| 82 | bool "USB Peripheral (gadget stack)" | ||
| 83 | select USB_GADGET_MUSB_HDRC | ||
| 84 | help | ||
| 85 | Say Y here if your system supports the USB peripheral role. | ||
| 86 | If it has a USB "B" (squarish), "Mini-B", or "Mini-AB" | ||
| 87 | connector, it supports the peripheral role. | ||
| 88 | (With a "Mini-AB" connector, you should enable USB OTG.) | ||
| 89 | |||
| 90 | config USB_MUSB_OTG | ||
| 91 | depends on USB && USB_GADGET && PM && EXPERIMENTAL | ||
| 92 | bool "Both host and peripheral: USB OTG (On The Go) Device" | ||
| 93 | select USB_GADGET_MUSB_HDRC | ||
| 94 | select USB_OTG | ||
| 95 | help | ||
| 96 | The most notable feature of USB OTG is support for a | ||
| 97 | "Dual-Role" device, which can act as either a device | ||
| 98 | or a host. The initial role choice can be changed | ||
| 99 | later, when two dual-role devices talk to each other. | ||
| 100 | |||
| 101 | At this writing, the OTG support in this driver is incomplete, | ||
| 102 | omitting the mandatory HNP or SRP protocols. However, some | ||
| 103 | of the cable-based role switching works. (That is, grounding | ||
| 104 | the ID pin switches the controller to host mode, while leaving | ||
| 105 | it floating leaves it in peripheral mode.) | ||
| 106 | |||
| 107 | Select this if your system has a Mini-AB connector, or | ||
| 108 | to simplify certain kinds of configuration. | ||
| 109 | |||
| 110 | To implement your OTG Targeted Peripherals List (TPL), enable | ||
| 111 | USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h" | ||
| 112 | to match your requirements. | ||
| 113 | |||
| 114 | endchoice | ||
| 115 | |||
| 116 | # enable peripheral support (including with OTG) | ||
| 117 | config USB_GADGET_MUSB_HDRC | ||
| 118 | bool | ||
| 119 | depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG) | ||
| 120 | # default y | ||
| 121 | # select USB_GADGET_DUALSPEED | ||
| 122 | # select USB_GADGET_SELECTED | ||
| 123 | |||
| 124 | # enables host support (including with OTG) | ||
| 125 | config USB_MUSB_HDRC_HCD | ||
| 126 | bool | ||
| 127 | depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG) | ||
| 128 | select USB_OTG if USB_GADGET_MUSB_HDRC | ||
| 129 | default y | ||
| 130 | |||
| 131 | |||
| 132 | config MUSB_PIO_ONLY | ||
| 133 | bool 'Disable DMA (always use PIO)' | ||
| 134 | depends on USB_MUSB_HDRC | ||
| 135 | default y if USB_TUSB6010 | ||
| 136 | help | ||
| 137 | All data is copied between memory and FIFO by the CPU. | ||
| 138 | DMA controllers are ignored. | ||
| 139 | |||
| 140 | Do not select 'y' here unless DMA support for your SoC or board | ||
| 141 | is unavailable (or unstable). When DMA is enabled at compile time, | ||
| 142 | you can still disable it at run time using the "use_dma=n" module | ||
| 143 | parameter. | ||
| 144 | |||
| 145 | config USB_INVENTRA_DMA | ||
| 146 | bool | ||
| 147 | depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY | ||
| 148 | default ARCH_OMAP2430 || ARCH_OMAP34XX | ||
| 149 | help | ||
| 150 | Enable DMA transfers using Mentor's engine. | ||
| 151 | |||
| 152 | config USB_TI_CPPI_DMA | ||
| 153 | bool | ||
| 154 | depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY | ||
| 155 | default ARCH_DAVINCI | ||
| 156 | help | ||
| 157 | Enable DMA transfers when TI CPPI DMA is available. | ||
| 158 | |||
| 159 | config USB_TUSB_OMAP_DMA | ||
| 160 | bool | ||
| 161 | depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY | ||
| 162 | depends on USB_TUSB6010 | ||
| 163 | depends on ARCH_OMAP | ||
| 164 | default y | ||
| 165 | help | ||
| 166 | Enable DMA transfers on TUSB 6010 when OMAP DMA is available. | ||
| 167 | |||
| 168 | config USB_MUSB_LOGLEVEL | ||
| 169 | depends on USB_MUSB_HDRC | ||
| 170 | int 'Logging Level (0 - none / 3 - annoying / ... )' | ||
| 171 | default 0 | ||
| 172 | help | ||
| 173 | Set the logging level. 0 disables the debugging altogether, | ||
| 174 | although when USB_DEBUG is set the value is at least 1. | ||
| 175 | Starting at level 3, per-transfer (urb, usb_request, packet, | ||
| 176 | or dma transfer) tracing may kick in. | ||
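The comments above the "Driver Mode" choice matter for anyone reading the rest of this series: driver code is supposed to test the derived symbols, CONFIG_USB_MUSB_HDRC_HCD for host-side paths and CONFIG_USB_GADGET_MUSB_HDRC for peripheral-side paths, rather than the user-visible USB_MUSB_HOST/USB_MUSB_PERIPHERAL options, because an OTG build enables both roles at once. A hedged sketch of what that gating looks like in the sources:

    /* Sketch: how the derived Kconfig symbols gate the code.  An OTG
     * configuration defines both symbols, so both halves are built.
     */
    #ifdef CONFIG_USB_MUSB_HDRC_HCD
    /* host-side support (virtual hub, urb scheduling) compiled in */
    #endif

    #ifdef CONFIG_USB_GADGET_MUSB_HDRC
    /* peripheral-side support (usb_request handling) compiled in */
    #endif

    #if defined(CONFIG_USB_MUSB_HDRC_HCD) && defined(CONFIG_USB_GADGET_MUSB_HDRC)
    /* OTG build: both roles present; the active role is chosen at run time */
    #endif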
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile new file mode 100644 index 000000000000..88eb67de08ae --- /dev/null +++ b/drivers/usb/musb/Makefile | |||
| @@ -0,0 +1,86 @@ | |||
| 1 | # | ||
| 2 | # for USB OTG silicon based on Mentor Graphics INVENTRA designs | ||
| 3 | # | ||
| 4 | |||
| 5 | musb_hdrc-objs := musb_core.o | ||
| 6 | |||
| 7 | obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o | ||
| 8 | |||
| 9 | ifeq ($(CONFIG_ARCH_DAVINCI),y) | ||
| 10 | musb_hdrc-objs += davinci.o | ||
| 11 | endif | ||
| 12 | |||
| 13 | ifeq ($(CONFIG_USB_TUSB6010),y) | ||
| 14 | musb_hdrc-objs += tusb6010.o | ||
| 15 | endif | ||
| 16 | |||
| 17 | ifeq ($(CONFIG_ARCH_OMAP2430),y) | ||
| 18 | musb_hdrc-objs += omap2430.o | ||
| 19 | endif | ||
| 20 | |||
| 21 | ifeq ($(CONFIG_ARCH_OMAP3430),y) | ||
| 22 | musb_hdrc-objs += omap2430.o | ||
| 23 | endif | ||
| 24 | |||
| 25 | ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y) | ||
| 26 | musb_hdrc-objs += musb_gadget_ep0.o musb_gadget.o | ||
| 27 | endif | ||
| 28 | |||
| 29 | ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y) | ||
| 30 | musb_hdrc-objs += musb_virthub.o musb_host.o | ||
| 31 | endif | ||
| 32 | |||
| 33 | # the kconfig must guarantee that only one of the | ||
| 34 | # possible I/O schemes will be enabled at a time ... | ||
| 35 | # PIO only, or DMA (several potential schemes). | ||
| 36 | # though PIO is always there to back up DMA, and for ep0 | ||
| 37 | |||
| 38 | ifneq ($(CONFIG_MUSB_PIO_ONLY),y) | ||
| 39 | |||
| 40 | ifeq ($(CONFIG_USB_INVENTRA_DMA),y) | ||
| 41 | musb_hdrc-objs += musbhsdma.o | ||
| 42 | |||
| 43 | else | ||
| 44 | ifeq ($(CONFIG_USB_TI_CPPI_DMA),y) | ||
| 45 | musb_hdrc-objs += cppi_dma.o | ||
| 46 | |||
| 47 | else | ||
| 48 | ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y) | ||
| 49 | musb_hdrc-objs += tusb6010_omap.o | ||
| 50 | |||
| 51 | endif | ||
| 52 | endif | ||
| 53 | endif | ||
| 54 | endif | ||
| 55 | |||
| 56 | |||
| 57 | ################################################################################ | ||
| 58 | |||
| 59 | # FIXME remove all these extra -DMUSB_* things, stick to CONFIG_* | ||
| 60 | |||
| 61 | ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y) | ||
| 62 | EXTRA_CFLAGS += -DMUSB_AHB_ID | ||
| 63 | endif | ||
| 64 | |||
| 65 | # Debugging | ||
| 66 | |||
| 67 | MUSB_DEBUG:=$(CONFIG_USB_MUSB_LOGLEVEL) | ||
| 68 | |||
| 69 | ifeq ("$(strip $(MUSB_DEBUG))","") | ||
| 70 | ifdef CONFIG_USB_DEBUG | ||
| 71 | MUSB_DEBUG:=1 | ||
| 72 | else | ||
| 73 | MUSB_DEBUG:=0 | ||
| 74 | endif | ||
| 75 | endif | ||
| 76 | |||
| 77 | ifneq ($(MUSB_DEBUG),0) | ||
| 78 | EXTRA_CFLAGS += -DDEBUG | ||
| 79 | |||
| 80 | ifeq ($(CONFIG_PROC_FS),y) | ||
| 81 | musb_hdrc-objs += musb_procfs.o | ||
| 82 | endif | ||
| 83 | |||
| 84 | endif | ||
| 85 | |||
| 86 | EXTRA_CFLAGS += -DMUSB_DEBUG=$(MUSB_DEBUG) | ||
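The debugging block above reduces to: take CONFIG_USB_MUSB_LOGLEVEL, fall back to 1 when only USB_DEBUG is set, define MUSB_DEBUG to that value, and add -DDEBUG for any non-zero level. The DBG() and _dbg_level() calls used throughout the new sources (for example in cppi_dma.c below) are fed by this value; their real definitions live in the driver's debug header, which is not part of this hunk, so the following is only a hypothetical illustration of a level-gated macro driven by MUSB_DEBUG:

    /* Hypothetical sketch of a debug macro keyed off MUSB_DEBUG; the
     * driver's actual DBG()/_dbg_level() definitions are not shown in
     * this diff.
     */
    #define _dbg_level(level)   (MUSB_DEBUG >= (level))

    #define DBG(level, fmt, args...)                    \
            do {                                        \
                    if (_dbg_level(level))              \
                            pr_debug(fmt, ## args);     \
            } while (0)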
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c new file mode 100644 index 000000000000..5ad6d0893cbe --- /dev/null +++ b/drivers/usb/musb/cppi_dma.c | |||
| @@ -0,0 +1,1540 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 3 | * | ||
| 4 | * This file implements a DMA interface using TI's CPPI DMA. | ||
| 5 | * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB. | ||
| 6 | * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/usb.h> | ||
| 10 | |||
| 11 | #include "musb_core.h" | ||
| 12 | #include "cppi_dma.h" | ||
| 13 | |||
| 14 | |||
| 15 | /* CPPI DMA status 7-mar-2006: | ||
| 16 | * | ||
| 17 | * - See musb_{host,gadget}.c for more info | ||
| 18 | * | ||
| 19 | * - Correct RX DMA generally forces the engine into irq-per-packet mode, | ||
| 20 | * which can easily saturate the CPU under non-mass-storage loads. | ||
| 21 | * | ||
| 22 | * NOTES 24-aug-2006 (2.6.18-rc4): | ||
| 23 | * | ||
| 24 | * - peripheral RXDMA wedged in a test with packets of length 512/512/1. | ||
| 25 | * evidently after the 1 byte packet was received and acked, the queue | ||
| 26 | * of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003, | ||
| 27 | * and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401 | ||
| 28 | * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx | ||
| 29 | * of its next (512 byte) packet. IRQ issues? | ||
| 30 | * | ||
| 31 | * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will | ||
| 32 | * evidently also directly update the RX and TX CSRs ... so audit all | ||
| 33 | * host and peripheral side DMA code to avoid CSR access after DMA has | ||
| 34 | * been started. | ||
| 35 | */ | ||
| 36 | |||
| 37 | /* REVISIT now we can avoid preallocating these descriptors; or | ||
| 38 | * more simply, switch to a global freelist not per-channel ones. | ||
| 39 | * Note: at full speed, 64 descriptors == 4K bulk data. | ||
| 40 | */ | ||
| 41 | #define NUM_TXCHAN_BD 64 | ||
| 42 | #define NUM_RXCHAN_BD 64 | ||
| 43 | |||
| 44 | static inline void cpu_drain_writebuffer(void) | ||
| 45 | { | ||
| 46 | wmb(); | ||
| 47 | #ifdef CONFIG_CPU_ARM926T | ||
| 48 | /* REVISIT this "should not be needed", | ||
| 49 | * but lack of it sure seemed to hurt ... | ||
| 50 | */ | ||
| 51 | asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n"); | ||
| 52 | #endif | ||
| 53 | } | ||
| 54 | |||
| 55 | static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c) | ||
| 56 | { | ||
| 57 | struct cppi_descriptor *bd = c->freelist; | ||
| 58 | |||
| 59 | if (bd) | ||
| 60 | c->freelist = bd->next; | ||
| 61 | return bd; | ||
| 62 | } | ||
| 63 | |||
| 64 | static inline void | ||
| 65 | cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd) | ||
| 66 | { | ||
| 67 | if (!bd) | ||
| 68 | return; | ||
| 69 | bd->next = c->freelist; | ||
| 70 | c->freelist = bd; | ||
| 71 | } | ||
| 72 | |||
| 73 | /* | ||
| 74 | * Start DMA controller | ||
| 75 | * | ||
| 76 | * Initialize the DMA controller as necessary. | ||
| 77 | */ | ||
| 78 | |||
| 79 | /* zero out entire rx state RAM entry for the channel */ | ||
| 80 | static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx) | ||
| 81 | { | ||
| 82 | musb_writel(&rx->rx_skipbytes, 0, 0); | ||
| 83 | musb_writel(&rx->rx_head, 0, 0); | ||
| 84 | musb_writel(&rx->rx_sop, 0, 0); | ||
| 85 | musb_writel(&rx->rx_current, 0, 0); | ||
| 86 | musb_writel(&rx->rx_buf_current, 0, 0); | ||
| 87 | musb_writel(&rx->rx_len_len, 0, 0); | ||
| 88 | musb_writel(&rx->rx_cnt_cnt, 0, 0); | ||
| 89 | } | ||
| 90 | |||
| 91 | /* zero out entire tx state RAM entry for the channel */ | ||
| 92 | static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr) | ||
| 93 | { | ||
| 94 | musb_writel(&tx->tx_head, 0, 0); | ||
| 95 | musb_writel(&tx->tx_buf, 0, 0); | ||
| 96 | musb_writel(&tx->tx_current, 0, 0); | ||
| 97 | musb_writel(&tx->tx_buf_current, 0, 0); | ||
| 98 | musb_writel(&tx->tx_info, 0, 0); | ||
| 99 | musb_writel(&tx->tx_rem_len, 0, 0); | ||
| 100 | /* musb_writel(&tx->tx_dummy, 0, 0); */ | ||
| 101 | musb_writel(&tx->tx_complete, 0, ptr); | ||
| 102 | } | ||
| 103 | |||
| 104 | static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c) | ||
| 105 | { | ||
| 106 | int j; | ||
| 107 | |||
| 108 | /* initialize channel fields */ | ||
| 109 | c->head = NULL; | ||
| 110 | c->tail = NULL; | ||
| 111 | c->last_processed = NULL; | ||
| 112 | c->channel.status = MUSB_DMA_STATUS_UNKNOWN; | ||
| 113 | c->controller = cppi; | ||
| 114 | c->is_rndis = 0; | ||
| 115 | c->freelist = NULL; | ||
| 116 | |||
| 117 | /* build the BD Free list for the channel */ | ||
| 118 | for (j = 0; j < NUM_TXCHAN_BD + 1; j++) { | ||
| 119 | struct cppi_descriptor *bd; | ||
| 120 | dma_addr_t dma; | ||
| 121 | |||
| 122 | bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma); | ||
| 123 | bd->dma = dma; | ||
| 124 | cppi_bd_free(c, bd); | ||
| 125 | } | ||
| 126 | } | ||
| 127 | |||
| 128 | static int cppi_channel_abort(struct dma_channel *); | ||
| 129 | |||
| 130 | static void cppi_pool_free(struct cppi_channel *c) | ||
| 131 | { | ||
| 132 | struct cppi *cppi = c->controller; | ||
| 133 | struct cppi_descriptor *bd; | ||
| 134 | |||
| 135 | (void) cppi_channel_abort(&c->channel); | ||
| 136 | c->channel.status = MUSB_DMA_STATUS_UNKNOWN; | ||
| 137 | c->controller = NULL; | ||
| 138 | |||
| 139 | /* free all its bds */ | ||
| 140 | bd = c->last_processed; | ||
| 141 | do { | ||
| 142 | if (bd) | ||
| 143 | dma_pool_free(cppi->pool, bd, bd->dma); | ||
| 144 | bd = cppi_bd_alloc(c); | ||
| 145 | } while (bd); | ||
| 146 | c->last_processed = NULL; | ||
| 147 | } | ||
| 148 | |||
| 149 | static int __init cppi_controller_start(struct dma_controller *c) | ||
| 150 | { | ||
| 151 | struct cppi *controller; | ||
| 152 | void __iomem *tibase; | ||
| 153 | int i; | ||
| 154 | |||
| 155 | controller = container_of(c, struct cppi, controller); | ||
| 156 | |||
| 157 | /* do whatever is necessary to start controller */ | ||
| 158 | for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { | ||
| 159 | controller->tx[i].transmit = true; | ||
| 160 | controller->tx[i].index = i; | ||
| 161 | } | ||
| 162 | for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { | ||
| 163 | controller->rx[i].transmit = false; | ||
| 164 | controller->rx[i].index = i; | ||
| 165 | } | ||
| 166 | |||
| 167 | /* setup BD list on a per channel basis */ | ||
| 168 | for (i = 0; i < ARRAY_SIZE(controller->tx); i++) | ||
| 169 | cppi_pool_init(controller, controller->tx + i); | ||
| 170 | for (i = 0; i < ARRAY_SIZE(controller->rx); i++) | ||
| 171 | cppi_pool_init(controller, controller->rx + i); | ||
| 172 | |||
| 173 | tibase = controller->tibase; | ||
| 174 | INIT_LIST_HEAD(&controller->tx_complete); | ||
| 175 | |||
| 176 | /* initialise tx/rx channel head pointers to zero */ | ||
| 177 | for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { | ||
| 178 | struct cppi_channel *tx_ch = controller->tx + i; | ||
| 179 | struct cppi_tx_stateram __iomem *tx; | ||
| 180 | |||
| 181 | INIT_LIST_HEAD(&tx_ch->tx_complete); | ||
| 182 | |||
| 183 | tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i); | ||
| 184 | tx_ch->state_ram = tx; | ||
| 185 | cppi_reset_tx(tx, 0); | ||
| 186 | } | ||
| 187 | for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { | ||
| 188 | struct cppi_channel *rx_ch = controller->rx + i; | ||
| 189 | struct cppi_rx_stateram __iomem *rx; | ||
| 190 | |||
| 191 | INIT_LIST_HEAD(&rx_ch->tx_complete); | ||
| 192 | |||
| 193 | rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i); | ||
| 194 | rx_ch->state_ram = rx; | ||
| 195 | cppi_reset_rx(rx); | ||
| 196 | } | ||
| 197 | |||
| 198 | /* enable individual cppi channels */ | ||
| 199 | musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, | ||
| 200 | DAVINCI_DMA_ALL_CHANNELS_ENABLE); | ||
| 201 | musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG, | ||
| 202 | DAVINCI_DMA_ALL_CHANNELS_ENABLE); | ||
| 203 | |||
| 204 | /* enable tx/rx CPPI control */ | ||
| 205 | musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE); | ||
| 206 | musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE); | ||
| 207 | |||
| 208 | /* disable RNDIS mode, also host rx RNDIS autorequest */ | ||
| 209 | musb_writel(tibase, DAVINCI_RNDIS_REG, 0); | ||
| 210 | musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0); | ||
| 211 | |||
| 212 | return 0; | ||
| 213 | } | ||
| 214 | |||
| 215 | /* | ||
| 216 | * Stop DMA controller | ||
| 217 | * | ||
| 218 | * De-Init the DMA controller as necessary. | ||
| 219 | */ | ||
| 220 | |||
| 221 | static int cppi_controller_stop(struct dma_controller *c) | ||
| 222 | { | ||
| 223 | struct cppi *controller; | ||
| 224 | void __iomem *tibase; | ||
| 225 | int i; | ||
| 226 | |||
| 227 | controller = container_of(c, struct cppi, controller); | ||
| 228 | |||
| 229 | tibase = controller->tibase; | ||
| 230 | /* DISABLE INDIVIDUAL CHANNEL Interrupts */ | ||
| 231 | musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG, | ||
| 232 | DAVINCI_DMA_ALL_CHANNELS_ENABLE); | ||
| 233 | musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG, | ||
| 234 | DAVINCI_DMA_ALL_CHANNELS_ENABLE); | ||
| 235 | |||
| 236 | DBG(1, "Tearing down RX and TX Channels\n"); | ||
| 237 | for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { | ||
| 238 | /* FIXME restructure of txdma to use bds like rxdma */ | ||
| 239 | controller->tx[i].last_processed = NULL; | ||
| 240 | cppi_pool_free(controller->tx + i); | ||
| 241 | } | ||
| 242 | for (i = 0; i < ARRAY_SIZE(controller->rx); i++) | ||
| 243 | cppi_pool_free(controller->rx + i); | ||
| 244 | |||
| 245 | /* In the TX case proper teardown is supported. We resort to disabling | ||
| 246 | * TX/RX CPPI after cleanup of the TX channels. Until TX teardown is | ||
| 247 | * complete, TX CPPI cannot be disabled. | ||
| 248 | */ | ||
| 249 | /* disable tx/rx cppi */ | ||
| 250 | musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE); | ||
| 251 | musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE); | ||
| 252 | |||
| 253 | return 0; | ||
| 254 | } | ||
| 255 | |||
| 256 | /* While a DMA channel is allocated, we only want the core irqs active | ||
| 257 | * for fault reports, otherwise we'd get irqs that we don't care about. | ||
| 258 | * Except for TX irqs, where dma done != fifo empty and reusable ... | ||
| 259 | * | ||
| 260 | * NOTE: docs don't say either way, but irq masking **enables** irqs. | ||
| 261 | * | ||
| 262 | * REVISIT same issue applies to pure PIO usage too, and non-cppi dma... | ||
| 263 | */ | ||
| 264 | static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum) | ||
| 265 | { | ||
| 266 | musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8)); | ||
| 267 | } | ||
| 268 | |||
| 269 | static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum) | ||
| 270 | { | ||
| 271 | musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8)); | ||
| 272 | } | ||
| 273 | |||
| 274 | |||
| 275 | /* | ||
| 276 | * Allocate a CPPI Channel for DMA. With CPPI, channels are bound to | ||
| 277 | * each transfer direction of a non-control endpoint, so allocating | ||
| 278 | * (and deallocating) is mostly a way to notice bad housekeeping on | ||
| 279 | * the software side. We assume the irqs are always active. | ||
| 280 | */ | ||
| 281 | static struct dma_channel * | ||
| 282 | cppi_channel_allocate(struct dma_controller *c, | ||
| 283 | struct musb_hw_ep *ep, u8 transmit) | ||
| 284 | { | ||
| 285 | struct cppi *controller; | ||
| 286 | u8 index; | ||
| 287 | struct cppi_channel *cppi_ch; | ||
| 288 | void __iomem *tibase; | ||
| 289 | |||
| 290 | controller = container_of(c, struct cppi, controller); | ||
| 291 | tibase = controller->tibase; | ||
| 292 | |||
| 293 | /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */ | ||
| 294 | index = ep->epnum - 1; | ||
| 295 | |||
| 296 | /* return the corresponding CPPI Channel Handle, and | ||
| 297 | * probably disable the non-CPPI irq until we need it. | ||
| 298 | */ | ||
| 299 | if (transmit) { | ||
| 300 | if (index >= ARRAY_SIZE(controller->tx)) { | ||
| 301 | DBG(1, "no %cX%d CPPI channel\n", 'T', index); | ||
| 302 | return NULL; | ||
| 303 | } | ||
| 304 | cppi_ch = controller->tx + index; | ||
| 305 | } else { | ||
| 306 | if (index >= ARRAY_SIZE(controller->rx)) { | ||
| 307 | DBG(1, "no %cX%d CPPI channel\n", 'R', index); | ||
| 308 | return NULL; | ||
| 309 | } | ||
| 310 | cppi_ch = controller->rx + index; | ||
| 311 | core_rxirq_disable(tibase, ep->epnum); | ||
| 312 | } | ||
| 313 | |||
| 314 | /* REVISIT make this an error later once the same driver code works | ||
| 315 | * with the other DMA engine too | ||
| 316 | */ | ||
| 317 | if (cppi_ch->hw_ep) | ||
| 318 | DBG(1, "re-allocating DMA%d %cX channel %p\n", | ||
| 319 | index, transmit ? 'T' : 'R', cppi_ch); | ||
| 320 | cppi_ch->hw_ep = ep; | ||
| 321 | cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; | ||
| 322 | |||
| 323 | DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); | ||
| 324 | return &cppi_ch->channel; | ||
| 325 | } | ||
| 326 | |||
| 327 | /* Release a CPPI Channel. */ | ||
| 328 | static void cppi_channel_release(struct dma_channel *channel) | ||
| 329 | { | ||
| 330 | struct cppi_channel *c; | ||
| 331 | void __iomem *tibase; | ||
| 332 | |||
| 333 | /* REVISIT: for paranoia, check state and abort if needed... */ | ||
| 334 | |||
| 335 | c = container_of(channel, struct cppi_channel, channel); | ||
| 336 | tibase = c->controller->tibase; | ||
| 337 | if (!c->hw_ep) | ||
| 338 | DBG(1, "releasing idle DMA channel %p\n", c); | ||
| 339 | else if (!c->transmit) | ||
| 340 | core_rxirq_enable(tibase, c->index + 1); | ||
| 341 | |||
| 342 | /* for now, leave its cppi IRQ enabled (we won't trigger it) */ | ||
| 343 | c->hw_ep = NULL; | ||
| 344 | channel->status = MUSB_DMA_STATUS_UNKNOWN; | ||
| 345 | } | ||
| 346 | |||
| 347 | /* Context: controller irqlocked */ | ||
| 348 | static void | ||
| 349 | cppi_dump_rx(int level, struct cppi_channel *c, const char *tag) | ||
| 350 | { | ||
| 351 | void __iomem *base = c->controller->mregs; | ||
| 352 | struct cppi_rx_stateram __iomem *rx = c->state_ram; | ||
| 353 | |||
| 354 | musb_ep_select(base, c->index + 1); | ||
| 355 | |||
| 356 | DBG(level, "RX DMA%d%s: %d left, csr %04x, " | ||
| 357 | "%08x H%08x S%08x C%08x, " | ||
| 358 | "B%08x L%08x %08x .. %08x" | ||
| 359 | "\n", | ||
| 360 | c->index, tag, | ||
| 361 | musb_readl(c->controller->tibase, | ||
| 362 | DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), | ||
| 363 | musb_readw(c->hw_ep->regs, MUSB_RXCSR), | ||
| 364 | |||
| 365 | musb_readl(&rx->rx_skipbytes, 0), | ||
| 366 | musb_readl(&rx->rx_head, 0), | ||
| 367 | musb_readl(&rx->rx_sop, 0), | ||
| 368 | musb_readl(&rx->rx_current, 0), | ||
| 369 | |||
| 370 | musb_readl(&rx->rx_buf_current, 0), | ||
| 371 | musb_readl(&rx->rx_len_len, 0), | ||
| 372 | musb_readl(&rx->rx_cnt_cnt, 0), | ||
| 373 | musb_readl(&rx->rx_complete, 0) | ||
| 374 | ); | ||
| 375 | } | ||
| 376 | |||
| 377 | /* Context: controller irqlocked */ | ||
| 378 | static void | ||
| 379 | cppi_dump_tx(int level, struct cppi_channel *c, const char *tag) | ||
| 380 | { | ||
| 381 | void __iomem *base = c->controller->mregs; | ||
| 382 | struct cppi_tx_stateram __iomem *tx = c->state_ram; | ||
| 383 | |||
| 384 | musb_ep_select(base, c->index + 1); | ||
| 385 | |||
| 386 | DBG(level, "TX DMA%d%s: csr %04x, " | ||
| 387 | "H%08x S%08x C%08x %08x, " | ||
| 388 | "F%08x L%08x .. %08x" | ||
| 389 | "\n", | ||
| 390 | c->index, tag, | ||
| 391 | musb_readw(c->hw_ep->regs, MUSB_TXCSR), | ||
| 392 | |||
| 393 | musb_readl(&tx->tx_head, 0), | ||
| 394 | musb_readl(&tx->tx_buf, 0), | ||
| 395 | musb_readl(&tx->tx_current, 0), | ||
| 396 | musb_readl(&tx->tx_buf_current, 0), | ||
| 397 | |||
| 398 | musb_readl(&tx->tx_info, 0), | ||
| 399 | musb_readl(&tx->tx_rem_len, 0), | ||
| 400 | /* dummy/unused word 6 */ | ||
| 401 | musb_readl(&tx->tx_complete, 0) | ||
| 402 | ); | ||
| 403 | } | ||
| 404 | |||
| 405 | /* Context: controller irqlocked */ | ||
| 406 | static inline void | ||
| 407 | cppi_rndis_update(struct cppi_channel *c, int is_rx, | ||
| 408 | void __iomem *tibase, int is_rndis) | ||
| 409 | { | ||
| 410 | /* we may need to change the rndis flag for this cppi channel */ | ||
| 411 | if (c->is_rndis != is_rndis) { | ||
| 412 | u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG); | ||
| 413 | u32 temp = 1 << (c->index); | ||
| 414 | |||
| 415 | if (is_rx) | ||
| 416 | temp <<= 16; | ||
| 417 | if (is_rndis) | ||
| 418 | value |= temp; | ||
| 419 | else | ||
| 420 | value &= ~temp; | ||
| 421 | musb_writel(tibase, DAVINCI_RNDIS_REG, value); | ||
| 422 | c->is_rndis = is_rndis; | ||
| 423 | } | ||
| 424 | } | ||
| 425 | |||
| 426 | static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd) | ||
| 427 | { | ||
| 428 | pr_debug("RXBD/%s %08x: " | ||
| 429 | "nxt %08x buf %08x off.blen %08x opt.plen %08x\n", | ||
| 430 | tag, bd->dma, | ||
| 431 | bd->hw_next, bd->hw_bufp, bd->hw_off_len, | ||
| 432 | bd->hw_options); | ||
| 433 | } | ||
| 434 | |||
| 435 | static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx) | ||
| 436 | { | ||
| 437 | #if MUSB_DEBUG > 0 | ||
| 438 | struct cppi_descriptor *bd; | ||
| 439 | |||
| 440 | if (!_dbg_level(level)) | ||
| 441 | return; | ||
| 442 | cppi_dump_rx(level, rx, tag); | ||
| 443 | if (rx->last_processed) | ||
| 444 | cppi_dump_rxbd("last", rx->last_processed); | ||
| 445 | for (bd = rx->head; bd; bd = bd->next) | ||
| 446 | cppi_dump_rxbd("active", bd); | ||
| 447 | #endif | ||
| 448 | } | ||
| 449 | |||
| 450 | |||
| 451 | /* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX; | ||
| 452 | * so we won't ever use it (see "CPPI RX Woes" below). | ||
| 453 | */ | ||
| 454 | static inline int cppi_autoreq_update(struct cppi_channel *rx, | ||
| 455 | void __iomem *tibase, int onepacket, unsigned n_bds) | ||
| 456 | { | ||
| 457 | u32 val; | ||
| 458 | |||
| 459 | #ifdef RNDIS_RX_IS_USABLE | ||
| 460 | u32 tmp; | ||
| 461 | /* assert(is_host_active(musb)) */ | ||
| 462 | |||
| 463 | /* start from "AutoReq never" */ | ||
| 464 | tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); | ||
| 465 | val = tmp & ~((0x3) << (rx->index * 2)); | ||
| 466 | |||
| 467 | /* HCD arranged reqpkt for packet #1. we arrange int | ||
| 468 | * for all but the last one, maybe in two segments. | ||
| 469 | */ | ||
| 470 | if (!onepacket) { | ||
| 471 | #if 0 | ||
| 472 | /* use two segments, autoreq "all" then the last "never" */ | ||
| 473 | val |= ((0x3) << (rx->index * 2)); | ||
| 474 | n_bds--; | ||
| 475 | #else | ||
| 476 | /* one segment, autoreq "all-but-last" */ | ||
| 477 | val |= ((0x1) << (rx->index * 2)); | ||
| 478 | #endif | ||
| 479 | } | ||
| 480 | |||
| 481 | if (val != tmp) { | ||
| 482 | int n = 100; | ||
| 483 | |||
| 484 | /* make sure that autoreq is updated before continuing */ | ||
| 485 | musb_writel(tibase, DAVINCI_AUTOREQ_REG, val); | ||
| 486 | do { | ||
| 487 | tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); | ||
| 488 | if (tmp == val) | ||
| 489 | break; | ||
| 490 | cpu_relax(); | ||
| 491 | } while (n-- > 0); | ||
| 492 | } | ||
| 493 | #endif | ||
| 494 | |||
| 495 | /* REQPKT is turned off after each segment */ | ||
| 496 | if (n_bds && rx->channel.actual_len) { | ||
| 497 | void __iomem *regs = rx->hw_ep->regs; | ||
| 498 | |||
| 499 | val = musb_readw(regs, MUSB_RXCSR); | ||
| 500 | if (!(val & MUSB_RXCSR_H_REQPKT)) { | ||
| 501 | val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS; | ||
| 502 | musb_writew(regs, MUSB_RXCSR, val); | ||
| 503 | /* flush write buffer */ | ||
| 504 | val = musb_readw(regs, MUSB_RXCSR); | ||
| 505 | } | ||
| 506 | } | ||
| 507 | return n_bds; | ||
| 508 | } | ||
| 509 | |||
| 510 | |||
| 511 | /* Buffer enqueuing Logic: | ||
| 512 | * | ||
| 513 | * - RX builds new queues each time, to help handle routine "early | ||
| 514 | * termination" cases (faults, including errors and short reads) | ||
| 515 | * more correctly. | ||
| 516 | * | ||
| 517 | * - for now, TX reuses the same queue of BDs every time | ||
| 518 | * | ||
| 519 | * REVISIT long term, we want a normal dynamic model. | ||
| 520 | * ... the goal will be to append to the | ||
| 521 | * existing queue, processing completed "dma buffers" (segments) on the fly. | ||
| 522 | * | ||
| 523 | * Otherwise we force an IRQ latency between requests, which slows us a lot | ||
| 524 | * (especially in "transparent" dma). Unfortunately that model seems to be | ||
| 525 | * inherent in the DMA model from the Mentor code, except in the rare case | ||
| 526 | * of transfers big enough (~128+ KB) that we could append "middle" segments | ||
| 527 | * in the TX paths. (RX can't do this, see below.) | ||
| 528 | * | ||
| 529 | * That's true even in the CPPI-friendly iso case, where most urbs have | ||
| 530 | * several small segments provided in a group and where the "packet at a time" | ||
| 531 | * "transparent" DMA model is always correct, even on the RX side. | ||
| 532 | */ | ||
| 533 | |||
| 534 | /* | ||
| 535 | * CPPI TX: | ||
| 536 | * ======== | ||
| 537 | * TX is a lot more reasonable than RX; it doesn't need to run in | ||
| 538 | * irq-per-packet mode very often. RNDIS mode seems to behave well too | ||
| 539 | * (except for how it handles the exactly-N-packets case). Building a | ||
| 540 | * txdma queue with multiple requests (urb or usb_request) looks | ||
| 541 | * like it would work ... but fault handling would need much testing. | ||
| 542 | * | ||
| 543 | * The main issue with TX mode RNDIS relates to transfer lengths that | ||
| 544 | * are an exact multiple of the packet length. It appears that there's | ||
| 545 | * a hiccup in that case (maybe the DMA completes before the ZLP gets | ||
| 546 | * written?) boiling down to not being able to rely on CPPI writing any | ||
| 547 | * terminating zero length packet before the next transfer is written. | ||
| 548 | * So that's punted to PIO; better yet, gadget drivers can avoid it. | ||
| 549 | * | ||
| 550 | * Plus, there's allegedly an undocumented constraint that rndis transfer | ||
| 551 | * length be a multiple of 64 bytes ... but the chip doesn't act that | ||
| 552 | * way, and we really don't _want_ that behavior anyway. | ||
| 553 | * | ||
| 554 | * On TX, "transparent" mode works ... although experiments have shown | ||
| 555 | * problems trying to use the SOP/EOP bits in different USB packets. | ||
| 556 | * | ||
| 557 | * REVISIT try to handle terminating zero length packets using CPPI | ||
| 558 | * instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet | ||
| 559 | * links avoid that issue by forcing them to avoid zlps.) | ||
| 560 | */ | ||
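| | /* Illustrative sketch, not part of the driver: the constraints above | ||
| | * reduce to a simple eligibility test for TX RNDIS mode, which | ||
| | * cppi_next_tx_segment() below applies inline. The helper name | ||
| | * cppi_tx_rndis_ok() exists only for this sketch. | ||
| | */ | ||
| | #if 0 | ||
| | static inline int cppi_tx_rndis_ok(unsigned maxpacket, size_t length) | ||
| | { | ||
| | return (maxpacket & 0x3f) == 0 /* multiple of 64 bytes */ | ||
| | && length < 0xffff /* fits in one descriptor */ | ||
| | && (length % maxpacket) != 0; /* ends with a short packet */ | ||
| | } | ||
| | #endif | ||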
| 561 | static void | ||
| 562 | cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) | ||
| 563 | { | ||
| 564 | unsigned maxpacket = tx->maxpacket; | ||
| 565 | dma_addr_t addr = tx->buf_dma + tx->offset; | ||
| 566 | size_t length = tx->buf_len - tx->offset; | ||
| 567 | struct cppi_descriptor *bd; | ||
| 568 | unsigned n_bds; | ||
| 569 | unsigned i; | ||
| 570 | struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram; | ||
| 571 | int rndis; | ||
| 572 | |||
| 573 | /* TX can use the CPPI "rndis" mode, where we can probably fit this | ||
| 574 | * transfer in one BD and one IRQ. The only time we would NOT want | ||
| 575 | * to use it is when hardware constraints prevent it, or if we'd | ||
| 576 | * trigger the "send a ZLP?" confusion. | ||
| 577 | */ | ||
| 578 | rndis = (maxpacket & 0x3f) == 0 | ||
| 579 | && length < 0xffff | ||
| 580 | && (length % maxpacket) != 0; | ||
| 581 | |||
| 582 | if (rndis) { | ||
| 583 | maxpacket = length; | ||
| 584 | n_bds = 1; | ||
| 585 | } else { | ||
| 586 | n_bds = length / maxpacket; | ||
| 587 | if (!length || (length % maxpacket)) | ||
| 588 | n_bds++; | ||
| 589 | n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD); | ||
| 590 | length = min(n_bds * maxpacket, length); | ||
| 591 | } | ||
| 592 | |||
| 593 | DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n", | ||
| 594 | tx->index, | ||
| 595 | maxpacket, | ||
| 596 | rndis ? "rndis" : "transparent", | ||
| 597 | n_bds, | ||
| 598 | addr, length); | ||
| 599 | |||
| 600 | cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); | ||
| 601 | |||
| 602 | /* assuming here that channel_program is called during | ||
| 603 | * transfer initiation ... current code maintains state | ||
| 604 | * for one outstanding request only (no queues, not even | ||
| 605 | * the implicit ones of an iso urb). | ||
| 606 | */ | ||
| 607 | |||
| 608 | bd = tx->freelist; | ||
| 609 | tx->head = bd; | ||
| 610 | tx->last_processed = NULL; | ||
| 611 | |||
| 612 | /* FIXME use BD pool like RX side does, and just queue | ||
| 613 | * the minimum number for this request. | ||
| 614 | */ | ||
| 615 | |||
| 616 | /* Prepare queue of BDs first, then hand it to hardware. | ||
| 617 | * All BDs except maybe the last should be of full packet | ||
| 618 | * size; for RNDIS there _is_ only that last packet. | ||
| 619 | */ | ||
| 620 | for (i = 0; i < n_bds; ) { | ||
| 621 | if (++i < n_bds && bd->next) | ||
| 622 | bd->hw_next = bd->next->dma; | ||
| 623 | else | ||
| 624 | bd->hw_next = 0; | ||
| 625 | |||
| 626 | bd->hw_bufp = tx->buf_dma + tx->offset; | ||
| 627 | |||
| 628 | /* FIXME set EOP only on the last packet, | ||
| 629 | * SOP only on the first ... avoid IRQs | ||
| 630 | */ | ||
| 631 | if ((tx->offset + maxpacket) <= tx->buf_len) { | ||
| 632 | tx->offset += maxpacket; | ||
| 633 | bd->hw_off_len = maxpacket; | ||
| 634 | bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET | ||
| 635 | | CPPI_OWN_SET | maxpacket; | ||
| 636 | } else { | ||
| 637 | /* only this one may be a partial USB Packet */ | ||
| 638 | u32 partial_len; | ||
| 639 | |||
| 640 | partial_len = tx->buf_len - tx->offset; | ||
| 641 | tx->offset = tx->buf_len; | ||
| 642 | bd->hw_off_len = partial_len; | ||
| 643 | |||
| 644 | bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET | ||
| 645 | | CPPI_OWN_SET | partial_len; | ||
| 646 | if (partial_len == 0) | ||
| 647 | bd->hw_options |= CPPI_ZERO_SET; | ||
| 648 | } | ||
| 649 | |||
| 650 | DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n", | ||
| 651 | bd, bd->hw_next, bd->hw_bufp, | ||
| 652 | bd->hw_off_len, bd->hw_options); | ||
| 653 | |||
| 654 | /* update the last BD enqueued to the list */ | ||
| 655 | tx->tail = bd; | ||
| 656 | bd = bd->next; | ||
| 657 | } | ||
| 658 | |||
| 659 | /* BDs live in DMA-coherent memory, but writes might be pending */ | ||
| 660 | cpu_drain_writebuffer(); | ||
| 661 | |||
| 662 | /* Write to the HeadPtr in state RAM to trigger */ | ||
| 663 | musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma); | ||
| 664 | |||
| 665 | cppi_dump_tx(5, tx, "/S"); | ||
| 666 | } | ||
| 667 | |||
| 668 | /* | ||
| 669 | * CPPI RX Woes: | ||
| 670 | * ============= | ||
| 671 | * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte | ||
| 672 | * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back. | ||
| 673 | * (Full speed transfers have similar scenarios.) | ||
| 674 | * | ||
| 675 | * The correct behavior for Linux is that (a) fills the buffer with 300 bytes, | ||
| 676 | * and the next packet goes into a buffer that's queued later; while (b) fills | ||
| 677 | * the buffer with 1024 bytes. How to do that with CPPI? | ||
| 678 | * | ||
| 679 | * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but | ||
| 680 | * (b) loses **BADLY** because nothing (!) happens when that second packet | ||
| 681 | * fills the buffer, much less when a third one arrives. (Which makes this | ||
| 682 | * not a "true" RNDIS mode. In the RNDIS protocol short-packet termination | ||
| 683 | * is optional, and it's fine if peripherals -- not hosts! -- pad messages | ||
| 684 | * out to end-of-buffer. Standard PCI host controller DMA descriptors | ||
| 685 | * implement that mode by default ... which is no accident.) | ||
| 686 | * | ||
| 687 | * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have | ||
| 688 | * converse problems: (b) is handled right, but (a) loses badly. CPPI RX | ||
| 689 | * ignores SOP/EOP markings and processes both of those BDs; so both packets | ||
| 690 | * are loaded into the buffer (with a 212 byte gap between them), and the next | ||
| 691 | * buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP | ||
| 692 | * are intended as outputs for RX queues, not inputs...) | ||
| 693 | * | ||
| 694 | * - A variant of "transparent" mode -- one BD at a time -- is the only way to | ||
| 695 | * make both cases work reliably, with software handling each correctly, | ||
| 696 | * at the significant penalty of needing an IRQ per packet. (The lack of | ||
| 697 | * I/O overlap can be slightly ameliorated by enabling double buffering.) | ||
| 698 | * | ||
| 699 | * So how to get rid of IRQ-per-packet? The transparent multi-BD case could | ||
| 700 | * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK | ||
| 701 | * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors | ||
| 702 | * with guaranteed driver level fault recovery and scrubbing out what's left | ||
| 703 | * of that garbaged datastream. | ||
| 704 | * | ||
| 705 | * But there seems to be no way to identify the cases where CPPI RNDIS mode | ||
| 706 | * is appropriate -- which do NOT include RNDIS host drivers, but do include | ||
| 707 | * the CDC Ethernet driver! -- and the documentation is incomplete/wrong. | ||
| 708 | * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic | ||
| 709 | * that applies best on the peripheral side (and which could fail rudely). | ||
| 710 | * | ||
| 711 | * Leaving only "transparent" mode; we avoid multi-bd modes in almost all | ||
| 712 | * cases other than mass storage class. Otherwise we're correct but slow, | ||
| 713 | * since CPPI penalizes our need for a "true RNDIS" default mode. | ||
| 714 | */ | ||
| 715 | |||
| 716 | |||
| 717 | /* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY | ||
| 718 | * | ||
| 719 | * IFF | ||
| 720 | * (a) peripheral mode ... since rndis peripherals could pad their | ||
| 721 | * writes to hosts, causing i/o failure; or we'd have to cope with | ||
| 722 | * a largely unknowable variety of host side protocol variants | ||
| 723 | * (b) and short reads are NOT errors ... since full reads would | ||
| 724 | * cause those same i/o failures | ||
| 725 | * (c) and read length is | ||
| 726 | * - less than 64KB (max per cppi descriptor) | ||
| 727 | * - not a multiple of 4096 (g_zero default, full reads typical) | ||
| 728 | * - N (>1) packets long, ditto (full reads not EXPECTED) | ||
| 729 | * THEN | ||
| 730 | * try rx rndis mode | ||
| 731 | * | ||
| 732 | * Cost of heuristic failing: RXDMA wedges at the end of transfers that | ||
| 733 | * fill out the whole buffer. Buggy host side usb network drivers could | ||
| 734 | * trigger that, but "in the field" such bugs seem to be all but unknown. | ||
| 735 | * | ||
| 736 | * So this module parameter lets the heuristic be disabled; when using | ||
| 737 | * gadgetfs, it will probably need to be turned off. | ||
| 738 | */ | ||
| 739 | static bool cppi_rx_rndis = 1; | ||
| 740 | |||
| 741 | module_param(cppi_rx_rndis, bool, 0); | ||
| 742 | MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic"); | ||
| 743 | |||
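| | /* Illustrative sketch, not part of the driver: the heuristic described | ||
| | * above, written out as a predicate. cppi_next_rx_segment() applies the | ||
| | * same checks inline; cppi_rx_rndis_ok() exists only for this sketch. | ||
| | * Condition (b) -- short reads are not errors -- corresponds to the | ||
| | * caller having requested "onepacket" mode. | ||
| | */ | ||
| | #if 0 | ||
| | static inline int cppi_rx_rndis_ok(struct musb *musb, | ||
| | unsigned maxpacket, size_t length) | ||
| | { | ||
| | return cppi_rx_rndis /* heuristic enabled */ | ||
| | && is_peripheral_active(musb) /* (a) peripheral mode */ | ||
| | && length > maxpacket /* (c) more than one packet */ | ||
| | && (length & ~0xffff) == 0 /* (c) under 64KB */ | ||
| | && (length & 0x0fff) != 0 /* (c) not a 4KB multiple */ | ||
| | && (length & (maxpacket - 1)) == 0; /* whole packets only */ | ||
| | } | ||
| | #endif | ||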
| 744 | |||
| 745 | /** | ||
| 746 | * cppi_next_rx_segment - dma read for the next chunk of a buffer | ||
| 747 | * @musb: the controller | ||
| 748 | * @rx: dma channel | ||
| 749 | * @onepacket: true unless caller treats short reads as errors, and | ||
| 750 | * performs fault recovery above usbcore. | ||
| 751 | * Context: controller irqlocked | ||
| 752 | * | ||
| 753 | * See above notes about why we can't use multi-BD RX queues except in | ||
| 754 | * rare cases (mass storage class), and can never use the hardware "rndis" | ||
| 755 | * mode (since it's not a "true" RNDIS mode) with complete safety. | ||
| 756 | * | ||
| 757 | * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in | ||
| 758 | * code to recover from corrupted datastreams after each short transfer. | ||
| 759 | */ | ||
| 760 | static void | ||
| 761 | cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) | ||
| 762 | { | ||
| 763 | unsigned maxpacket = rx->maxpacket; | ||
| 764 | dma_addr_t addr = rx->buf_dma + rx->offset; | ||
| 765 | size_t length = rx->buf_len - rx->offset; | ||
| 766 | struct cppi_descriptor *bd, *tail; | ||
| 767 | unsigned n_bds; | ||
| 768 | unsigned i; | ||
| 769 | void __iomem *tibase = musb->ctrl_base; | ||
| 770 | int is_rndis = 0; | ||
| 771 | struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram; | ||
| 772 | |||
| 773 | if (onepacket) { | ||
| 774 | /* almost every USB driver, host or peripheral side */ | ||
| 775 | n_bds = 1; | ||
| 776 | |||
| 777 | /* maybe apply the heuristic above */ | ||
| 778 | if (cppi_rx_rndis | ||
| 779 | && is_peripheral_active(musb) | ||
| 780 | && length > maxpacket | ||
| 781 | && (length & ~0xffff) == 0 | ||
| 782 | && (length & 0x0fff) != 0 | ||
| 783 | && (length & (maxpacket - 1)) == 0) { | ||
| 784 | maxpacket = length; | ||
| 785 | is_rndis = 1; | ||
| 786 | } | ||
| 787 | } else { | ||
| 788 | /* virtually nothing except mass storage class */ | ||
| 789 | if (length > 0xffff) { | ||
| 790 | n_bds = 0xffff / maxpacket; | ||
| 791 | length = n_bds * maxpacket; | ||
| 792 | } else { | ||
| 793 | n_bds = length / maxpacket; | ||
| 794 | if (length % maxpacket) | ||
| 795 | n_bds++; | ||
| 796 | } | ||
| 797 | if (n_bds == 1) | ||
| 798 | onepacket = 1; | ||
| 799 | else | ||
| 800 | n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD); | ||
| 801 | } | ||
| 802 | |||
| 803 | /* In host mode, autorequest logic can generate some IN tokens; it's | ||
| 804 | * tricky since we can't leave REQPKT set in RXCSR after the transfer | ||
| 805 | * finishes. So: multipacket transfers involve two or more segments. | ||
| 806 | * And always at least two IRQs ... RNDIS mode is not an option. | ||
| 807 | */ | ||
| 808 | if (is_host_active(musb)) | ||
| 809 | n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds); | ||
| 810 | |||
| 811 | cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis); | ||
| 812 | |||
| 813 | length = min(n_bds * maxpacket, length); | ||
| 814 | |||
| 815 | DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " | ||
| 816 | "dma 0x%x len %u %u/%u\n", | ||
| 817 | rx->index, maxpacket, | ||
| 818 | onepacket | ||
| 819 | ? (is_rndis ? "rndis" : "onepacket") | ||
| 820 | : "multipacket", | ||
| 821 | n_bds, | ||
| 822 | musb_readl(tibase, | ||
| 823 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) | ||
| 824 | & 0xffff, | ||
| 825 | addr, length, rx->channel.actual_len, rx->buf_len); | ||
| 826 | |||
| 827 | /* only queue one segment at a time, since the hardware prevents | ||
| 828 | * correct queue shutdown after unexpected short packets | ||
| 829 | */ | ||
| 830 | bd = cppi_bd_alloc(rx); | ||
| 831 | rx->head = bd; | ||
| 832 | |||
| 833 | /* Build BDs for all packets in this segment */ | ||
| 834 | for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) { | ||
| 835 | u32 bd_len; | ||
| 836 | |||
| 837 | if (i) { | ||
| 838 | bd = cppi_bd_alloc(rx); | ||
| 839 | if (!bd) | ||
| 840 | break; | ||
| 841 | tail->next = bd; | ||
| 842 | tail->hw_next = bd->dma; | ||
| 843 | } | ||
| 844 | bd->hw_next = 0; | ||
| 845 | |||
| 846 | /* all but the last packet will be maxpacket size */ | ||
| 847 | if (maxpacket < length) | ||
| 848 | bd_len = maxpacket; | ||
| 849 | else | ||
| 850 | bd_len = length; | ||
| 851 | |||
| 852 | bd->hw_bufp = addr; | ||
| 853 | addr += bd_len; | ||
| 854 | rx->offset += bd_len; | ||
| 855 | |||
| 856 | bd->hw_off_len = (0 /*offset*/ << 16) + bd_len; | ||
| 857 | bd->buflen = bd_len; | ||
| 858 | |||
| 859 | bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0); | ||
| 860 | length -= bd_len; | ||
| 861 | } | ||
| 862 | |||
| 863 | /* we always expect at least one reusable BD! */ | ||
| 864 | if (!tail) { | ||
| 865 | WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds); | ||
| 866 | return; | ||
| 867 | } else if (i < n_bds) | ||
| 868 | WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds); | ||
| 869 | |||
| 870 | tail->next = NULL; | ||
| 871 | tail->hw_next = 0; | ||
| 872 | |||
| 873 | bd = rx->head; | ||
| 874 | rx->tail = tail; | ||
| 875 | |||
| 876 | /* short reads and other faults should terminate this entire | ||
| 877 | * dma segment. we want one "dma packet" per dma segment, not | ||
| 878 | * one per USB packet, terminating the whole queue at once... | ||
| 879 | * NOTE that current hardware seems to ignore SOP and EOP. | ||
| 880 | */ | ||
| 881 | bd->hw_options |= CPPI_SOP_SET; | ||
| 882 | tail->hw_options |= CPPI_EOP_SET; | ||
| 883 | |||
| 884 | if (debug >= 5) { | ||
| 885 | struct cppi_descriptor *d; | ||
| 886 | |||
| 887 | for (d = rx->head; d; d = d->next) | ||
| 888 | cppi_dump_rxbd("S", d); | ||
| 889 | } | ||
| 890 | |||
| 891 | /* in case the preceding transfer left some state... */ | ||
| 892 | tail = rx->last_processed; | ||
| 893 | if (tail) { | ||
| 894 | tail->next = bd; | ||
| 895 | tail->hw_next = bd->dma; | ||
| 896 | } | ||
| 897 | |||
| 898 | core_rxirq_enable(tibase, rx->index + 1); | ||
| 899 | |||
| 900 | /* BDs live in DMA-coherent memory, but writes might be pending */ | ||
| 901 | cpu_drain_writebuffer(); | ||
| 902 | |||
| 903 | /* REVISIT specs say to write this AFTER the BUFCNT register | ||
| 904 | * below ... but that loses badly. | ||
| 905 | */ | ||
| 906 | musb_writel(&rx_ram->rx_head, 0, bd->dma); | ||
| 907 | |||
| 908 | /* bufferCount must be at least 3, and zeroes on completion | ||
| 909 | * unless it underflows below zero, or stops at two, or keeps | ||
| 910 | * growing ... grr. | ||
| 911 | */ | ||
| 912 | i = musb_readl(tibase, | ||
| 913 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) | ||
| 914 | & 0xffff; | ||
| 915 | |||
| 916 | if (!i) | ||
| 917 | musb_writel(tibase, | ||
| 918 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), | ||
| 919 | n_bds + 2); | ||
| 920 | else if (n_bds > (i - 3)) | ||
| 921 | musb_writel(tibase, | ||
| 922 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), | ||
| 923 | n_bds - (i - 3)); | ||
| 924 | |||
| 925 | i = musb_readl(tibase, | ||
| 926 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) | ||
| 927 | & 0xffff; | ||
| 928 | if (i < (2 + n_bds)) { | ||
| 929 | DBG(2, "bufcnt%d underrun - %d (for %d)\n", | ||
| 930 | rx->index, i, n_bds); | ||
| 931 | musb_writel(tibase, | ||
| 932 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), | ||
| 933 | n_bds + 2); | ||
| 934 | } | ||
| 935 | |||
| 936 | cppi_dump_rx(4, rx, "/S"); | ||
| 937 | } | ||
| 938 | |||
| 939 | /** | ||
| 940 | * cppi_channel_program - program channel for data transfer | ||
| 941 | * @ch: the channel | ||
| 942 | * @maxpacket: max packet size | ||
| 943 | * @mode: For RX, 1 unless the usb protocol driver promised to treat | ||
| 944 | * all short reads as errors and kick in high level fault recovery. | ||
| 945 | * For TX, ignored because of RNDIS mode races/glitches. | ||
| 946 | * @dma_addr: dma address of buffer | ||
| 947 | * @len: length of buffer | ||
| 948 | * Context: controller irqlocked | ||
| 949 | */ | ||
| 950 | static int cppi_channel_program(struct dma_channel *ch, | ||
| 951 | u16 maxpacket, u8 mode, | ||
| 952 | dma_addr_t dma_addr, u32 len) | ||
| 953 | { | ||
| 954 | struct cppi_channel *cppi_ch; | ||
| 955 | struct cppi *controller; | ||
| 956 | struct musb *musb; | ||
| 957 | |||
| 958 | cppi_ch = container_of(ch, struct cppi_channel, channel); | ||
| 959 | controller = cppi_ch->controller; | ||
| 960 | musb = controller->musb; | ||
| 961 | |||
| 962 | switch (ch->status) { | ||
| 963 | case MUSB_DMA_STATUS_BUS_ABORT: | ||
| 964 | case MUSB_DMA_STATUS_CORE_ABORT: | ||
| 965 | /* fault irq handler should have handled cleanup */ | ||
| 966 | WARNING("%cX DMA%d not cleaned up after abort!\n", | ||
| 967 | cppi_ch->transmit ? 'T' : 'R', | ||
| 968 | cppi_ch->index); | ||
| 969 | /* WARN_ON(1); */ | ||
| 970 | break; | ||
| 971 | case MUSB_DMA_STATUS_BUSY: | ||
| 972 | WARNING("program active channel? %cX DMA%d\n", | ||
| 973 | cppi_ch->transmit ? 'T' : 'R', | ||
| 974 | cppi_ch->index); | ||
| 975 | /* WARN_ON(1); */ | ||
| 976 | break; | ||
| 977 | case MUSB_DMA_STATUS_UNKNOWN: | ||
| 978 | DBG(1, "%cX DMA%d not allocated!\n", | ||
| 979 | cppi_ch->transmit ? 'T' : 'R', | ||
| 980 | cppi_ch->index); | ||
| 981 | /* FALLTHROUGH */ | ||
| 982 | case MUSB_DMA_STATUS_FREE: | ||
| 983 | break; | ||
| 984 | } | ||
| 985 | |||
| 986 | ch->status = MUSB_DMA_STATUS_BUSY; | ||
| 987 | |||
| 988 | /* set transfer parameters, then queue up its first segment */ | ||
| 989 | cppi_ch->buf_dma = dma_addr; | ||
| 990 | cppi_ch->offset = 0; | ||
| 991 | cppi_ch->maxpacket = maxpacket; | ||
| 992 | cppi_ch->buf_len = len; | ||
| 993 | |||
| 994 | /* TX channel? or RX? */ | ||
| 995 | if (cppi_ch->transmit) | ||
| 996 | cppi_next_tx_segment(musb, cppi_ch); | ||
| 997 | else | ||
| 998 | cppi_next_rx_segment(musb, cppi_ch, mode); | ||
| 999 | |||
| 1000 | return true; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) | ||
| 1004 | { | ||
| 1005 | struct cppi_channel *rx = &cppi->rx[ch]; | ||
| 1006 | struct cppi_rx_stateram __iomem *state = rx->state_ram; | ||
| 1007 | struct cppi_descriptor *bd; | ||
| 1008 | struct cppi_descriptor *last = rx->last_processed; | ||
| 1009 | bool completed = false; | ||
| 1010 | bool acked = false; | ||
| 1011 | int i; | ||
| 1012 | dma_addr_t safe2ack; | ||
| 1013 | void __iomem *regs = rx->hw_ep->regs; | ||
| 1014 | |||
| 1015 | cppi_dump_rx(6, rx, "/K"); | ||
| 1016 | |||
| 1017 | bd = last ? last->next : rx->head; | ||
| 1018 | if (!bd) | ||
| 1019 | return false; | ||
| 1020 | |||
| 1021 | /* run through all completed BDs */ | ||
| 1022 | for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0); | ||
| 1023 | (safe2ack || completed) && bd && i < NUM_RXCHAN_BD; | ||
| 1024 | i++, bd = bd->next) { | ||
| 1025 | u16 len; | ||
| 1026 | |||
| 1027 | /* catch latest BD writes from CPPI */ | ||
| 1028 | rmb(); | ||
| 1029 | if (!completed && (bd->hw_options & CPPI_OWN_SET)) | ||
| 1030 | break; | ||
| 1031 | |||
| 1032 | DBG(5, "C/RXBD %08x: nxt %08x buf %08x " | ||
| 1033 | "off.len %08x opt.len %08x (%d)\n", | ||
| 1034 | bd->dma, bd->hw_next, bd->hw_bufp, | ||
| 1035 | bd->hw_off_len, bd->hw_options, | ||
| 1036 | rx->channel.actual_len); | ||
| 1037 | |||
| 1038 | /* actual packet received length */ | ||
| 1039 | if ((bd->hw_options & CPPI_SOP_SET) && !completed) | ||
| 1040 | len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK; | ||
| 1041 | else | ||
| 1042 | len = 0; | ||
| 1043 | |||
| 1044 | if (bd->hw_options & CPPI_EOQ_MASK) | ||
| 1045 | completed = true; | ||
| 1046 | |||
| 1047 | if (!completed && len < bd->buflen) { | ||
| 1048 | /* NOTE: when we get a short packet, RXCSR_H_REQPKT | ||
| 1049 | * must have been cleared, and no more DMA packets may | ||
| 1050 | * be active in the queue... TI docs didn't say, but | ||
| 1051 | * CPPI ignores those BDs even though OWN is still set. | ||
| 1052 | */ | ||
| 1053 | completed = true; | ||
| 1054 | DBG(3, "rx short %d/%d (%d)\n", | ||
| 1055 | len, bd->buflen, | ||
| 1056 | rx->channel.actual_len); | ||
| 1057 | } | ||
| 1058 | |||
| 1059 | /* If we got here, we expect to ack at least one BD; meanwhile | ||
| 1060 | * CPPI may be completing other BDs while we scan this list... | ||
| 1061 | * | ||
| 1062 | * RACE: we can notice OWN cleared before CPPI raises the | ||
| 1063 | * matching irq by writing that BD as the completion pointer. | ||
| 1064 | * In such cases, stop scanning and wait for the irq, avoiding | ||
| 1065 | * lost acks and states where BD ownership is unclear. | ||
| 1066 | */ | ||
| 1067 | if (bd->dma == safe2ack) { | ||
| 1068 | musb_writel(&state->rx_complete, 0, safe2ack); | ||
| 1069 | safe2ack = musb_readl(&state->rx_complete, 0); | ||
| 1070 | acked = true; | ||
| 1071 | if (bd->dma == safe2ack) | ||
| 1072 | safe2ack = 0; | ||
| 1073 | } | ||
| 1074 | |||
| 1075 | rx->channel.actual_len += len; | ||
| 1076 | |||
| 1077 | cppi_bd_free(rx, last); | ||
| 1078 | last = bd; | ||
| 1079 | |||
| 1080 | /* stop scanning on end-of-segment */ | ||
| 1081 | if (bd->hw_next == 0) | ||
| 1082 | completed = true; | ||
| 1083 | } | ||
| 1084 | rx->last_processed = last; | ||
| 1085 | |||
| 1086 | /* dma abort, lost ack, or ... */ | ||
| 1087 | if (!acked && last) { | ||
| 1088 | int csr; | ||
| 1089 | |||
| 1090 | if (safe2ack == 0 || safe2ack == rx->last_processed->dma) | ||
| 1091 | musb_writel(&state->rx_complete, 0, safe2ack); | ||
| 1092 | if (safe2ack == 0) { | ||
| 1093 | cppi_bd_free(rx, last); | ||
| 1094 | rx->last_processed = NULL; | ||
| 1095 | |||
| 1096 | /* if we land here on the host side, H_REQPKT will | ||
| 1097 | * be clear and we need to restart the queue... | ||
| 1098 | */ | ||
| 1099 | WARN_ON(rx->head); | ||
| 1100 | } | ||
| 1101 | musb_ep_select(cppi->mregs, rx->index + 1); | ||
| 1102 | csr = musb_readw(regs, MUSB_RXCSR); | ||
| 1103 | if (csr & MUSB_RXCSR_DMAENAB) { | ||
| 1104 | DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n", | ||
| 1105 | rx->index, | ||
| 1106 | rx->head, rx->tail, | ||
| 1107 | rx->last_processed | ||
| 1108 | ? rx->last_processed->dma | ||
| 1109 | : 0, | ||
| 1110 | completed ? ", completed" : "", | ||
| 1111 | csr); | ||
| 1112 | cppi_dump_rxq(4, "/what?", rx); | ||
| 1113 | } | ||
| 1114 | } | ||
| 1115 | if (!completed) { | ||
| 1116 | int csr; | ||
| 1117 | |||
| 1118 | rx->head = bd; | ||
| 1119 | |||
| 1120 | /* REVISIT seems like "autoreq all but EOP" doesn't... | ||
| 1121 | * setting it here "should" be racy, but seems to work | ||
| 1122 | */ | ||
| 1123 | csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); | ||
| 1124 | if (is_host_active(cppi->musb) | ||
| 1125 | && bd | ||
| 1126 | && !(csr & MUSB_RXCSR_H_REQPKT)) { | ||
| 1127 | csr |= MUSB_RXCSR_H_REQPKT; | ||
| 1128 | musb_writew(regs, MUSB_RXCSR, | ||
| 1129 | MUSB_RXCSR_H_WZC_BITS | csr); | ||
| 1130 | csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); | ||
| 1131 | } | ||
| 1132 | } else { | ||
| 1133 | rx->head = NULL; | ||
| 1134 | rx->tail = NULL; | ||
| 1135 | } | ||
| 1136 | |||
| 1137 | cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned"); | ||
| 1138 | return completed; | ||
| 1139 | } | ||
| 1140 | |||
| 1141 | void cppi_completion(struct musb *musb, u32 rx, u32 tx) | ||
| 1142 | { | ||
| 1143 | void __iomem *tibase; | ||
| 1144 | int i, index; | ||
| 1145 | struct cppi *cppi; | ||
| 1146 | struct musb_hw_ep *hw_ep = NULL; | ||
| 1147 | |||
| 1148 | cppi = container_of(musb->dma_controller, struct cppi, controller); | ||
| 1149 | |||
| 1150 | tibase = musb->ctrl_base; | ||
| 1151 | |||
| 1152 | /* process TX channels */ | ||
| 1153 | for (index = 0; tx; tx = tx >> 1, index++) { | ||
| 1154 | struct cppi_channel *tx_ch; | ||
| 1155 | struct cppi_tx_stateram __iomem *tx_ram; | ||
| 1156 | bool completed = false; | ||
| 1157 | struct cppi_descriptor *bd; | ||
| 1158 | |||
| 1159 | if (!(tx & 1)) | ||
| 1160 | continue; | ||
| 1161 | |||
| 1162 | tx_ch = cppi->tx + index; | ||
| 1163 | tx_ram = tx_ch->state_ram; | ||
| 1164 | |||
| 1165 | /* FIXME need a cppi_tx_scan() routine, which | ||
| 1166 | * can also be called from abort code | ||
| 1167 | */ | ||
| 1168 | |||
| 1169 | cppi_dump_tx(5, tx_ch, "/E"); | ||
| 1170 | |||
| 1171 | bd = tx_ch->head; | ||
| 1172 | |||
| 1173 | if (NULL == bd) { | ||
| 1174 | DBG(1, "null BD\n"); | ||
| 1175 | continue; | ||
| 1176 | } | ||
| 1177 | |||
| 1178 | /* run through all completed BDs */ | ||
| 1179 | for (i = 0; !completed && bd && i < NUM_TXCHAN_BD; | ||
| 1180 | i++, bd = bd->next) { | ||
| 1181 | u16 len; | ||
| 1182 | |||
| 1183 | /* catch latest BD writes from CPPI */ | ||
| 1184 | rmb(); | ||
| 1185 | if (bd->hw_options & CPPI_OWN_SET) | ||
| 1186 | break; | ||
| 1187 | |||
| 1188 | DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n", | ||
| 1189 | bd, bd->hw_next, bd->hw_bufp, | ||
| 1190 | bd->hw_off_len, bd->hw_options); | ||
| 1191 | |||
| 1192 | len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK; | ||
| 1193 | tx_ch->channel.actual_len += len; | ||
| 1194 | |||
| 1195 | tx_ch->last_processed = bd; | ||
| 1196 | |||
| 1197 | /* write completion register to acknowledge | ||
| 1198 | * processing of completed BDs, and possibly | ||
| 1199 | * release the IRQ; EOQ might not be set ... | ||
| 1200 | * | ||
| 1201 | * REVISIT use the same ack strategy as rx | ||
| 1202 | * | ||
| 1203 | * REVISIT have observed bit 18 set; huh?? | ||
| 1204 | */ | ||
| 1205 | /* if ((bd->hw_options & CPPI_EOQ_MASK)) */ | ||
| 1206 | musb_writel(&tx_ram->tx_complete, 0, bd->dma); | ||
| 1207 | |||
| 1208 | /* stop scanning on end-of-segment */ | ||
| 1209 | if (bd->hw_next == 0) | ||
| 1210 | completed = true; | ||
| 1211 | } | ||
| 1212 | |||
| 1213 | /* on end of segment, maybe go to next one */ | ||
| 1214 | if (completed) { | ||
| 1215 | /* cppi_dump_tx(4, tx_ch, "/complete"); */ | ||
| 1216 | |||
| 1217 | /* transfer more, or report completion */ | ||
| 1218 | if (tx_ch->offset >= tx_ch->buf_len) { | ||
| 1219 | tx_ch->head = NULL; | ||
| 1220 | tx_ch->tail = NULL; | ||
| 1221 | tx_ch->channel.status = MUSB_DMA_STATUS_FREE; | ||
| 1222 | |||
| 1223 | hw_ep = tx_ch->hw_ep; | ||
| 1224 | |||
| 1225 | /* Peripheral role never repurposes the | ||
| 1226 | * endpoint, so immediate completion is | ||
| 1227 | * safe. Host role waits for the fifo | ||
| 1228 | * to empty (TXPKTRDY irq) before going | ||
| 1229 | * to the next queued bulk transfer. | ||
| 1230 | */ | ||
| 1231 | if (is_host_active(cppi->musb)) { | ||
| 1232 | #if 0 | ||
| 1233 | /* WORKAROUND because we may | ||
| 1234 | * not always get TXPKTRDY ... | ||
| 1235 | */ | ||
| 1236 | int csr; | ||
| 1237 | |||
| 1238 | csr = musb_readw(hw_ep->regs, | ||
| 1239 | MUSB_TXCSR); | ||
| 1240 | if (csr & MUSB_TXCSR_TXPKTRDY) | ||
| 1241 | #endif | ||
| 1242 | completed = false; | ||
| 1243 | } | ||
| 1244 | if (completed) | ||
| 1245 | musb_dma_completion(musb, index + 1, 1); | ||
| 1246 | |||
| 1247 | } else { | ||
| 1248 | /* Bigger transfer than we could fit in | ||
| 1249 | * that first batch of descriptors... | ||
| 1250 | */ | ||
| 1251 | cppi_next_tx_segment(musb, tx_ch); | ||
| 1252 | } | ||
| 1253 | } else | ||
| 1254 | tx_ch->head = bd; | ||
| 1255 | } | ||
| 1256 | |||
| 1257 | /* Start processing the RX block */ | ||
| 1258 | for (index = 0; rx; rx = rx >> 1, index++) { | ||
| 1259 | |||
| 1260 | if (rx & 1) { | ||
| 1261 | struct cppi_channel *rx_ch; | ||
| 1262 | |||
| 1263 | rx_ch = cppi->rx + index; | ||
| 1264 | |||
| 1265 | /* let incomplete dma segments finish */ | ||
| 1266 | if (!cppi_rx_scan(cppi, index)) | ||
| 1267 | continue; | ||
| 1268 | |||
| 1269 | /* start another dma segment if needed */ | ||
| 1270 | if (rx_ch->channel.actual_len != rx_ch->buf_len | ||
| 1271 | && rx_ch->channel.actual_len | ||
| 1272 | == rx_ch->offset) { | ||
| 1273 | cppi_next_rx_segment(musb, rx_ch, 1); | ||
| 1274 | continue; | ||
| 1275 | } | ||
| 1276 | |||
| 1277 | /* all segments completed! */ | ||
| 1278 | rx_ch->channel.status = MUSB_DMA_STATUS_FREE; | ||
| 1279 | |||
| 1280 | hw_ep = rx_ch->hw_ep; | ||
| 1281 | |||
| 1282 | core_rxirq_disable(tibase, index + 1); | ||
| 1283 | musb_dma_completion(musb, index + 1, 0); | ||
| 1284 | } | ||
| 1285 | } | ||
| 1286 | |||
| 1287 | /* write to CPPI EOI register to re-enable interrupts */ | ||
| 1288 | musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0); | ||
| 1289 | } | ||
| 1290 | |||
| 1291 | /* Instantiate a software object representing a DMA controller. */ | ||
| 1292 | struct dma_controller *__init | ||
| 1293 | dma_controller_create(struct musb *musb, void __iomem *mregs) | ||
| 1294 | { | ||
| 1295 | struct cppi *controller; | ||
| 1296 | |||
| 1297 | controller = kzalloc(sizeof *controller, GFP_KERNEL); | ||
| 1298 | if (!controller) | ||
| 1299 | return NULL; | ||
| 1300 | |||
| 1301 | controller->mregs = mregs; | ||
| 1302 | controller->tibase = mregs - DAVINCI_BASE_OFFSET; | ||
| 1303 | |||
| 1304 | controller->musb = musb; | ||
| 1305 | controller->controller.start = cppi_controller_start; | ||
| 1306 | controller->controller.stop = cppi_controller_stop; | ||
| 1307 | controller->controller.channel_alloc = cppi_channel_allocate; | ||
| 1308 | controller->controller.channel_release = cppi_channel_release; | ||
| 1309 | controller->controller.channel_program = cppi_channel_program; | ||
| 1310 | controller->controller.channel_abort = cppi_channel_abort; | ||
| 1311 | |||
| 1312 | /* NOTE: allocating from on-chip SRAM would give the least | ||
| 1313 | * contention for memory access, if that ever matters here. | ||
| 1314 | */ | ||
| 1315 | |||
| 1316 | /* setup BufferPool */ | ||
| 1317 | controller->pool = dma_pool_create("cppi", | ||
| 1318 | controller->musb->controller, | ||
| 1319 | sizeof(struct cppi_descriptor), | ||
| 1320 | CPPI_DESCRIPTOR_ALIGN, 0); | ||
| 1321 | if (!controller->pool) { | ||
| 1322 | kfree(controller); | ||
| 1323 | return NULL; | ||
| 1324 | } | ||
| 1325 | |||
| 1326 | return &controller->controller; | ||
| 1327 | } | ||
| 1328 | |||
| 1329 | /* | ||
| 1330 | * Destroy a previously-instantiated DMA controller. | ||
| 1331 | */ | ||
| 1332 | void dma_controller_destroy(struct dma_controller *c) | ||
| 1333 | { | ||
| 1334 | struct cppi *cppi; | ||
| 1335 | |||
| 1336 | cppi = container_of(c, struct cppi, controller); | ||
| 1337 | |||
| 1338 | /* assert: caller stopped the controller first */ | ||
| 1339 | dma_pool_destroy(cppi->pool); | ||
| 1340 | |||
| 1341 | kfree(cppi); | ||
| 1342 | } | ||
| 1343 | |||
| 1344 | /* | ||
| 1345 | * Context: controller irqlocked, endpoint selected | ||
| 1346 | */ | ||
| 1347 | static int cppi_channel_abort(struct dma_channel *channel) | ||
| 1348 | { | ||
| 1349 | struct cppi_channel *cppi_ch; | ||
| 1350 | struct cppi *controller; | ||
| 1351 | void __iomem *mbase; | ||
| 1352 | void __iomem *tibase; | ||
| 1353 | void __iomem *regs; | ||
| 1354 | u32 value; | ||
| 1355 | struct cppi_descriptor *queue; | ||
| 1356 | |||
| 1357 | cppi_ch = container_of(channel, struct cppi_channel, channel); | ||
| 1358 | |||
| 1359 | controller = cppi_ch->controller; | ||
| 1360 | |||
| 1361 | switch (channel->status) { | ||
| 1362 | case MUSB_DMA_STATUS_BUS_ABORT: | ||
| 1363 | case MUSB_DMA_STATUS_CORE_ABORT: | ||
| 1364 | /* from RX or TX fault irq handler */ | ||
| 1365 | case MUSB_DMA_STATUS_BUSY: | ||
| 1366 | /* the hardware needs shutting down */ | ||
| 1367 | regs = cppi_ch->hw_ep->regs; | ||
| 1368 | break; | ||
| 1369 | case MUSB_DMA_STATUS_UNKNOWN: | ||
| 1370 | case MUSB_DMA_STATUS_FREE: | ||
| 1371 | return 0; | ||
| 1372 | default: | ||
| 1373 | return -EINVAL; | ||
| 1374 | } | ||
| 1375 | |||
| 1376 | if (!cppi_ch->transmit && cppi_ch->head) | ||
| 1377 | cppi_dump_rxq(3, "/abort", cppi_ch); | ||
| 1378 | |||
| 1379 | mbase = controller->mregs; | ||
| 1380 | tibase = controller->tibase; | ||
| 1381 | |||
| 1382 | queue = cppi_ch->head; | ||
| 1383 | cppi_ch->head = NULL; | ||
| 1384 | cppi_ch->tail = NULL; | ||
| 1385 | |||
| 1386 | /* REVISIT should rely on caller having done this, | ||
| 1387 | * and caller should rely on us not changing it. | ||
| 1388 | * peripheral code is safe ... check host too. | ||
| 1389 | */ | ||
| 1390 | musb_ep_select(mbase, cppi_ch->index + 1); | ||
| 1391 | |||
| 1392 | if (cppi_ch->transmit) { | ||
| 1393 | struct cppi_tx_stateram __iomem *tx_ram; | ||
| 1394 | int enabled; | ||
| 1395 | |||
| 1396 | /* mask interrupts raised to signal teardown complete. */ | ||
| 1397 | enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG) | ||
| 1398 | & (1 << cppi_ch->index); | ||
| 1399 | if (enabled) | ||
| 1400 | musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG, | ||
| 1401 | (1 << cppi_ch->index)); | ||
| 1402 | |||
| 1403 | /* REVISIT put timeouts on these controller handshakes */ | ||
| 1404 | |||
| 1405 | cppi_dump_tx(6, cppi_ch, " (teardown)"); | ||
| 1406 | |||
| 1407 | /* teardown DMA engine then usb core */ | ||
| 1408 | do { | ||
| 1409 | value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG); | ||
| 1410 | } while (!(value & CPPI_TEAR_READY)); | ||
| 1411 | musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index); | ||
| 1412 | |||
| 1413 | tx_ram = cppi_ch->state_ram; | ||
| 1414 | do { | ||
| 1415 | value = musb_readl(&tx_ram->tx_complete, 0); | ||
| 1416 | } while (0xFFFFFFFC != value); | ||
| 1417 | musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC); | ||
| 1418 | |||
| 1419 | /* FIXME clean up the transfer state ... here? | ||
| 1420 | * the completion routine should get called with | ||
| 1421 | * an appropriate status code. | ||
| 1422 | */ | ||
| 1423 | |||
| 1424 | value = musb_readw(regs, MUSB_TXCSR); | ||
| 1425 | value &= ~MUSB_TXCSR_DMAENAB; | ||
| 1426 | value |= MUSB_TXCSR_FLUSHFIFO; | ||
| 1427 | musb_writew(regs, MUSB_TXCSR, value); | ||
| 1428 | musb_writew(regs, MUSB_TXCSR, value); | ||
| 1429 | |||
| 1430 | /* re-enable interrupt */ | ||
| 1431 | if (enabled) | ||
| 1432 | musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, | ||
| 1433 | (1 << cppi_ch->index)); | ||
| 1434 | |||
| 1435 | /* While we scrub the TX state RAM, ensure that we clean | ||
| 1436 | * up any interrupt that's currently asserted: | ||
| 1437 | * 1. Write to completion Ptr value 0x1 (bit 0 set) | ||
| 1438 | * (write back mode) | ||
| 1439 | * 2. Write to completion Ptr value 0x0 (bit 0 cleared) | ||
| 1440 | * (compare mode) | ||
| 1441 | * The value written is compared (for bits 31:2) and, when | ||
| 1442 | * equal, the interrupt is deasserted. | ||
| 1443 | */ | ||
| 1444 | cppi_reset_tx(tx_ram, 1); | ||
| 1445 | musb_writel(&tx_ram->tx_complete, 0, 0); | ||
| 1446 | |||
| 1447 | cppi_dump_tx(5, cppi_ch, " (done teardown)"); | ||
| 1448 | |||
| 1449 | /* REVISIT tx side _should_ clean up the same way | ||
| 1450 | * as the RX side ... this does no cleanup at all! | ||
| 1451 | */ | ||
| 1452 | |||
| 1453 | } else /* RX */ { | ||
| 1454 | u16 csr; | ||
| 1455 | |||
| 1456 | /* NOTE: docs don't guarantee any of this works ... we | ||
| 1457 | * expect that if the usb core stops telling the cppi core | ||
| 1458 | * to pull more data from it, then it'll be safe to flush | ||
| 1459 | * current RX DMA state iff any pending fifo transfer is done. | ||
| 1460 | */ | ||
| 1461 | |||
| 1462 | core_rxirq_disable(tibase, cppi_ch->index + 1); | ||
| 1463 | |||
| 1464 | /* for host, ensure ReqPkt is never set again */ | ||
| 1465 | if (is_host_active(cppi_ch->controller->musb)) { | ||
| 1466 | value = musb_readl(tibase, DAVINCI_AUTOREQ_REG); | ||
| 1467 | value &= ~((0x3) << (cppi_ch->index * 2)); | ||
| 1468 | musb_writel(tibase, DAVINCI_AUTOREQ_REG, value); | ||
| 1469 | } | ||
| 1470 | |||
| 1471 | csr = musb_readw(regs, MUSB_RXCSR); | ||
| 1472 | |||
| 1473 | /* for host, clear (just) ReqPkt at end of current packet(s) */ | ||
| 1474 | if (is_host_active(cppi_ch->controller->musb)) { | ||
| 1475 | csr |= MUSB_RXCSR_H_WZC_BITS; | ||
| 1476 | csr &= ~MUSB_RXCSR_H_REQPKT; | ||
| 1477 | } else | ||
| 1478 | csr |= MUSB_RXCSR_P_WZC_BITS; | ||
| 1479 | |||
| 1480 | /* clear dma enable */ | ||
| 1481 | csr &= ~(MUSB_RXCSR_DMAENAB); | ||
| 1482 | musb_writew(regs, MUSB_RXCSR, csr); | ||
| 1483 | csr = musb_readw(regs, MUSB_RXCSR); | ||
| 1484 | |||
| 1485 | /* Quiesce: wait for current dma to finish (if not cleanup). | ||
| 1486 | * We can't use bit zero of stateram->rx_sop, since that | ||
| 1487 | * refers to an entire "DMA packet" not just emptying the | ||
| 1488 | * current fifo. Most segments need multiple usb packets. | ||
| 1489 | */ | ||
| 1490 | if (channel->status == MUSB_DMA_STATUS_BUSY) | ||
| 1491 | udelay(50); | ||
| 1492 | |||
| 1493 | /* scan the current list, reporting any data that was | ||
| 1494 | * transferred and acking any IRQ | ||
| 1495 | */ | ||
| 1496 | cppi_rx_scan(controller, cppi_ch->index); | ||
| 1497 | |||
| 1498 | /* clobber the existing state once it's idle | ||
| 1499 | * | ||
| 1500 | * NOTE: arguably, we should also wait for all the other | ||
| 1501 | * RX channels to quiesce (how??) and then temporarily | ||
| 1502 | * disable RXCPPI_CTRL_REG ... but it seems that we can | ||
| 1503 | * rely on the controller restarting from state ram, with | ||
| 1504 | * only RXCPPI_BUFCNT state being bogus. BUFCNT will | ||
| 1505 | * correct itself after the next DMA transfer though. | ||
| 1506 | * | ||
| 1507 | * REVISIT does using rndis mode change that? | ||
| 1508 | */ | ||
| 1509 | cppi_reset_rx(cppi_ch->state_ram); | ||
| 1510 | |||
| 1511 | /* next DMA request _should_ load cppi head ptr */ | ||
| 1512 | |||
| 1513 | /* ... we don't "free" that list, only mutate it in place. */ | ||
| 1514 | cppi_dump_rx(5, cppi_ch, " (done abort)"); | ||
| 1515 | |||
| 1516 | /* clean up previously pending bds */ | ||
| 1517 | cppi_bd_free(cppi_ch, cppi_ch->last_processed); | ||
| 1518 | cppi_ch->last_processed = NULL; | ||
| 1519 | |||
| 1520 | while (queue) { | ||
| 1521 | struct cppi_descriptor *tmp = queue->next; | ||
| 1522 | |||
| 1523 | cppi_bd_free(cppi_ch, queue); | ||
| 1524 | queue = tmp; | ||
| 1525 | } | ||
| 1526 | } | ||
| 1527 | |||
| 1528 | channel->status = MUSB_DMA_STATUS_FREE; | ||
| 1529 | cppi_ch->buf_dma = 0; | ||
| 1530 | cppi_ch->offset = 0; | ||
| 1531 | cppi_ch->buf_len = 0; | ||
| 1532 | cppi_ch->maxpacket = 0; | ||
| 1533 | return 0; | ||
| 1534 | } | ||
| 1535 | |||
| 1536 | /* TBD Queries: | ||
| 1537 | * | ||
| 1538 | * Power Management ... probably turn off cppi during suspend, restart; | ||
| 1539 | * check state ram? Clocking is presumably shared with usb core. | ||
| 1540 | */ | ||
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h new file mode 100644 index 000000000000..fc5216b5d2c5 --- /dev/null +++ b/drivers/usb/musb/cppi_dma.h | |||
| @@ -0,0 +1,133 @@ | |||
| 1 | /* Copyright (C) 2005-2006 by Texas Instruments */ | ||
| 2 | |||
| 3 | #ifndef _CPPI_DMA_H_ | ||
| 4 | #define _CPPI_DMA_H_ | ||
| 5 | |||
| 6 | #include <linux/slab.h> | ||
| 7 | #include <linux/list.h> | ||
| 8 | #include <linux/smp_lock.h> | ||
| 9 | #include <linux/errno.h> | ||
| 10 | #include <linux/dmapool.h> | ||
| 11 | |||
| 12 | #include "musb_dma.h" | ||
| 13 | #include "musb_core.h" | ||
| 14 | |||
| 15 | |||
| 16 | /* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers | ||
| 17 | * would seem to be shared with the TUSB6020 (over VLYNQ). | ||
| 18 | */ | ||
| 19 | |||
| 20 | #include "davinci.h" | ||
| 21 | |||
| 22 | |||
| 23 | /* CPPI RX/TX state RAM */ | ||
| 24 | |||
| 25 | struct cppi_tx_stateram { | ||
| 26 | u32 tx_head; /* "DMA packet" head descriptor */ | ||
| 27 | u32 tx_buf; | ||
| 28 | u32 tx_current; /* current descriptor */ | ||
| 29 | u32 tx_buf_current; | ||
| 30 | u32 tx_info; /* flags, remaining buflen */ | ||
| 31 | u32 tx_rem_len; | ||
| 32 | u32 tx_dummy; /* unused */ | ||
| 33 | u32 tx_complete; | ||
| 34 | }; | ||
| 35 | |||
| 36 | struct cppi_rx_stateram { | ||
| 37 | u32 rx_skipbytes; | ||
| 38 | u32 rx_head; | ||
| 39 | u32 rx_sop; /* "DMA packet" head descriptor */ | ||
| 40 | u32 rx_current; /* current descriptor */ | ||
| 41 | u32 rx_buf_current; | ||
| 42 | u32 rx_len_len; | ||
| 43 | u32 rx_cnt_cnt; | ||
| 44 | u32 rx_complete; | ||
| 45 | }; | ||
| 46 | |||
| 47 | /* hw_options bits in CPPI buffer descriptors */ | ||
| 48 | #define CPPI_SOP_SET ((u32)(1 << 31)) | ||
| 49 | #define CPPI_EOP_SET ((u32)(1 << 30)) | ||
| 50 | #define CPPI_OWN_SET ((u32)(1 << 29)) /* owned by cppi */ | ||
| 51 | #define CPPI_EOQ_MASK ((u32)(1 << 28)) | ||
| 52 | #define CPPI_ZERO_SET ((u32)(1 << 23)) /* rx saw zlp; tx issues one */ | ||
| 53 | #define CPPI_RXABT_MASK ((u32)(1 << 19)) /* need more rx buffers */ | ||
| 54 | |||
| 55 | #define CPPI_RECV_PKTLEN_MASK 0xFFFF | ||
| 56 | #define CPPI_BUFFER_LEN_MASK 0xFFFF | ||
| 57 | |||
| 58 | #define CPPI_TEAR_READY ((u32)(1 << 31)) | ||
| 59 | |||
| 60 | /* CPPI data structure definitions */ | ||
| 61 | |||
| 62 | #define CPPI_DESCRIPTOR_ALIGN 16 /* bytes; 5-dec docs say 4-byte align */ | ||
| 63 | |||
| 64 | struct cppi_descriptor { | ||
| 65 | /* hardware overlay */ | ||
| 66 | u32 hw_next; /* next buffer descriptor Pointer */ | ||
| 67 | u32 hw_bufp; /* i/o buffer pointer */ | ||
| 68 | u32 hw_off_len; /* buffer_offset16, buffer_length16 */ | ||
| 69 | u32 hw_options; /* flags: SOP, EOP, etc. */ | ||
| 70 | |||
| 71 | struct cppi_descriptor *next; | ||
| 72 | dma_addr_t dma; /* address of this descriptor */ | ||
| 73 | u32 buflen; /* for RX: original buffer length */ | ||
| 74 | } __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN))); | ||
| 75 | |||
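| | /* Illustrative sketch, not part of the driver: how the TX path in | ||
| | * cppi_dma.c fills one descriptor for a full-size packet (buf_dma, | ||
| | * offset and next_bd stand in for the channel bookkeeping fields): | ||
| | * | ||
| | * bd->hw_bufp = buf_dma + offset; | ||
| | * bd->hw_off_len = maxpacket; | ||
| | * bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET | ||
| | * | CPPI_OWN_SET | maxpacket; | ||
| | * bd->hw_next = next_bd ? next_bd->dma : 0; | ||
| | * | ||
| | * The length appears twice: in hw_off_len for the buffer, and in the | ||
| | * low 16 bits of hw_options for the packet. CPPI clears CPPI_OWN_SET | ||
| | * once it has consumed the descriptor. | ||
| | */ | ||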
| 76 | |||
| 77 | struct cppi; | ||
| 78 | |||
| 79 | /* CPPI Channel Control structure */ | ||
| 80 | struct cppi_channel { | ||
| 81 | struct dma_channel channel; | ||
| 82 | |||
| 83 | /* back pointer to the DMA controller structure */ | ||
| 84 | struct cppi *controller; | ||
| 85 | |||
| 86 | /* which direction of which endpoint? */ | ||
| 87 | struct musb_hw_ep *hw_ep; | ||
| 88 | bool transmit; | ||
| 89 | u8 index; | ||
| 90 | |||
| 91 | /* DMA modes: RNDIS or "transparent" */ | ||
| 92 | u8 is_rndis; | ||
| 93 | |||
| 94 | /* bookkeeping for current transfer request */ | ||
| 95 | dma_addr_t buf_dma; | ||
| 96 | u32 buf_len; | ||
| 97 | u32 maxpacket; | ||
| 98 | u32 offset; /* dma requested */ | ||
| 99 | |||
| 100 | void __iomem *state_ram; /* CPPI state */ | ||
| 101 | |||
| 102 | struct cppi_descriptor *freelist; | ||
| 103 | |||
| 104 | /* BD management fields */ | ||
| 105 | struct cppi_descriptor *head; | ||
| 106 | struct cppi_descriptor *tail; | ||
| 107 | struct cppi_descriptor *last_processed; | ||
| 108 | |||
| 109 | /* use tx_complete in host role to track endpoints waiting for | ||
| 110 | * FIFONOTEMPTY to clear. | ||
| 111 | */ | ||
| 112 | struct list_head tx_complete; | ||
| 113 | }; | ||
| 114 | |||
| 115 | /* CPPI DMA controller object */ | ||
| 116 | struct cppi { | ||
| 117 | struct dma_controller controller; | ||
| 118 | struct musb *musb; | ||
| 119 | void __iomem *mregs; /* Mentor regs */ | ||
| 120 | void __iomem *tibase; /* TI/CPPI regs */ | ||
| 121 | |||
| 122 | struct cppi_channel tx[MUSB_C_NUM_EPT - 1]; | ||
| 123 | struct cppi_channel rx[MUSB_C_NUM_EPR - 1]; | ||
| 124 | |||
| 125 | struct dma_pool *pool; | ||
| 126 | |||
| 127 | struct list_head tx_complete; | ||
| 128 | }; | ||
| 129 | |||
| 130 | /* irq handling hook */ | ||
| 131 | extern void cppi_completion(struct musb *, u32 rx, u32 tx); | ||
| 132 | |||
| 133 | #endif /* end of ifndef _CPPI_DMA_H_ */ | ||
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c new file mode 100644 index 000000000000..75baf181a8cd --- /dev/null +++ b/drivers/usb/musb/davinci.c | |||
| @@ -0,0 +1,462 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 3 | * | ||
| 4 | * This file is part of the Inventra Controller Driver for Linux. | ||
| 5 | * | ||
| 6 | * The Inventra Controller Driver for Linux is free software; you | ||
| 7 | * can redistribute it and/or modify it under the terms of the GNU | ||
| 8 | * General Public License version 2 as published by the Free Software | ||
| 9 | * Foundation. | ||
| 10 | * | ||
| 11 | * The Inventra Controller Driver for Linux is distributed in | ||
| 12 | * the hope that it will be useful, but WITHOUT ANY WARRANTY; | ||
| 13 | * without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public | ||
| 15 | * License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with The Inventra Controller Driver for Linux ; if not, | ||
| 19 | * write to the Free Software Foundation, Inc., 59 Temple Place, | ||
| 20 | * Suite 330, Boston, MA 02111-1307 USA | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/module.h> | ||
| 25 | #include <linux/kernel.h> | ||
| 26 | #include <linux/sched.h> | ||
| 27 | #include <linux/slab.h> | ||
| 28 | #include <linux/init.h> | ||
| 29 | #include <linux/list.h> | ||
| 30 | #include <linux/delay.h> | ||
| 31 | #include <linux/clk.h> | ||
| 32 | #include <linux/io.h> | ||
| 33 | |||
| 34 | #include <asm/arch/hardware.h> | ||
| 35 | #include <asm/arch/memory.h> | ||
| 36 | #include <asm/arch/gpio.h> | ||
| 37 | #include <asm/mach-types.h> | ||
| 38 | |||
| 39 | #include "musb_core.h" | ||
| 40 | |||
| 41 | #ifdef CONFIG_MACH_DAVINCI_EVM | ||
| 42 | #include <asm/arch/i2c-client.h> | ||
| 43 | #endif | ||
| 44 | |||
| 45 | #include "davinci.h" | ||
| 46 | #include "cppi_dma.h" | ||
| 47 | |||
| 48 | |||
| 49 | /* REVISIT (PM) we should be able to keep the PHY in low power mode most | ||
| 51 | * of the time (24 MHz oscillator and PLL off, etc.) by setting POWER.D0 | ||
| 51 | * and, when in host mode, autosuspending idle root ports... PHYPLLON | ||
| 52 | * (overriding SUSPENDM?) then likely needs to stay off. | ||
| 53 | */ | ||
| 54 | |||
| 55 | static inline void phy_on(void) | ||
| 56 | { | ||
| 57 | /* start the on-chip PHY and its PLL */ | ||
| 58 | __raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON, | ||
| 59 | (void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR)); | ||
| 60 | while ((__raw_readl((void __force __iomem *) | ||
| 61 | IO_ADDRESS(USBPHY_CTL_PADDR)) | ||
| 62 | & USBPHY_PHYCLKGD) == 0) | ||
| 63 | cpu_relax(); | ||
| 64 | } | ||
| 65 | |||
| 66 | static inline void phy_off(void) | ||
| 67 | { | ||
| 68 | /* powerdown the on-chip PHY and its oscillator */ | ||
| 69 | __raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *) | ||
| 70 | IO_ADDRESS(USBPHY_CTL_PADDR)); | ||
| 71 | } | ||
| 72 | |||
| 73 | static int dma_off = 1; | ||
| 74 | |||
| 75 | void musb_platform_enable(struct musb *musb) | ||
| 76 | { | ||
| 77 | u32 tmp, old, val; | ||
| 78 | |||
| 79 | /* workaround: setup irqs through both register sets */ | ||
| 80 | tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK) | ||
| 81 | << DAVINCI_USB_TXINT_SHIFT; | ||
| 82 | musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); | ||
| 83 | old = tmp; | ||
| 84 | tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK)) | ||
| 85 | << DAVINCI_USB_RXINT_SHIFT; | ||
| 86 | musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); | ||
| 87 | tmp |= old; | ||
| 88 | |||
| 89 | val = ~MUSB_INTR_SOF; | ||
| 90 | tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT); | ||
| 91 | musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); | ||
| 92 | |||
| 93 | if (is_dma_capable() && !dma_off) | ||
| 94 | printk(KERN_WARNING "%s %s: dma not reactivated\n", | ||
| 95 | __FILE__, __func__); | ||
| 96 | else | ||
| 97 | dma_off = 0; | ||
| 98 | |||
| 99 | /* force a DRVVBUS irq so we can start polling for ID change */ | ||
| 100 | if (is_otg_enabled(musb)) | ||
| 101 | musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, | ||
| 102 | DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT); | ||
| 103 | } | ||
| 104 | |||
| 105 | /* | ||
| 106 | * Disable the HDRC and flush interrupts | ||
| 107 | */ | ||
| 108 | void musb_platform_disable(struct musb *musb) | ||
| 109 | { | ||
| 110 | /* because we don't set CTRLR.UINT, "important" to: | ||
| 111 | * - not read/write INTRUSB/INTRUSBE | ||
| 112 | * - (except during initial setup, as workaround) | ||
| 113 | * - use INTSETR/INTCLRR instead | ||
| 114 | */ | ||
| 115 | musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG, | ||
| 116 | DAVINCI_USB_USBINT_MASK | ||
| 117 | | DAVINCI_USB_TXINT_MASK | ||
| 118 | | DAVINCI_USB_RXINT_MASK); | ||
| 119 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
| 120 | musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0); | ||
| 121 | |||
| 122 | if (is_dma_capable() && !dma_off) | ||
| 123 | WARNING("dma still active\n"); | ||
| 124 | } | ||
| 125 | |||
| 126 | |||
| 127 | /* REVISIT it's not clear whether DaVinci can support full OTG. */ | ||
| 128 | |||
| 129 | static int vbus_state = -1; | ||
| 130 | |||
| 131 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 132 | #define portstate(stmt) stmt | ||
| 133 | #else | ||
| 134 | #define portstate(stmt) | ||
| 135 | #endif | ||
| 136 | |||
| 137 | |||
| 138 | /* VBUS SWITCHING IS BOARD-SPECIFIC */ | ||
| 139 | |||
| 140 | #ifdef CONFIG_MACH_DAVINCI_EVM | ||
| 141 | #ifndef CONFIG_MACH_DAVINCI_EVM_OTG | ||
| 142 | |||
| 143 | /* I2C operations are always synchronous, and require a task context. | ||
| 144 | * With unloaded systems, using the shared workqueue seems to suffice | ||
| 145 | * to satisfy the 100msec A_WAIT_VRISE timeout... | ||
| 146 | */ | ||
| 147 | static void evm_deferred_drvvbus(struct work_struct *ignored) | ||
| 148 | { | ||
| 149 | davinci_i2c_expander_op(0x3a, USB_DRVVBUS, vbus_state); | ||
| 150 | vbus_state = !vbus_state; | ||
| 151 | } | ||
| 152 | static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus); | ||
| 153 | |||
| 154 | #endif /* modified board */ | ||
| 155 | #endif /* EVM */ | ||
| 156 | |||
| 157 | static void davinci_source_power(struct musb *musb, int is_on, int immediate) | ||
| 158 | { | ||
| 159 | if (is_on) | ||
| 160 | is_on = 1; | ||
| 161 | |||
| 162 | if (vbus_state == is_on) | ||
| 163 | return; | ||
| 164 | vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */ | ||
| 165 | |||
| 166 | #ifdef CONFIG_MACH_DAVINCI_EVM | ||
| 167 | if (machine_is_davinci_evm()) { | ||
| 168 | #ifdef CONFIG_MACH_DAVINCI_EVM_OTG | ||
| 169 | /* modified EVM board switching VBUS with GPIO(6) not I2C | ||
| 170 | * NOTE: PINMUX0.RGB888 (bit23) must be clear | ||
| 171 | */ | ||
| 172 | if (is_on) | ||
| 173 | gpio_set(GPIO(6)); | ||
| 174 | else | ||
| 175 | gpio_clear(GPIO(6)); | ||
| 176 | immediate = 1; | ||
| 177 | #else | ||
| 178 | if (immediate) | ||
| 179 | davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on); | ||
| 180 | else | ||
| 181 | schedule_work(&evm_vbus_work); | ||
| 182 | #endif | ||
| 183 | } | ||
| 184 | #endif | ||
| 185 | if (immediate) | ||
| 186 | vbus_state = is_on; | ||
| 187 | } | ||
| 188 | |||
| 189 | static void davinci_set_vbus(struct musb *musb, int is_on) | ||
| 190 | { | ||
| 191 | WARN_ON(is_on && is_peripheral_active(musb)); | ||
| 192 | davinci_source_power(musb, is_on, 0); | ||
| 193 | } | ||
| 194 | |||
| 195 | |||
| 196 | #define POLL_SECONDS 2 | ||
| 197 | |||
| 198 | static struct timer_list otg_workaround; | ||
| 199 | |||
| 200 | static void otg_timer(unsigned long _musb) | ||
| 201 | { | ||
| 202 | struct musb *musb = (void *)_musb; | ||
| 203 | void __iomem *mregs = musb->mregs; | ||
| 204 | u8 devctl; | ||
| 205 | unsigned long flags; | ||
| 206 | |||
| 207 | /* We poll because DaVinci won't expose several OTG-critical | ||
| 208 | * status change events (from the transceiver) otherwise. | ||
| 209 | */ | ||
| 210 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
| 211 | DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb)); | ||
| 212 | |||
| 213 | spin_lock_irqsave(&musb->lock, flags); | ||
| 214 | switch (musb->xceiv.state) { | ||
| 215 | case OTG_STATE_A_WAIT_VFALL: | ||
| 216 | /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL | ||
| 217 | * seems to mis-handle session "start" otherwise (or in our | ||
| 218 | * case "recover"), in routine "VBUS was valid by the time | ||
| 219 | * VBUSERR got reported during enumeration" cases. | ||
| 220 | */ | ||
| 221 | if (devctl & MUSB_DEVCTL_VBUS) { | ||
| 222 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
| 223 | break; | ||
| 224 | } | ||
| 225 | musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; | ||
| 226 | musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, | ||
| 227 | MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT); | ||
| 228 | break; | ||
| 229 | case OTG_STATE_B_IDLE: | ||
| 230 | if (!is_peripheral_enabled(musb)) | ||
| 231 | break; | ||
| 232 | |||
| 233 | /* There's no ID-changed IRQ, so we have no good way to tell | ||
| 234 | * when to switch to the A-Default state machine (by setting | ||
| 235 | * the DEVCTL.SESSION flag). | ||
| 236 | * | ||
| 237 | * Workaround: whenever we're in B_IDLE, try setting the | ||
| 238 | * session flag every few seconds. If it works, ID was | ||
| 239 | * grounded and we're now in the A-Default state machine. | ||
| 240 | * | ||
| 241 | * NOTE setting the session flag is _supposed_ to trigger | ||
| 242 | * SRP, but clearly it doesn't. | ||
| 243 | */ | ||
| 244 | musb_writeb(mregs, MUSB_DEVCTL, | ||
| 245 | devctl | MUSB_DEVCTL_SESSION); | ||
| 246 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
| 247 | if (devctl & MUSB_DEVCTL_BDEVICE) | ||
| 248 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
| 249 | else | ||
| 250 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
| 251 | break; | ||
| 252 | default: | ||
| 253 | break; | ||
| 254 | } | ||
| 255 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 256 | } | ||
| 257 | |||
| 258 | static irqreturn_t davinci_interrupt(int irq, void *__hci) | ||
| 259 | { | ||
| 260 | unsigned long flags; | ||
| 261 | irqreturn_t retval = IRQ_NONE; | ||
| 262 | struct musb *musb = __hci; | ||
| 263 | void __iomem *tibase = musb->ctrl_base; | ||
| 264 | u32 tmp; | ||
| 265 | |||
| 266 | spin_lock_irqsave(&musb->lock, flags); | ||
| 267 | |||
| 268 | /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through | ||
| 269 | * the Mentor registers (except for setup), use the TI ones and EOI. | ||
| 270 | * | ||
| 271 | * Docs describe irq "vector" registers associated with the CPPI and | ||
| 272 | * USB EOI registers. These hold a bitmask corresponding to the | ||
| 273 | * current IRQ, not an irq handler address. Would using those bits | ||
| 274 | * resolve some of the races observed in this dispatch code?? | ||
| 275 | */ | ||
| 276 | |||
| 277 | /* CPPI interrupts share the same IRQ line, but have their own | ||
| 278 | * mask, state, "vector", and EOI registers. | ||
| 279 | */ | ||
| 280 | if (is_cppi_enabled()) { | ||
| 281 | u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); | ||
| 282 | u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); | ||
| 283 | |||
| 284 | if (cppi_tx || cppi_rx) { | ||
| 285 | DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx); | ||
| 286 | cppi_completion(musb, cppi_rx, cppi_tx); | ||
| 287 | retval = IRQ_HANDLED; | ||
| 288 | } | ||
| 289 | } | ||
| 290 | |||
| 291 | /* ack and handle non-CPPI interrupts */ | ||
| 292 | tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG); | ||
| 293 | musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp); | ||
| 294 | DBG(4, "IRQ %08x\n", tmp); | ||
| 295 | |||
| 296 | musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK) | ||
| 297 | >> DAVINCI_USB_RXINT_SHIFT; | ||
| 298 | musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK) | ||
| 299 | >> DAVINCI_USB_TXINT_SHIFT; | ||
| 300 | musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK) | ||
| 301 | >> DAVINCI_USB_USBINT_SHIFT; | ||
| 302 | |||
| 303 | /* DRVVBUS irqs are the only proxy we have (a very poor one!) for | ||
| 304 | * DaVinci's missing ID change IRQ. We need an ID change IRQ to | ||
| 305 | * switch appropriately between halves of the OTG state machine. | ||
| 306 | * Managing DEVCTL.SESSION per Mentor docs requires we know its | ||
| 307 | * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. | ||
| 308 | * Also, DRVVBUS pulses for SRP (but not at 5V) ... | ||
| 309 | */ | ||
| 310 | if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) { | ||
| 311 | int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG); | ||
| 312 | void __iomem *mregs = musb->mregs; | ||
| 313 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
| 314 | int err = musb->int_usb & MUSB_INTR_VBUSERROR; | ||
| 315 | |||
| 316 | err = is_host_enabled(musb) | ||
| 317 | && (musb->int_usb & MUSB_INTR_VBUSERROR); | ||
| 318 | if (err) { | ||
| 319 | /* The Mentor core doesn't debounce VBUS as needed | ||
| 320 | * to cope with device connect current spikes. This | ||
| 321 | * means it's not uncommon for bus-powered devices | ||
| 322 | * to get VBUS errors during enumeration. | ||
| 323 | * | ||
| 324 | * This is a workaround, but newer RTL from Mentor | ||
| 325 | * seems to allow a better one: "re"starting sessions | ||
| 326 | * without waiting (on EVM, a **long** time) for VBUS | ||
| 327 | * to stop registering in devctl. | ||
| 328 | */ | ||
| 329 | musb->int_usb &= ~MUSB_INTR_VBUSERROR; | ||
| 330 | musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; | ||
| 331 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
| 332 | WARNING("VBUS error workaround (delay coming)\n"); | ||
| 333 | } else if (is_host_enabled(musb) && drvvbus) { | ||
| 334 | musb->is_active = 1; | ||
| 335 | MUSB_HST_MODE(musb); | ||
| 336 | musb->xceiv.default_a = 1; | ||
| 337 | musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; | ||
| 338 | portstate(musb->port1_status |= USB_PORT_STAT_POWER); | ||
| 339 | del_timer(&otg_workaround); | ||
| 340 | } else { | ||
| 341 | musb->is_active = 0; | ||
| 342 | MUSB_DEV_MODE(musb); | ||
| 343 | musb->xceiv.default_a = 0; | ||
| 344 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
| 345 | portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); | ||
| 346 | } | ||
| 347 | |||
| 348 | /* NOTE: this must complete poweron within 100 msec */ | ||
| 349 | davinci_source_power(musb, drvvbus, 0); | ||
| 350 | DBG(2, "VBUS %s (%s)%s, devctl %02x\n", | ||
| 351 | drvvbus ? "on" : "off", | ||
| 352 | otg_state_string(musb), | ||
| 353 | err ? " ERROR" : "", | ||
| 354 | devctl); | ||
| 355 | retval = IRQ_HANDLED; | ||
| 356 | } | ||
| 357 | |||
| 358 | if (musb->int_tx || musb->int_rx || musb->int_usb) | ||
| 359 | retval |= musb_interrupt(musb); | ||
| 360 | |||
| 361 | /* irq stays asserted until EOI is written */ | ||
| 362 | musb_writel(tibase, DAVINCI_USB_EOI_REG, 0); | ||
| 363 | |||
| 364 | /* poll for ID change */ | ||
| 365 | if (is_otg_enabled(musb) | ||
| 366 | && musb->xceiv.state == OTG_STATE_B_IDLE) | ||
| 367 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
| 368 | |||
| 369 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 370 | |||
| 371 | /* REVISIT we sometimes get unhandled IRQs | ||
| 372 | * (e.g. ep0). not clear why... | ||
| 373 | */ | ||
| 374 | if (retval != IRQ_HANDLED) | ||
| 375 | DBG(5, "unhandled? %08x\n", tmp); | ||
| 376 | return IRQ_HANDLED; | ||
| 377 | } | ||
| 378 | |||
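For reference, the mask/shift decode of the combined interrupt source register in davinci_interrupt() can be exercised on its own. A standalone sketch follows, reusing the same field layout as the DAVINCI_USB_*INT_* macros defined in davinci.h below; the sample value 0x01000213 is made up for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define USBINT_MASK   0x01ff0000u   /* 8 Mentor bits + DRVVBUS */
    #define USBINT_SHIFT  16
    #define RXINT_MASK    0x00001e00u   /* rx endpoints 1..4 */
    #define RXINT_SHIFT   8
    #define TXINT_MASK    0x0000001fu   /* ep0 + tx endpoints 1..4 */
    #define TXINT_SHIFT   0

    int main(void)
    {
            uint32_t src = 0x01000213u;  /* made-up masked source value */

            unsigned tx  = (src & TXINT_MASK)  >> TXINT_SHIFT;
            unsigned rx  = (src & RXINT_MASK)  >> RXINT_SHIFT;
            unsigned usb = (src & USBINT_MASK) >> USBINT_SHIFT;

            /* prints: tx 13 rx 02 usb 100 (0x100 is the DRVVBUS bit) */
            printf("tx %02x rx %02x usb %03x\n", tx, rx, usb);
            return 0;
    }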
| 379 | int __init musb_platform_init(struct musb *musb) | ||
| 380 | { | ||
| 381 | void __iomem *tibase = musb->ctrl_base; | ||
| 382 | u32 revision; | ||
| 383 | |||
| 384 | musb->mregs += DAVINCI_BASE_OFFSET; | ||
| 385 | #if 0 | ||
| 386 | /* REVISIT there's something odd about clocking, this | ||
| 387 | * didn't appear to do the job ... | ||
| 388 | */ | ||
| 389 | musb->clock = clk_get(pDevice, "usb"); | ||
| 390 | if (IS_ERR(musb->clock)) | ||
| 391 | return PTR_ERR(musb->clock); | ||
| 392 | |||
| 393 | status = clk_enable(musb->clock); | ||
| 394 | if (status < 0) | ||
| 395 | return -ENODEV; | ||
| 396 | #endif | ||
| 397 | |||
| 398 | /* returns zero if e.g. not clocked */ | ||
| 399 | revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG); | ||
| 400 | if (revision == 0) | ||
| 401 | return -ENODEV; | ||
| 402 | |||
| 403 | if (is_host_enabled(musb)) | ||
| 404 | setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); | ||
| 405 | |||
| 406 | musb->board_set_vbus = davinci_set_vbus; | ||
| 407 | davinci_source_power(musb, 0, 1); | ||
| 408 | |||
| 409 | /* reset the controller */ | ||
| 410 | musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1); | ||
| 411 | |||
| 412 | /* start the on-chip PHY and its PLL */ | ||
| 413 | phy_on(); | ||
| 414 | |||
| 415 | msleep(5); | ||
| 416 | |||
| 417 | /* NOTE: irqs are in mixed mode, not bypass to pure-musb */ | ||
| 418 | pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n", | ||
| 419 | revision, __raw_readl((void __force __iomem *) | ||
| 420 | IO_ADDRESS(USBPHY_CTL_PADDR)), | ||
| 421 | musb_readb(tibase, DAVINCI_USB_CTRL_REG)); | ||
| 422 | |||
| 423 | musb->isr = davinci_interrupt; | ||
| 424 | return 0; | ||
| 425 | } | ||
| 426 | |||
| 427 | int musb_platform_exit(struct musb *musb) | ||
| 428 | { | ||
| 429 | if (is_host_enabled(musb)) | ||
| 430 | del_timer_sync(&otg_workaround); | ||
| 431 | |||
| 432 | davinci_source_power(musb, 0 /*off*/, 1); | ||
| 433 | |||
| 434 | /* delay, to avoid problems with module reload */ | ||
| 435 | if (is_host_enabled(musb) && musb->xceiv.default_a) { | ||
| 436 | int maxdelay = 30; | ||
| 437 | u8 devctl, warn = 0; | ||
| 438 | |||
| 439 | /* if there's no peripheral connected, VBUS can take a | ||
| 440 | * long time to fall, especially on the EVM with its huge C133. | ||
| 441 | */ | ||
| 442 | do { | ||
| 443 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 444 | if (!(devctl & MUSB_DEVCTL_VBUS)) | ||
| 445 | break; | ||
| 446 | if ((devctl & MUSB_DEVCTL_VBUS) != warn) { | ||
| 447 | warn = devctl & MUSB_DEVCTL_VBUS; | ||
| 448 | DBG(1, "VBUS %d\n", | ||
| 449 | warn >> MUSB_DEVCTL_VBUS_SHIFT); | ||
| 450 | } | ||
| 451 | msleep(1000); | ||
| 452 | maxdelay--; | ||
| 453 | } while (maxdelay > 0); | ||
| 454 | |||
| 455 | /* in OTG mode, another host might be connected */ | ||
| 456 | if (devctl & MUSB_DEVCTL_VBUS) | ||
| 457 | DBG(1, "VBUS off timeout (devctl %02x)\n", devctl); | ||
| 458 | } | ||
| 459 | |||
| 460 | phy_off(); | ||
| 461 | return 0; | ||
| 462 | } | ||
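The shutdown path above waits up to 30 seconds for VBUS to discharge, logging only when the reported level changes. A hedged userspace analogue of that bounded wait, with sleep() standing in for msleep() and read_level() a made-up stand-in for the DEVCTL read:

    #include <stdio.h>
    #include <unistd.h>

    static int read_level(void)
    {
            return 0;               /* stand-in for the real status read */
    }

    int main(void)
    {
            int maxdelay = 30, warn = -1, level;

            do {
                    level = read_level();
                    if (level == 0)
                            break;          /* condition reached */
                    if (level != warn) {    /* log only on change */
                            warn = level;
                            printf("still waiting, level %d\n", level);
                    }
                    sleep(1);
            } while (--maxdelay > 0);

            if (level != 0)
                    printf("timed out waiting for level to fall\n");
            return 0;
    }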
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h new file mode 100644 index 000000000000..7fb6238e270f --- /dev/null +++ b/drivers/usb/musb/davinci.h | |||
| @@ -0,0 +1,100 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 3 | * | ||
| 4 | * The Inventra Controller Driver for Linux is free software; you | ||
| 5 | * can redistribute it and/or modify it under the terms of the GNU | ||
| 6 | * General Public License version 2 as published by the Free Software | ||
| 7 | * Foundation. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef __MUSB_HDRDF_H__ | ||
| 11 | #define __MUSB_HDRDF_H__ | ||
| 12 | |||
| 13 | /* | ||
| 14 | * DaVinci-specific definitions | ||
| 15 | */ | ||
| 16 | |||
| 17 | /* Integrated highspeed/otg PHY */ | ||
| 18 | #define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34) | ||
| 19 | #define USBPHY_PHYCLKGD (1 << 8) | ||
| 20 | #define USBPHY_SESNDEN (1 << 7) /* v(sess_end) comparator */ | ||
| 21 | #define USBPHY_VBDTCTEN (1 << 6) /* v(bus) comparator */ | ||
| 22 | #define USBPHY_PHYPLLON (1 << 4) /* override pll suspend */ | ||
| 23 | #define USBPHY_CLKO1SEL (1 << 3) | ||
| 24 | #define USBPHY_OSCPDWN (1 << 2) | ||
| 25 | #define USBPHY_PHYPDWN (1 << 0) | ||
| 26 | |||
| 27 | /* For now include usb OTG module registers here */ | ||
| 28 | #define DAVINCI_USB_VERSION_REG 0x00 | ||
| 29 | #define DAVINCI_USB_CTRL_REG 0x04 | ||
| 30 | #define DAVINCI_USB_STAT_REG 0x08 | ||
| 31 | #define DAVINCI_RNDIS_REG 0x10 | ||
| 32 | #define DAVINCI_AUTOREQ_REG 0x14 | ||
| 33 | #define DAVINCI_USB_INT_SOURCE_REG 0x20 | ||
| 34 | #define DAVINCI_USB_INT_SET_REG 0x24 | ||
| 35 | #define DAVINCI_USB_INT_SRC_CLR_REG 0x28 | ||
| 36 | #define DAVINCI_USB_INT_MASK_REG 0x2c | ||
| 37 | #define DAVINCI_USB_INT_MASK_SET_REG 0x30 | ||
| 38 | #define DAVINCI_USB_INT_MASK_CLR_REG 0x34 | ||
| 39 | #define DAVINCI_USB_INT_SRC_MASKED_REG 0x38 | ||
| 40 | #define DAVINCI_USB_EOI_REG 0x3c | ||
| 41 | #define DAVINCI_USB_EOI_INTVEC 0x40 | ||
| 42 | |||
| 43 | /* BEGIN CPPI-generic (?) */ | ||
| 44 | |||
| 45 | /* CPPI related registers */ | ||
| 46 | #define DAVINCI_TXCPPI_CTRL_REG 0x80 | ||
| 47 | #define DAVINCI_TXCPPI_TEAR_REG 0x84 | ||
| 48 | #define DAVINCI_CPPI_EOI_REG 0x88 | ||
| 49 | #define DAVINCI_CPPI_INTVEC_REG 0x8c | ||
| 50 | #define DAVINCI_TXCPPI_MASKED_REG 0x90 | ||
| 51 | #define DAVINCI_TXCPPI_RAW_REG 0x94 | ||
| 52 | #define DAVINCI_TXCPPI_INTENAB_REG 0x98 | ||
| 53 | #define DAVINCI_TXCPPI_INTCLR_REG 0x9c | ||
| 54 | |||
| 55 | #define DAVINCI_RXCPPI_CTRL_REG 0xC0 | ||
| 56 | #define DAVINCI_RXCPPI_MASKED_REG 0xD0 | ||
| 57 | #define DAVINCI_RXCPPI_RAW_REG 0xD4 | ||
| 58 | #define DAVINCI_RXCPPI_INTENAB_REG 0xD8 | ||
| 59 | #define DAVINCI_RXCPPI_INTCLR_REG 0xDC | ||
| 60 | |||
| 61 | #define DAVINCI_RXCPPI_BUFCNT0_REG 0xE0 | ||
| 62 | #define DAVINCI_RXCPPI_BUFCNT1_REG 0xE4 | ||
| 63 | #define DAVINCI_RXCPPI_BUFCNT2_REG 0xE8 | ||
| 64 | #define DAVINCI_RXCPPI_BUFCNT3_REG 0xEC | ||
| 65 | |||
| 66 | /* CPPI state RAM entries */ | ||
| 67 | #define DAVINCI_CPPI_STATERAM_BASE_OFFSET 0x100 | ||
| 68 | |||
| 69 | #define DAVINCI_TXCPPI_STATERAM_OFFSET(chnum) \ | ||
| 70 | (DAVINCI_CPPI_STATERAM_BASE_OFFSET + ((chnum) * 0x40)) | ||
| 71 | #define DAVINCI_RXCPPI_STATERAM_OFFSET(chnum) \ | ||
| 72 | (DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((chnum) * 0x40)) | ||
| 73 | |||
| 74 | /* CPPI masks */ | ||
| 75 | #define DAVINCI_DMA_CTRL_ENABLE 1 | ||
| 76 | #define DAVINCI_DMA_CTRL_DISABLE 0 | ||
| 77 | |||
| 78 | #define DAVINCI_DMA_ALL_CHANNELS_ENABLE 0xF | ||
| 79 | #define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF | ||
| 80 | |||
| 81 | /* END CPPI-generic (?) */ | ||
| 82 | |||
| 83 | #define DAVINCI_USB_TX_ENDPTS_MASK 0x1f /* ep0 + 4 tx */ | ||
| 84 | #define DAVINCI_USB_RX_ENDPTS_MASK 0x1e /* 4 rx */ | ||
| 85 | |||
| 86 | #define DAVINCI_USB_USBINT_SHIFT 16 | ||
| 87 | #define DAVINCI_USB_TXINT_SHIFT 0 | ||
| 88 | #define DAVINCI_USB_RXINT_SHIFT 8 | ||
| 89 | |||
| 90 | #define DAVINCI_INTR_DRVVBUS 0x0100 | ||
| 91 | |||
| 92 | #define DAVINCI_USB_USBINT_MASK 0x01ff0000 /* 8 Mentor, DRVVBUS */ | ||
| 93 | #define DAVINCI_USB_TXINT_MASK \ | ||
| 94 | (DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT) | ||
| 95 | #define DAVINCI_USB_RXINT_MASK \ | ||
| 96 | (DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT) | ||
| 97 | |||
| 98 | #define DAVINCI_BASE_OFFSET 0x400 | ||
| 99 | |||
| 100 | #endif /* __MUSB_HDRDF_H__ */ | ||
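The CPPI state-RAM macros above interleave TX and RX entries in 0x40-byte slots: TX for channel n sits at base + n*0x40, RX at base + 0x20 + n*0x40. A small standalone check of that layout (macro names shortened here; the arithmetic is the same as in davinci.h):

    #include <stdio.h>

    #define CPPI_STATERAM_BASE 0x100u
    #define TXCPPI_STATERAM_OFF(chnum) \
            (CPPI_STATERAM_BASE + ((chnum) * 0x40u))
    #define RXCPPI_STATERAM_OFF(chnum) \
            (CPPI_STATERAM_BASE + 0x20u + ((chnum) * 0x40u))

    int main(void)
    {
            int ch;

            /* channels 0..3: TX at 0x100/0x140/0x180/0x1c0,
             * RX at 0x120/0x160/0x1a0/0x1e0 */
            for (ch = 0; ch < 4; ch++)
                    printf("ch%d tx 0x%03x rx 0x%03x\n", ch,
                           TXCPPI_STATERAM_OFF(ch),
                           RXCPPI_STATERAM_OFF(ch));
            return 0;
    }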
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c new file mode 100644 index 000000000000..d68ec6daf335 --- /dev/null +++ b/drivers/usb/musb/musb_core.c | |||
| @@ -0,0 +1,2261 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG driver core code | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * version 2 as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 32 | * | ||
| 33 | */ | ||
| 34 | |||
| 35 | /* | ||
| 36 | * Inventra (Multipoint) Dual-Role Controller Driver for Linux. | ||
| 37 | * | ||
| 38 | * This consists of a Host Controller Driver (HCD) and a peripheral | ||
| 39 | * controller driver implementing the "Gadget" API; OTG support is | ||
| 40 | * in the works. These are normal Linux-USB controller drivers which | ||
| 41 | * use IRQs and have no dedicated thread. | ||
| 42 | * | ||
| 43 | * This version of the driver has only been used with products from | ||
| 44 | * Texas Instruments. Those products integrate the Inventra logic | ||
| 45 | * with other DMA, IRQ, and bus modules, as well as other logic that | ||
| 46 | * needs to be reflected in this driver. | ||
| 47 | * | ||
| 48 | * | ||
| 49 | * NOTE: the original Mentor code here was pretty much a collection | ||
| 50 | * of mechanisms that don't seem to have been fully integrated/working | ||
| 51 | * for *any* Linux kernel version. This version aims at Linux 2.6.now. | ||
| 52 | * Key open issues include: | ||
| 53 | * | ||
| 54 | * - Lack of host-side transaction scheduling, for all transfer types. | ||
| 55 | * The hardware doesn't do it; instead, software must. | ||
| 56 | * | ||
| 57 | * This is not an issue for OTG devices that don't support external | ||
| 58 | * hubs, but for more "normal" USB hosts it's a user issue that the | ||
| 59 | * "multipoint" support doesn't scale in the expected ways. That | ||
| 60 | * includes DaVinci EVM in a common non-OTG mode. | ||
| 61 | * | ||
| 62 | * * Control and bulk use dedicated endpoints, and there's as | ||
| 63 | * yet no mechanism to either (a) reclaim the hardware when | ||
| 64 | * peripherals are NAKing, which gets complicated with bulk | ||
| 65 | * endpoints, or (b) use more than a single bulk endpoint in | ||
| 66 | * each direction. | ||
| 67 | * | ||
| 68 | * RESULT: one device may be perceived as blocking another one. | ||
| 69 | * | ||
| 70 | * * Interrupt and isochronous will dynamically allocate endpoint | ||
| 71 | * hardware, but (a) there's no record keeping for bandwidth; | ||
| 72 | * (b) in the common case that few endpoints are available, there | ||
| 73 | * is no mechanism to reuse endpoints to talk to multiple devices. | ||
| 74 | * | ||
| 75 | * RESULT: At one extreme, bandwidth can be overcommitted in | ||
| 76 | * some hardware configurations, and no faults will be reported. | ||
| 77 | * At the other extreme, the bandwidth capabilities which do | ||
| 78 | * exist tend to be severely undercommitted. You can't yet hook | ||
| 79 | * up both a keyboard and a mouse to an external USB hub. | ||
| 80 | */ | ||
| 81 | |||
| 82 | /* | ||
| 83 | * This gets many kinds of configuration information: | ||
| 84 | * - Kconfig for everything user-configurable | ||
| 85 | * - <asm/arch/hdrc_cnf.h> for SOC or family details | ||
| 86 | * - platform_device for addressing, irq, and platform_data | ||
| 87 | * - platform_data is mostly for board-specific information | ||
| 88 | * | ||
| 89 | * Most of the conditional compilation will (someday) vanish. | ||
| 90 | */ | ||
| 91 | |||
| 92 | #include <linux/module.h> | ||
| 93 | #include <linux/kernel.h> | ||
| 94 | #include <linux/sched.h> | ||
| 95 | #include <linux/slab.h> | ||
| 96 | #include <linux/init.h> | ||
| 97 | #include <linux/list.h> | ||
| 98 | #include <linux/kobject.h> | ||
| 99 | #include <linux/platform_device.h> | ||
| 100 | #include <linux/io.h> | ||
| 101 | |||
| 102 | #ifdef CONFIG_ARM | ||
| 103 | #include <asm/arch/hardware.h> | ||
| 104 | #include <asm/arch/memory.h> | ||
| 105 | #include <asm/mach-types.h> | ||
| 106 | #endif | ||
| 107 | |||
| 108 | #include "musb_core.h" | ||
| 109 | |||
| 110 | |||
| 111 | #ifdef CONFIG_ARCH_DAVINCI | ||
| 112 | #include "davinci.h" | ||
| 113 | #endif | ||
| 114 | |||
| 115 | |||
| 116 | |||
| 117 | #if MUSB_DEBUG > 0 | ||
| 118 | unsigned debug = MUSB_DEBUG; | ||
| 119 | module_param(debug, uint, 0); | ||
| 120 | MODULE_PARM_DESC(debug, "initial debug message level"); | ||
| 121 | |||
| 122 | #define MUSB_VERSION_SUFFIX "/dbg" | ||
| 123 | #endif | ||
| 124 | |||
| 125 | #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia" | ||
| 126 | #define DRIVER_DESC "Inventra Dual-Role USB Controller Driver" | ||
| 127 | |||
| 128 | #define MUSB_VERSION_BASE "6.0" | ||
| 129 | |||
| 130 | #ifndef MUSB_VERSION_SUFFIX | ||
| 131 | #define MUSB_VERSION_SUFFIX "" | ||
| 132 | #endif | ||
| 133 | #define MUSB_VERSION MUSB_VERSION_BASE MUSB_VERSION_SUFFIX | ||
| 134 | |||
| 135 | #define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION | ||
| 136 | |||
| 137 | #define MUSB_DRIVER_NAME "musb_hdrc" | ||
| 138 | const char musb_driver_name[] = MUSB_DRIVER_NAME; | ||
| 139 | |||
| 140 | MODULE_DESCRIPTION(DRIVER_INFO); | ||
| 141 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
| 142 | MODULE_LICENSE("GPL"); | ||
| 143 | MODULE_ALIAS("platform:" MUSB_DRIVER_NAME); | ||
| 144 | |||
| 145 | |||
| 146 | /*-------------------------------------------------------------------------*/ | ||
| 147 | |||
| 148 | static inline struct musb *dev_to_musb(struct device *dev) | ||
| 149 | { | ||
| 150 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 151 | /* usbcore insists dev->driver_data is a "struct hcd *" */ | ||
| 152 | return hcd_to_musb(dev_get_drvdata(dev)); | ||
| 153 | #else | ||
| 154 | return dev_get_drvdata(dev); | ||
| 155 | #endif | ||
| 156 | } | ||
| 157 | |||
| 158 | /*-------------------------------------------------------------------------*/ | ||
| 159 | |||
| 160 | #ifndef CONFIG_USB_TUSB6010 | ||
| 161 | /* | ||
| 162 | * Load an endpoint's FIFO | ||
| 163 | */ | ||
| 164 | void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) | ||
| 165 | { | ||
| 166 | void __iomem *fifo = hw_ep->fifo; | ||
| 167 | |||
| 168 | prefetch((u8 *)src); | ||
| 169 | |||
| 170 | DBG(4, "%cX ep%d fifo %p count %d buf %p\n", | ||
| 171 | 'T', hw_ep->epnum, fifo, len, src); | ||
| 172 | |||
| 173 | /* we can't assume unaligned reads work */ | ||
| 174 | if (likely((0x01 & (unsigned long) src) == 0)) { | ||
| 175 | u16 index = 0; | ||
| 176 | |||
| 177 | /* best case is 32bit-aligned source address */ | ||
| 178 | if ((0x02 & (unsigned long) src) == 0) { | ||
| 179 | if (len >= 4) { | ||
| 180 | writesl(fifo, src + index, len >> 2); | ||
| 181 | index += len & ~0x03; | ||
| 182 | } | ||
| 183 | if (len & 0x02) { | ||
| 184 | musb_writew(fifo, 0, *(u16 *)&src[index]); | ||
| 185 | index += 2; | ||
| 186 | } | ||
| 187 | } else { | ||
| 188 | if (len >= 2) { | ||
| 189 | writesw(fifo, src + index, len >> 1); | ||
| 190 | index += len & ~0x01; | ||
| 191 | } | ||
| 192 | } | ||
| 193 | if (len & 0x01) | ||
| 194 | musb_writeb(fifo, 0, src[index]); | ||
| 195 | } else { | ||
| 196 | /* byte aligned */ | ||
| 197 | writesb(fifo, src, len); | ||
| 198 | } | ||
| 199 | } | ||
| 200 | |||
| 201 | /* | ||
| 202 | * Unload an endpoint's FIFO | ||
| 203 | */ | ||
| 204 | void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) | ||
| 205 | { | ||
| 206 | void __iomem *fifo = hw_ep->fifo; | ||
| 207 | |||
| 208 | DBG(4, "%cX ep%d fifo %p count %d buf %p\n", | ||
| 209 | 'R', hw_ep->epnum, fifo, len, dst); | ||
| 210 | |||
| 211 | /* we can't assume unaligned writes work */ | ||
| 212 | if (likely((0x01 & (unsigned long) dst) == 0)) { | ||
| 213 | u16 index = 0; | ||
| 214 | |||
| 215 | /* best case is 32bit-aligned destination address */ | ||
| 216 | if ((0x02 & (unsigned long) dst) == 0) { | ||
| 217 | if (len >= 4) { | ||
| 218 | readsl(fifo, dst, len >> 2); | ||
| 219 | index = len & ~0x03; | ||
| 220 | } | ||
| 221 | if (len & 0x02) { | ||
| 222 | *(u16 *)&dst[index] = musb_readw(fifo, 0); | ||
| 223 | index += 2; | ||
| 224 | } | ||
| 225 | } else { | ||
| 226 | if (len >= 2) { | ||
| 227 | readsw(fifo, dst, len >> 1); | ||
| 228 | index = len & ~0x01; | ||
| 229 | } | ||
| 230 | } | ||
| 231 | if (len & 0x01) | ||
| 232 | dst[index] = musb_readb(fifo, 0); | ||
| 233 | } else { | ||
| 234 | /* byte aligned */ | ||
| 235 | readsb(fifo, dst, len); | ||
| 236 | } | ||
| 237 | } | ||
| 238 | |||
| 239 | #endif /* normal PIO */ | ||
| 240 | |||
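The two PIO helpers above choose 32-bit, 16-bit or 8-bit FIFO accesses from the buffer's alignment and length. A standalone sketch of just that decision (no hardware access; the split() helper is illustrative, not driver code):

    #include <stdio.h>

    static void split(unsigned long addr, unsigned len)
    {
            unsigned words = 0, halves = 0, bytes = 0;

            if ((addr & 0x01) == 0) {
                    if ((addr & 0x02) == 0) {
                            /* 32-bit aligned: words, maybe one halfword */
                            words = len >> 2;
                            halves = (len & 0x02) ? 1 : 0;
                    } else {
                            /* 16-bit aligned: halfwords only */
                            halves = len >> 1;
                    }
                    bytes = len & 0x01;     /* trailing odd byte */
            } else {
                    bytes = len;            /* unaligned: byte at a time */
            }
            printf("len %u at %#lx -> %u words, %u halfwords, %u bytes\n",
                   len, addr, words, halves, bytes);
    }

    int main(void)
    {
            split(0x1000, 13);      /* 3 words + 1 byte */
            split(0x1002, 13);      /* 6 halfwords + 1 byte */
            split(0x1001, 13);      /* 13 single bytes */
            return 0;
    }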
| 241 | |||
| 242 | /*-------------------------------------------------------------------------*/ | ||
| 243 | |||
| 244 | /* for high speed test mode; see USB 2.0 spec 7.1.20 */ | ||
| 245 | static const u8 musb_test_packet[53] = { | ||
| 246 | /* implicit SYNC then DATA0 to start */ | ||
| 247 | |||
| 248 | /* JKJKJKJK x9 */ | ||
| 249 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 250 | /* JJKKJJKK x8 */ | ||
| 251 | 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, | ||
| 252 | /* JJJJKKKK x8 */ | ||
| 253 | 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, | ||
| 254 | /* JJJJJJJKKKKKKK x8 */ | ||
| 255 | 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | ||
| 256 | /* JJJJJJJK x8 */ | ||
| 257 | 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, | ||
| 258 | /* JKKKKKKK x10, JK */ | ||
| 259 | 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e | ||
| 260 | |||
| 261 | /* implicit CRC16 then EOP to end */ | ||
| 262 | }; | ||
| 263 | |||
| 264 | void musb_load_testpacket(struct musb *musb) | ||
| 265 | { | ||
| 266 | void __iomem *regs = musb->endpoints[0].regs; | ||
| 267 | |||
| 268 | musb_ep_select(musb->mregs, 0); | ||
| 269 | musb_write_fifo(musb->control_ep, | ||
| 270 | sizeof(musb_test_packet), musb_test_packet); | ||
| 271 | musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY); | ||
| 272 | } | ||
| 273 | |||
| 274 | /*-------------------------------------------------------------------------*/ | ||
| 275 | |||
| 276 | const char *otg_state_string(struct musb *musb) | ||
| 277 | { | ||
| 278 | switch (musb->xceiv.state) { | ||
| 279 | case OTG_STATE_A_IDLE: return "a_idle"; | ||
| 280 | case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise"; | ||
| 281 | case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon"; | ||
| 282 | case OTG_STATE_A_HOST: return "a_host"; | ||
| 283 | case OTG_STATE_A_SUSPEND: return "a_suspend"; | ||
| 284 | case OTG_STATE_A_PERIPHERAL: return "a_peripheral"; | ||
| 285 | case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall"; | ||
| 286 | case OTG_STATE_A_VBUS_ERR: return "a_vbus_err"; | ||
| 287 | case OTG_STATE_B_IDLE: return "b_idle"; | ||
| 288 | case OTG_STATE_B_SRP_INIT: return "b_srp_init"; | ||
| 289 | case OTG_STATE_B_PERIPHERAL: return "b_peripheral"; | ||
| 290 | case OTG_STATE_B_WAIT_ACON: return "b_wait_acon"; | ||
| 291 | case OTG_STATE_B_HOST: return "b_host"; | ||
| 292 | default: return "UNDEFINED"; | ||
| 293 | } | ||
| 294 | } | ||
| 295 | |||
| 296 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 297 | |||
| 298 | /* | ||
| 299 | * See also USB_OTG_1-3.pdf 6.6.5 Timers | ||
| 300 | * REVISIT: Are the other timers done in the hardware? | ||
| 301 | */ | ||
| 302 | #define TB_ASE0_BRST 100 /* Min 3.125 ms */ | ||
| 303 | |||
| 304 | /* | ||
| 305 | * Handles OTG hnp timeouts, such as b_ase0_brst | ||
| 306 | */ | ||
| 307 | void musb_otg_timer_func(unsigned long data) | ||
| 308 | { | ||
| 309 | struct musb *musb = (struct musb *)data; | ||
| 310 | unsigned long flags; | ||
| 311 | |||
| 312 | spin_lock_irqsave(&musb->lock, flags); | ||
| 313 | switch (musb->xceiv.state) { | ||
| 314 | case OTG_STATE_B_WAIT_ACON: | ||
| 315 | DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n"); | ||
| 316 | musb_g_disconnect(musb); | ||
| 317 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
| 318 | musb->is_active = 0; | ||
| 319 | break; | ||
| 320 | case OTG_STATE_A_WAIT_BCON: | ||
| 321 | DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n"); | ||
| 322 | musb_hnp_stop(musb); | ||
| 323 | break; | ||
| 324 | default: | ||
| 325 | DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb)); | ||
| 326 | } | ||
| 327 | musb->ignore_disconnect = 0; | ||
| 328 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 329 | } | ||
| 330 | |||
| 331 | static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0); | ||
| 332 | |||
| 333 | /* | ||
| 334 | * Stops the B-device HNP state. Caller must take care of locking. | ||
| 335 | */ | ||
| 336 | void musb_hnp_stop(struct musb *musb) | ||
| 337 | { | ||
| 338 | struct usb_hcd *hcd = musb_to_hcd(musb); | ||
| 339 | void __iomem *mbase = musb->mregs; | ||
| 340 | u8 reg; | ||
| 341 | |||
| 342 | switch (musb->xceiv.state) { | ||
| 343 | case OTG_STATE_A_PERIPHERAL: | ||
| 344 | case OTG_STATE_A_WAIT_VFALL: | ||
| 345 | case OTG_STATE_A_WAIT_BCON: | ||
| 346 | DBG(1, "HNP: Switching back to A-host\n"); | ||
| 347 | musb_g_disconnect(musb); | ||
| 348 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
| 349 | MUSB_HST_MODE(musb); | ||
| 350 | musb->is_active = 0; | ||
| 351 | break; | ||
| 352 | case OTG_STATE_B_HOST: | ||
| 353 | DBG(1, "HNP: Disabling HR\n"); | ||
| 354 | hcd->self.is_b_host = 0; | ||
| 355 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
| 356 | MUSB_DEV_MODE(musb); | ||
| 357 | reg = musb_readb(mbase, MUSB_POWER); | ||
| 358 | reg |= MUSB_POWER_SUSPENDM; | ||
| 359 | musb_writeb(mbase, MUSB_POWER, reg); | ||
| 360 | /* REVISIT: Start SESSION_REQUEST here? */ | ||
| 361 | break; | ||
| 362 | default: | ||
| 363 | DBG(1, "HNP: Stopping in unknown state %s\n", | ||
| 364 | otg_state_string(musb)); | ||
| 365 | } | ||
| 366 | |||
| 367 | /* | ||
| 368 | * When returning to A state after HNP, avoid hub_port_rebounce(), | ||
| 369 | * which causes occasional OPT A "Did not receive reset after connect" | ||
| 370 | * errors. | ||
| 371 | */ | ||
| 372 | musb->port1_status &= | ||
| 373 | ~(1 << USB_PORT_FEAT_C_CONNECTION); | ||
| 374 | } | ||
| 375 | |||
| 376 | #endif | ||
| 377 | |||
| 378 | /* | ||
| 379 | * Interrupt Service Routine to record USB "global" interrupts. | ||
| 380 | * Since these do not happen often and signify things of | ||
| 381 | * paramount importance, it seems OK to check them individually; | ||
| 382 | * the order of the tests is specified in the manual | ||
| 383 | * | ||
| 384 | * @param musb instance pointer | ||
| 385 | * @param int_usb register contents | ||
| 386 | * @param devctl | ||
| 387 | * @param power | ||
| 388 | */ | ||
| 389 | |||
| 390 | #define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \ | ||
| 391 | | MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \ | ||
| 392 | | MUSB_INTR_RESET) | ||
| 393 | |||
| 394 | static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | ||
| 395 | u8 devctl, u8 power) | ||
| 396 | { | ||
| 397 | irqreturn_t handled = IRQ_NONE; | ||
| 398 | void __iomem *mbase = musb->mregs; | ||
| 399 | |||
| 400 | DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl, | ||
| 401 | int_usb); | ||
| 402 | |||
| 403 | /* in host mode, the peripheral may issue remote wakeup. | ||
| 404 | * in peripheral mode, the host may resume the link. | ||
| 405 | * spurious RESUME irqs happen too, paired with SUSPEND. | ||
| 406 | */ | ||
| 407 | if (int_usb & MUSB_INTR_RESUME) { | ||
| 408 | handled = IRQ_HANDLED; | ||
| 409 | DBG(3, "RESUME (%s)\n", otg_state_string(musb)); | ||
| 410 | |||
| 411 | if (devctl & MUSB_DEVCTL_HM) { | ||
| 412 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 413 | switch (musb->xceiv.state) { | ||
| 414 | case OTG_STATE_A_SUSPEND: | ||
| 415 | /* remote wakeup? later, GetPortStatus | ||
| 416 | * will stop RESUME signaling | ||
| 417 | */ | ||
| 418 | |||
| 419 | if (power & MUSB_POWER_SUSPENDM) { | ||
| 420 | /* spurious */ | ||
| 421 | musb->int_usb &= ~MUSB_INTR_SUSPEND; | ||
| 422 | DBG(2, "Spurious SUSPENDM\n"); | ||
| 423 | break; | ||
| 424 | } | ||
| 425 | |||
| 426 | power &= ~MUSB_POWER_SUSPENDM; | ||
| 427 | musb_writeb(mbase, MUSB_POWER, | ||
| 428 | power | MUSB_POWER_RESUME); | ||
| 429 | |||
| 430 | musb->port1_status |= | ||
| 431 | (USB_PORT_STAT_C_SUSPEND << 16) | ||
| 432 | | MUSB_PORT_STAT_RESUME; | ||
| 433 | musb->rh_timer = jiffies | ||
| 434 | + msecs_to_jiffies(20); | ||
| 435 | |||
| 436 | musb->xceiv.state = OTG_STATE_A_HOST; | ||
| 437 | musb->is_active = 1; | ||
| 438 | usb_hcd_resume_root_hub(musb_to_hcd(musb)); | ||
| 439 | break; | ||
| 440 | case OTG_STATE_B_WAIT_ACON: | ||
| 441 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
| 442 | musb->is_active = 1; | ||
| 443 | MUSB_DEV_MODE(musb); | ||
| 444 | break; | ||
| 445 | default: | ||
| 446 | WARNING("bogus %s RESUME (%s)\n", | ||
| 447 | "host", | ||
| 448 | otg_state_string(musb)); | ||
| 449 | } | ||
| 450 | #endif | ||
| 451 | } else { | ||
| 452 | switch (musb->xceiv.state) { | ||
| 453 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 454 | case OTG_STATE_A_SUSPEND: | ||
| 455 | /* possibly DISCONNECT is upcoming */ | ||
| 456 | musb->xceiv.state = OTG_STATE_A_HOST; | ||
| 457 | usb_hcd_resume_root_hub(musb_to_hcd(musb)); | ||
| 458 | break; | ||
| 459 | #endif | ||
| 460 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 461 | case OTG_STATE_B_WAIT_ACON: | ||
| 462 | case OTG_STATE_B_PERIPHERAL: | ||
| 463 | /* disconnect while suspended? we may | ||
| 464 | * not get a disconnect irq... | ||
| 465 | */ | ||
| 466 | if ((devctl & MUSB_DEVCTL_VBUS) | ||
| 467 | != (3 << MUSB_DEVCTL_VBUS_SHIFT) | ||
| 468 | ) { | ||
| 469 | musb->int_usb |= MUSB_INTR_DISCONNECT; | ||
| 470 | musb->int_usb &= ~MUSB_INTR_SUSPEND; | ||
| 471 | break; | ||
| 472 | } | ||
| 473 | musb_g_resume(musb); | ||
| 474 | break; | ||
| 475 | case OTG_STATE_B_IDLE: | ||
| 476 | musb->int_usb &= ~MUSB_INTR_SUSPEND; | ||
| 477 | break; | ||
| 478 | #endif | ||
| 479 | default: | ||
| 480 | WARNING("bogus %s RESUME (%s)\n", | ||
| 481 | "peripheral", | ||
| 482 | otg_state_string(musb)); | ||
| 483 | } | ||
| 484 | } | ||
| 485 | } | ||
| 486 | |||
| 487 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 488 | /* see manual for the order of the tests */ | ||
| 489 | if (int_usb & MUSB_INTR_SESSREQ) { | ||
| 490 | DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb)); | ||
| 491 | |||
| 492 | /* IRQ arrives from ID pin sense or (later, if VBUS power | ||
| 493 | * is removed) SRP. responses are time critical: | ||
| 494 | * - turn on VBUS (with silicon-specific mechanism) | ||
| 495 | * - go through A_WAIT_VRISE | ||
| 496 | * - ... to A_WAIT_BCON. | ||
| 497 | * a_wait_vrise_tmout triggers VBUS_ERROR transitions | ||
| 498 | */ | ||
| 499 | musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); | ||
| 500 | musb->ep0_stage = MUSB_EP0_START; | ||
| 501 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
| 502 | MUSB_HST_MODE(musb); | ||
| 503 | musb_set_vbus(musb, 1); | ||
| 504 | |||
| 505 | handled = IRQ_HANDLED; | ||
| 506 | } | ||
| 507 | |||
| 508 | if (int_usb & MUSB_INTR_VBUSERROR) { | ||
| 509 | int ignore = 0; | ||
| 510 | |||
| 511 | /* During connection as an A-Device, we may see short | ||
| 512 | * current spikes causing a voltage drop, because of cable | ||
| 513 | * and peripheral capacitance combined with vbus draw. | ||
| 514 | * (So: less common with truly self-powered devices, where | ||
| 515 | * vbus doesn't act like a power supply.) | ||
| 516 | * | ||
| 517 | * Such spikes are short; usually less than ~500 usec, max | ||
| 518 | * of ~2 msec. That is, they're not sustained overcurrent | ||
| 519 | * errors, though they're reported using VBUSERROR irqs. | ||
| 520 | * | ||
| 521 | * Workarounds: (a) hardware: use self powered devices. | ||
| 522 | * (b) software: ignore non-repeated VBUS errors. | ||
| 523 | * | ||
| 524 | * REVISIT: do delays from lots of DEBUG_KERNEL checks | ||
| 525 | * make trouble here, keeping VBUS < 4.4V ? | ||
| 526 | */ | ||
| 527 | switch (musb->xceiv.state) { | ||
| 528 | case OTG_STATE_A_HOST: | ||
| 529 | /* recovery is dicey once we've gotten past the | ||
| 530 | * initial stages of enumeration, but if VBUS | ||
| 531 | * stayed ok at the other end of the link, and | ||
| 532 | * another reset is due (at least for high speed, | ||
| 533 | * to redo the chirp etc), it might work OK... | ||
| 534 | */ | ||
| 535 | case OTG_STATE_A_WAIT_BCON: | ||
| 536 | case OTG_STATE_A_WAIT_VRISE: | ||
| 537 | if (musb->vbuserr_retry) { | ||
| 538 | musb->vbuserr_retry--; | ||
| 539 | ignore = 1; | ||
| 540 | devctl |= MUSB_DEVCTL_SESSION; | ||
| 541 | musb_writeb(mbase, MUSB_DEVCTL, devctl); | ||
| 542 | } else { | ||
| 543 | musb->port1_status |= | ||
| 544 | (1 << USB_PORT_FEAT_OVER_CURRENT) | ||
| 545 | | (1 << USB_PORT_FEAT_C_OVER_CURRENT); | ||
| 546 | } | ||
| 547 | break; | ||
| 548 | default: | ||
| 549 | break; | ||
| 550 | } | ||
| 551 | |||
| 552 | DBG(1, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n", | ||
| 553 | otg_state_string(musb), | ||
| 554 | devctl, | ||
| 555 | ({ char *s; | ||
| 556 | switch (devctl & MUSB_DEVCTL_VBUS) { | ||
| 557 | case 0 << MUSB_DEVCTL_VBUS_SHIFT: | ||
| 558 | s = "<SessEnd"; break; | ||
| 559 | case 1 << MUSB_DEVCTL_VBUS_SHIFT: | ||
| 560 | s = "<AValid"; break; | ||
| 561 | case 2 << MUSB_DEVCTL_VBUS_SHIFT: | ||
| 562 | s = "<VBusValid"; break; | ||
| 563 | /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */ | ||
| 564 | default: | ||
| 565 | s = "VALID"; break; | ||
| 566 | }; s; }), | ||
| 567 | VBUSERR_RETRY_COUNT - musb->vbuserr_retry, | ||
| 568 | musb->port1_status); | ||
| 569 | |||
| 570 | /* go through A_WAIT_VFALL then start a new session */ | ||
| 571 | if (!ignore) | ||
| 572 | musb_set_vbus(musb, 0); | ||
| 573 | handled = IRQ_HANDLED; | ||
| 574 | } | ||
| 575 | |||
| 576 | if (int_usb & MUSB_INTR_CONNECT) { | ||
| 577 | struct usb_hcd *hcd = musb_to_hcd(musb); | ||
| 578 | |||
| 579 | handled = IRQ_HANDLED; | ||
| 580 | musb->is_active = 1; | ||
| 581 | set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); | ||
| 582 | |||
| 583 | musb->ep0_stage = MUSB_EP0_START; | ||
| 584 | |||
| 585 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 586 | /* flush endpoints when transitioning from Device Mode */ | ||
| 587 | if (is_peripheral_active(musb)) { | ||
| 588 | /* REVISIT HNP; just force disconnect */ | ||
| 589 | } | ||
| 590 | musb_writew(mbase, MUSB_INTRTXE, musb->epmask); | ||
| 591 | musb_writew(mbase, MUSB_INTRRXE, musb->epmask & 0xfffe); | ||
| 592 | musb_writeb(mbase, MUSB_INTRUSBE, 0xf7); | ||
| 593 | #endif | ||
| 594 | musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED | ||
| 595 | |USB_PORT_STAT_HIGH_SPEED | ||
| 596 | |USB_PORT_STAT_ENABLE | ||
| 597 | ); | ||
| 598 | musb->port1_status |= USB_PORT_STAT_CONNECTION | ||
| 599 | |(USB_PORT_STAT_C_CONNECTION << 16); | ||
| 600 | |||
| 601 | /* high vs full speed is just a guess until after reset */ | ||
| 602 | if (devctl & MUSB_DEVCTL_LSDEV) | ||
| 603 | musb->port1_status |= USB_PORT_STAT_LOW_SPEED; | ||
| 604 | |||
| 605 | if (hcd->status_urb) | ||
| 606 | usb_hcd_poll_rh_status(hcd); | ||
| 607 | else | ||
| 608 | usb_hcd_resume_root_hub(hcd); | ||
| 609 | |||
| 610 | MUSB_HST_MODE(musb); | ||
| 611 | |||
| 612 | /* indicate new connection to OTG machine */ | ||
| 613 | switch (musb->xceiv.state) { | ||
| 614 | case OTG_STATE_B_PERIPHERAL: | ||
| 615 | if (int_usb & MUSB_INTR_SUSPEND) { | ||
| 616 | DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n"); | ||
| 617 | musb->xceiv.state = OTG_STATE_B_HOST; | ||
| 618 | hcd->self.is_b_host = 1; | ||
| 619 | int_usb &= ~MUSB_INTR_SUSPEND; | ||
| 620 | } else | ||
| 621 | DBG(1, "CONNECT as b_peripheral???\n"); | ||
| 622 | break; | ||
| 623 | case OTG_STATE_B_WAIT_ACON: | ||
| 624 | DBG(1, "HNP: Waiting to switch to b_host state\n"); | ||
| 625 | musb->xceiv.state = OTG_STATE_B_HOST; | ||
| 626 | hcd->self.is_b_host = 1; | ||
| 627 | break; | ||
| 628 | default: | ||
| 629 | if ((devctl & MUSB_DEVCTL_VBUS) | ||
| 630 | == (3 << MUSB_DEVCTL_VBUS_SHIFT)) { | ||
| 631 | musb->xceiv.state = OTG_STATE_A_HOST; | ||
| 632 | hcd->self.is_b_host = 0; | ||
| 633 | } | ||
| 634 | break; | ||
| 635 | } | ||
| 636 | DBG(1, "CONNECT (%s) devctl %02x\n", | ||
| 637 | otg_state_string(musb), devctl); | ||
| 638 | } | ||
| 639 | #endif /* CONFIG_USB_MUSB_HDRC_HCD */ | ||
| 640 | |||
| 641 | /* mentor saves a bit: bus reset and babble share the same irq. | ||
| 642 | * only host sees babble; only peripheral sees bus reset. | ||
| 643 | */ | ||
| 644 | if (int_usb & MUSB_INTR_RESET) { | ||
| 645 | if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) { | ||
| 646 | /* | ||
| 647 | * Looks like non-HS BABBLE can be ignored, but | ||
| 648 | * HS BABBLE is an error condition. For HS the solution | ||
| 649 | * is to avoid babble in the first place and fix what | ||
| 650 | * caused BABBLE. When HS BABBLE happens we can only | ||
| 651 | * stop the session. | ||
| 652 | */ | ||
| 653 | if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV)) | ||
| 654 | DBG(1, "BABBLE devctl: %02x\n", devctl); | ||
| 655 | else { | ||
| 656 | ERR("Stopping host session -- babble\n"); | ||
| 657 | musb_writeb(mbase, MUSB_DEVCTL, 0); | ||
| 658 | } | ||
| 659 | } else if (is_peripheral_capable()) { | ||
| 660 | DBG(1, "BUS RESET as %s\n", otg_state_string(musb)); | ||
| 661 | switch (musb->xceiv.state) { | ||
| 662 | #ifdef CONFIG_USB_OTG | ||
| 663 | case OTG_STATE_A_SUSPEND: | ||
| 664 | /* We need to ignore disconnect on suspend | ||
| 665 | * otherwise tusb 2.0 won't reconnect after a | ||
| 666 | * power cycle, which breaks otg compliance. | ||
| 667 | */ | ||
| 668 | musb->ignore_disconnect = 1; | ||
| 669 | musb_g_reset(musb); | ||
| 670 | /* FALLTHROUGH */ | ||
| 671 | case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */ | ||
| 672 | DBG(1, "HNP: Setting timer as %s\n", | ||
| 673 | otg_state_string(musb)); | ||
| 674 | musb_otg_timer.data = (unsigned long)musb; | ||
| 675 | mod_timer(&musb_otg_timer, jiffies | ||
| 676 | + msecs_to_jiffies(100)); | ||
| 677 | break; | ||
| 678 | case OTG_STATE_A_PERIPHERAL: | ||
| 679 | musb_hnp_stop(musb); | ||
| 680 | break; | ||
| 681 | case OTG_STATE_B_WAIT_ACON: | ||
| 682 | DBG(1, "HNP: RESET (%s), to b_peripheral\n", | ||
| 683 | otg_state_string(musb)); | ||
| 684 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
| 685 | musb_g_reset(musb); | ||
| 686 | break; | ||
| 687 | #endif | ||
| 688 | case OTG_STATE_B_IDLE: | ||
| 689 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
| 690 | /* FALLTHROUGH */ | ||
| 691 | case OTG_STATE_B_PERIPHERAL: | ||
| 692 | musb_g_reset(musb); | ||
| 693 | break; | ||
| 694 | default: | ||
| 695 | DBG(1, "Unhandled BUS RESET as %s\n", | ||
| 696 | otg_state_string(musb)); | ||
| 697 | } | ||
| 698 | } | ||
| 699 | |||
| 700 | handled = IRQ_HANDLED; | ||
| 701 | } | ||
| 702 | schedule_work(&musb->irq_work); | ||
| 703 | |||
| 704 | return handled; | ||
| 705 | } | ||
| 706 | |||
| 707 | /* | ||
| 708 | * Interrupt Service Routine to record USB "global" interrupts. | ||
| 709 | * Since these do not happen often and signify things of | ||
| 710 | * paramount importance, it seems OK to check them individually; | ||
| 711 | * the order of the tests is specified in the manual | ||
| 712 | * | ||
| 713 | * @param musb instance pointer | ||
| 714 | * @param int_usb register contents | ||
| 715 | * @param devctl | ||
| 716 | * @param power | ||
| 717 | */ | ||
| 718 | static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb, | ||
| 719 | u8 devctl, u8 power) | ||
| 720 | { | ||
| 721 | irqreturn_t handled = IRQ_NONE; | ||
| 722 | |||
| 723 | #if 0 | ||
| 724 | /* REVISIT ... this would be for multiplexing periodic endpoints, or | ||
| 725 | * supporting transfer phasing to prevent exceeding ISO bandwidth | ||
| 726 | * limits of a given frame or microframe. | ||
| 727 | * | ||
| 728 | * It's not needed for peripheral side, which dedicates endpoints; | ||
| 729 | * though it _might_ use SOF irqs for other purposes. | ||
| 730 | * | ||
| 731 | * And it's not currently needed for host side, which also dedicates | ||
| 732 | * endpoints, relies on TX/RX interval registers, and isn't claimed | ||
| 733 | * to support ISO transfers yet. | ||
| 734 | */ | ||
| 735 | if (int_usb & MUSB_INTR_SOF) { | ||
| 736 | void __iomem *mbase = musb->mregs; | ||
| 737 | struct musb_hw_ep *ep; | ||
| 738 | u8 epnum; | ||
| 739 | u16 frame; | ||
| 740 | |||
| 741 | DBG(6, "START_OF_FRAME\n"); | ||
| 742 | handled = IRQ_HANDLED; | ||
| 743 | |||
| 744 | /* start any periodic Tx transfers waiting for current frame */ | ||
| 745 | frame = musb_readw(mbase, MUSB_FRAME); | ||
| 746 | ep = musb->endpoints; | ||
| 747 | for (epnum = 1; (epnum < musb->nr_endpoints) | ||
| 748 | && (musb->epmask >= (1 << epnum)); | ||
| 749 | epnum++, ep++) { | ||
| 750 | /* | ||
| 751 | * FIXME handle framecounter wraps (12 bits) | ||
| 752 | * eliminate duplicated StartUrb logic | ||
| 753 | */ | ||
| 754 | if (ep->dwWaitFrame >= frame) { | ||
| 755 | ep->dwWaitFrame = 0; | ||
| 756 | pr_debug("SOF --> periodic TX%s on %d\n", | ||
| 757 | ep->tx_channel ? " DMA" : "", | ||
| 758 | epnum); | ||
| 759 | if (!ep->tx_channel) | ||
| 760 | musb_h_tx_start(musb, epnum); | ||
| 761 | else | ||
| 762 | cppi_hostdma_start(musb, epnum); | ||
| 763 | } | ||
| 764 | } /* end of for loop */ | ||
| 765 | } | ||
| 766 | #endif | ||
| 767 | |||
| 768 | if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { | ||
| 769 | DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n", | ||
| 770 | otg_state_string(musb), | ||
| 771 | MUSB_MODE(musb), devctl); | ||
| 772 | handled = IRQ_HANDLED; | ||
| 773 | |||
| 774 | switch (musb->xceiv.state) { | ||
| 775 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 776 | case OTG_STATE_A_HOST: | ||
| 777 | case OTG_STATE_A_SUSPEND: | ||
| 778 | musb_root_disconnect(musb); | ||
| 779 | if (musb->a_wait_bcon != 0) | ||
| 780 | musb_platform_try_idle(musb, jiffies | ||
| 781 | + msecs_to_jiffies(musb->a_wait_bcon)); | ||
| 782 | break; | ||
| 783 | #endif /* HOST */ | ||
| 784 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 785 | case OTG_STATE_B_HOST: | ||
| 786 | musb_hnp_stop(musb); | ||
| 787 | break; | ||
| 788 | case OTG_STATE_A_PERIPHERAL: | ||
| 789 | musb_hnp_stop(musb); | ||
| 790 | musb_root_disconnect(musb); | ||
| 791 | /* FALLTHROUGH */ | ||
| 792 | case OTG_STATE_B_WAIT_ACON: | ||
| 793 | /* FALLTHROUGH */ | ||
| 794 | #endif /* OTG */ | ||
| 795 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 796 | case OTG_STATE_B_PERIPHERAL: | ||
| 797 | case OTG_STATE_B_IDLE: | ||
| 798 | musb_g_disconnect(musb); | ||
| 799 | break; | ||
| 800 | #endif /* GADGET */ | ||
| 801 | default: | ||
| 802 | WARNING("unhandled DISCONNECT transition (%s)\n", | ||
| 803 | otg_state_string(musb)); | ||
| 804 | break; | ||
| 805 | } | ||
| 806 | |||
| 807 | schedule_work(&musb->irq_work); | ||
| 808 | } | ||
| 809 | |||
| 810 | if (int_usb & MUSB_INTR_SUSPEND) { | ||
| 811 | DBG(1, "SUSPEND (%s) devctl %02x power %02x\n", | ||
| 812 | otg_state_string(musb), devctl, power); | ||
| 813 | handled = IRQ_HANDLED; | ||
| 814 | |||
| 815 | switch (musb->xceiv.state) { | ||
| 816 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 817 | case OTG_STATE_A_PERIPHERAL: | ||
| 818 | /* | ||
| 819 | * We cannot stop HNP here, devctl BDEVICE might still | ||
| 820 | * be set. | ||
| 821 | */ | ||
| 822 | break; | ||
| 823 | #endif | ||
| 824 | case OTG_STATE_B_PERIPHERAL: | ||
| 825 | musb_g_suspend(musb); | ||
| 826 | musb->is_active = is_otg_enabled(musb) | ||
| 827 | && musb->xceiv.gadget->b_hnp_enable; | ||
| 828 | if (musb->is_active) { | ||
| 829 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 830 | musb->xceiv.state = OTG_STATE_B_WAIT_ACON; | ||
| 831 | DBG(1, "HNP: Setting timer for b_ase0_brst\n"); | ||
| 832 | musb_otg_timer.data = (unsigned long)musb; | ||
| 833 | mod_timer(&musb_otg_timer, jiffies | ||
| 834 | + msecs_to_jiffies(TB_ASE0_BRST)); | ||
| 835 | #endif | ||
| 836 | } | ||
| 837 | break; | ||
| 838 | case OTG_STATE_A_WAIT_BCON: | ||
| 839 | if (musb->a_wait_bcon != 0) | ||
| 840 | musb_platform_try_idle(musb, jiffies | ||
| 841 | + msecs_to_jiffies(musb->a_wait_bcon)); | ||
| 842 | break; | ||
| 843 | case OTG_STATE_A_HOST: | ||
| 844 | musb->xceiv.state = OTG_STATE_A_SUSPEND; | ||
| 845 | musb->is_active = is_otg_enabled(musb) | ||
| 846 | && musb->xceiv.host->b_hnp_enable; | ||
| 847 | break; | ||
| 848 | case OTG_STATE_B_HOST: | ||
| 849 | /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ | ||
| 850 | DBG(1, "REVISIT: SUSPEND as B_HOST\n"); | ||
| 851 | break; | ||
| 852 | default: | ||
| 853 | /* "should not happen" */ | ||
| 854 | musb->is_active = 0; | ||
| 855 | break; | ||
| 856 | } | ||
| 857 | schedule_work(&musb->irq_work); | ||
| 858 | } | ||
| 859 | |||
| 860 | |||
| 861 | return handled; | ||
| 862 | } | ||
| 863 | |||
| 864 | /*-------------------------------------------------------------------------*/ | ||
| 865 | |||
| 866 | /* | ||
| 867 | * Program the HDRC to start (enable interrupts, dma, etc.). | ||
| 868 | */ | ||
| 869 | void musb_start(struct musb *musb) | ||
| 870 | { | ||
| 871 | void __iomem *regs = musb->mregs; | ||
| 872 | u8 devctl = musb_readb(regs, MUSB_DEVCTL); | ||
| 873 | |||
| 874 | DBG(2, "<== devctl %02x\n", devctl); | ||
| 875 | |||
| 876 | /* Set INT enable registers, enable interrupts */ | ||
| 877 | musb_writew(regs, MUSB_INTRTXE, musb->epmask); | ||
| 878 | musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe); | ||
| 879 | musb_writeb(regs, MUSB_INTRUSBE, 0xf7); | ||
| 880 | |||
| 881 | musb_writeb(regs, MUSB_TESTMODE, 0); | ||
| 882 | |||
| 883 | /* put into basic highspeed mode and start session */ | ||
| 884 | musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE | ||
| 885 | | MUSB_POWER_SOFTCONN | ||
| 886 | | MUSB_POWER_HSENAB | ||
| 887 | /* ENSUSPEND wedges tusb */ | ||
| 888 | /* | MUSB_POWER_ENSUSPEND */ | ||
| 889 | ); | ||
| 890 | |||
| 891 | musb->is_active = 0; | ||
| 892 | devctl = musb_readb(regs, MUSB_DEVCTL); | ||
| 893 | devctl &= ~MUSB_DEVCTL_SESSION; | ||
| 894 | |||
| 895 | if (is_otg_enabled(musb)) { | ||
| 896 | /* session started after: | ||
| 897 | * (a) ID-grounded irq, host mode; | ||
| 898 | * (b) vbus present/connect IRQ, peripheral mode; | ||
| 899 | * (c) peripheral initiates, using SRP | ||
| 900 | */ | ||
| 901 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) | ||
| 902 | musb->is_active = 1; | ||
| 903 | else | ||
| 904 | devctl |= MUSB_DEVCTL_SESSION; | ||
| 905 | |||
| 906 | } else if (is_host_enabled(musb)) { | ||
| 907 | /* assume ID pin is hard-wired to ground */ | ||
| 908 | devctl |= MUSB_DEVCTL_SESSION; | ||
| 909 | |||
| 910 | } else /* peripheral is enabled */ { | ||
| 911 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) | ||
| 912 | musb->is_active = 1; | ||
| 913 | } | ||
| 914 | musb_platform_enable(musb); | ||
| 915 | musb_writeb(regs, MUSB_DEVCTL, devctl); | ||
| 916 | } | ||
| 917 | |||
| 918 | |||
| 919 | static void musb_generic_disable(struct musb *musb) | ||
| 920 | { | ||
| 921 | void __iomem *mbase = musb->mregs; | ||
| 922 | u16 temp; | ||
| 923 | |||
| 924 | /* disable interrupts */ | ||
| 925 | musb_writeb(mbase, MUSB_INTRUSBE, 0); | ||
| 926 | musb_writew(mbase, MUSB_INTRTXE, 0); | ||
| 927 | musb_writew(mbase, MUSB_INTRRXE, 0); | ||
| 928 | |||
| 929 | /* off */ | ||
| 930 | musb_writeb(mbase, MUSB_DEVCTL, 0); | ||
| 931 | |||
| 932 | /* flush pending interrupts */ | ||
| 933 | temp = musb_readb(mbase, MUSB_INTRUSB); | ||
| 934 | temp = musb_readw(mbase, MUSB_INTRTX); | ||
| 935 | temp = musb_readw(mbase, MUSB_INTRRX); | ||
| 936 | |||
| 937 | } | ||
| 938 | |||
| 939 | /* | ||
| 940 | * Make the HDRC stop (disable interrupts, etc.); | ||
| 941 | * reversible by musb_start | ||
| 942 | * called on gadget driver unregister | ||
| 943 | * with controller locked, irqs blocked | ||
| 944 | * acts as a NOP unless some role activated the hardware | ||
| 945 | */ | ||
| 946 | void musb_stop(struct musb *musb) | ||
| 947 | { | ||
| 948 | /* stop IRQs, timers, ... */ | ||
| 949 | musb_platform_disable(musb); | ||
| 950 | musb_generic_disable(musb); | ||
| 951 | DBG(3, "HDRC disabled\n"); | ||
| 952 | |||
| 953 | /* FIXME | ||
| 954 | * - mark host and/or peripheral drivers unusable/inactive | ||
| 955 | * - disable DMA (and enable it in HdrcStart) | ||
| 956 | * - make sure we can musb_start() after musb_stop(); with | ||
| 957 | * OTG mode, gadget driver module rmmod/modprobe cycles that | ||
| 958 | * - ... | ||
| 959 | */ | ||
| 960 | musb_platform_try_idle(musb, 0); | ||
| 961 | } | ||
| 962 | |||
| 963 | static void musb_shutdown(struct platform_device *pdev) | ||
| 964 | { | ||
| 965 | struct musb *musb = dev_to_musb(&pdev->dev); | ||
| 966 | unsigned long flags; | ||
| 967 | |||
| 968 | spin_lock_irqsave(&musb->lock, flags); | ||
| 969 | musb_platform_disable(musb); | ||
| 970 | musb_generic_disable(musb); | ||
| 971 | if (musb->clock) { | ||
| 972 | clk_put(musb->clock); | ||
| 973 | musb->clock = NULL; | ||
| 974 | } | ||
| 975 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 976 | |||
| 977 | /* FIXME power down */ | ||
| 978 | } | ||
| 979 | |||
| 980 | |||
| 981 | /*-------------------------------------------------------------------------*/ | ||
| 982 | |||
| 983 | /* | ||
| 984 | * The silicon either has hard-wired endpoint configurations, or else | ||
| 985 | * "dynamic fifo" sizing. The driver has support for both, though at this | ||
| 986 | * writing only the dynamic sizing is very well tested. We use normal | ||
| 987 | * idioms so both modes are compile-tested, but dead code elimination | ||
| 988 | * leaves only the relevant one in the object file. | ||
| 989 | * | ||
| 990 | * We don't currently use dynamic fifo setup capability to do anything | ||
| 991 | * more than selecting one of a bunch of predefined configurations. | ||
| 992 | */ | ||
| 993 | #if defined(CONFIG_USB_TUSB6010) || \ | ||
| 994 | defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) | ||
| 995 | static ushort __initdata fifo_mode = 4; | ||
| 996 | #else | ||
| 997 | static ushort __initdata fifo_mode = 2; | ||
| 998 | #endif | ||
| 999 | |||
| 1000 | /* "modprobe ... fifo_mode=1" etc */ | ||
| 1001 | module_param(fifo_mode, ushort, 0); | ||
| 1002 | MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration"); | ||
| 1003 | |||
| 1004 | |||
| 1005 | enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed)); | ||
| 1006 | enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed)); | ||
| 1007 | |||
| 1008 | struct fifo_cfg { | ||
| 1009 | u8 hw_ep_num; | ||
| 1010 | enum fifo_style style; | ||
| 1011 | enum buf_mode mode; | ||
| 1012 | u16 maxpacket; | ||
| 1013 | }; | ||
| 1014 | |||
| 1015 | /* | ||
| 1016 | * tables defining fifo_mode values. define more if you like. | ||
| 1017 | * for host side, make sure both halves of ep1 are set up. | ||
| 1018 | */ | ||
| 1019 | |||
| 1020 | /* mode 0 - fits in 2KB */ | ||
| 1021 | static struct fifo_cfg __initdata mode_0_cfg[] = { | ||
| 1022 | { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1023 | { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1024 | { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, }, | ||
| 1025 | { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
| 1026 | { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
| 1027 | }; | ||
| 1028 | |||
| 1029 | /* mode 1 - fits in 4KB */ | ||
| 1030 | static struct fifo_cfg __initdata mode_1_cfg[] = { | ||
| 1031 | { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, }, | ||
| 1032 | { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, }, | ||
| 1033 | { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, }, | ||
| 1034 | { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
| 1035 | { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
| 1036 | }; | ||
| 1037 | |||
| 1038 | /* mode 2 - fits in 4KB */ | ||
| 1039 | static struct fifo_cfg __initdata mode_2_cfg[] = { | ||
| 1040 | { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1041 | { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1042 | { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1043 | { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1044 | { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
| 1045 | { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
| 1046 | }; | ||
| 1047 | |||
| 1048 | /* mode 3 - fits in 4KB */ | ||
| 1049 | static struct fifo_cfg __initdata mode_3_cfg[] = { | ||
| 1050 | { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, }, | ||
| 1051 | { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, }, | ||
| 1052 | { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1053 | { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1054 | { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
| 1055 | { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
| 1056 | }; | ||
| 1057 | |||
| 1058 | /* mode 4 - fits in 16KB */ | ||
| 1059 | static struct fifo_cfg __initdata mode_4_cfg[] = { | ||
| 1060 | { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1061 | { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1062 | { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1063 | { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1064 | { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1065 | { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1066 | { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1067 | { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1068 | { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1069 | { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1070 | { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1071 | { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1072 | { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1073 | { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1074 | { .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1075 | { .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1076 | { .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1077 | { .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1078 | { .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1079 | { .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1080 | { .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1081 | { .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1082 | { .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1083 | { .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1084 | { .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 512, }, | ||
| 1085 | { .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 512, }, | ||
| 1086 | { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, }, | ||
| 1087 | { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, | ||
| 1088 | }; | ||
| 1089 | |||
| 1090 | |||
| 1091 | /* | ||
| 1092 | * configure a fifo; for non-shared endpoints, this may be called | ||
| 1093 | * once for a tx fifo and once for an rx fifo. | ||
| 1094 | * | ||
| 1095 | * returns negative errno or offset for next fifo. | ||
| 1096 | */ | ||
| 1097 | static int __init | ||
| 1098 | fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep, | ||
| 1099 | const struct fifo_cfg *cfg, u16 offset) | ||
| 1100 | { | ||
| 1101 | void __iomem *mbase = musb->mregs; | ||
| 1102 | int size = 0; | ||
| 1103 | u16 maxpacket = cfg->maxpacket; | ||
| 1104 | u16 c_off = offset >> 3; | ||
| 1105 | u8 c_size; | ||
| 1106 | |||
| 1107 | /* expect hw_ep has already been zero-initialized */ | ||
| 1108 | |||
| 1109 | size = ffs(max(maxpacket, (u16) 8)) - 1; | ||
| 1110 | maxpacket = 1 << size; | ||
| 1111 | |||
| 1112 | c_size = size - 3; | ||
| 1113 | if (cfg->mode == BUF_DOUBLE) { | ||
| 1114 | if ((offset + (maxpacket << 1)) > | ||
| 1115 | (1 << (musb->config->ram_bits + 2))) | ||
| 1116 | return -EMSGSIZE; | ||
| 1117 | c_size |= MUSB_FIFOSZ_DPB; | ||
| 1118 | } else { | ||
| 1119 | if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2))) | ||
| 1120 | return -EMSGSIZE; | ||
| 1121 | } | ||
| 1122 | |||
| 1123 | /* configure the FIFO */ | ||
| 1124 | musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum); | ||
| 1125 | |||
| 1126 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 1127 | /* EP0 is reserved for control, bidirectional; | ||
| 1128 | * EP1 is reserved for bulk, two unidirectional halves. | ||
| 1129 | */ | ||
| 1130 | if (hw_ep->epnum == 1) | ||
| 1131 | musb->bulk_ep = hw_ep; | ||
| 1132 | /* REVISIT error check: be sure ep0 can both rx and tx ... */ | ||
| 1133 | #endif | ||
| 1134 | switch (cfg->style) { | ||
| 1135 | case FIFO_TX: | ||
| 1136 | musb_writeb(mbase, MUSB_TXFIFOSZ, c_size); | ||
| 1137 | musb_writew(mbase, MUSB_TXFIFOADD, c_off); | ||
| 1138 | hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); | ||
| 1139 | hw_ep->max_packet_sz_tx = maxpacket; | ||
| 1140 | break; | ||
| 1141 | case FIFO_RX: | ||
| 1142 | musb_writeb(mbase, MUSB_RXFIFOSZ, c_size); | ||
| 1143 | musb_writew(mbase, MUSB_RXFIFOADD, c_off); | ||
| 1144 | hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); | ||
| 1145 | hw_ep->max_packet_sz_rx = maxpacket; | ||
| 1146 | break; | ||
| 1147 | case FIFO_RXTX: | ||
| 1148 | musb_writeb(mbase, MUSB_TXFIFOSZ, c_size); | ||
| 1149 | musb_writew(mbase, MUSB_TXFIFOADD, c_off); | ||
| 1150 | hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); | ||
| 1151 | hw_ep->max_packet_sz_rx = maxpacket; | ||
| 1152 | |||
| 1153 | musb_writeb(mbase, MUSB_RXFIFOSZ, c_size); | ||
| 1154 | musb_writew(mbase, MUSB_RXFIFOADD, c_off); | ||
| 1155 | hw_ep->tx_double_buffered = hw_ep->rx_double_buffered; | ||
| 1156 | hw_ep->max_packet_sz_tx = maxpacket; | ||
| 1157 | |||
| 1158 | hw_ep->is_shared_fifo = true; | ||
| 1159 | break; | ||
| 1160 | } | ||
| 1161 | |||
| 1162 | /* NOTE rx and tx endpoint irqs aren't managed separately, | ||
| 1163 | * which happens to be ok | ||
| 1164 | */ | ||
| 1165 | musb->epmask |= (1 << hw_ep->epnum); | ||
| 1166 | |||
| 1167 | return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0)); | ||
| 1168 | } | ||
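The helper above encodes each FIFO as a power-of-two size (the FIFOSZ register holds log2(bytes) - 3 plus an optional double-buffer flag) and an offset in 8-byte units (FIFOADD). A minimal standalone sketch of that arithmetic, using a local FIFOSZ_DPB stand-in rather than the driver's MUSB_FIFOSZ_DPB, is shown below; it is illustrative only, not driver code.

    /* Userspace sketch of the fifo_setup() size/offset encoding.  The
     * driver's tables only use power-of-two maxpacket values, and
     * anything below 8 bytes is rounded up to 8.
     */
    #include <stdio.h>

    #define FIFOSZ_DPB	0x10		/* stands in for MUSB_FIFOSZ_DPB */

    static unsigned encode_fifosz(unsigned maxpacket, int double_buffered)
    {
    	unsigned size = 3;		/* 8 bytes is the minimum */

    	while ((1u << size) < maxpacket)
    		size++;
    	return (size - 3) | (double_buffered ? FIFOSZ_DPB : 0);
    }

    int main(void)
    {
    	/* a 512-byte double-buffered FIFO placed at byte offset 64 */
    	unsigned c_size = encode_fifosz(512, 1);
    	unsigned c_off = 64 >> 3;	/* FIFOADD counts 8-byte units */

    	printf("FIFOSZ=0x%02x FIFOADD=0x%02x, consumes %u bytes of FIFO RAM\n",
    	       c_size, c_off, 512u << 1);	/* doubled when DPB is set */
    	return 0;
    }

This prints FIFOSZ=0x16 FIFOADD=0x08, i.e. a 512-byte FIFO with the double-buffer bit set, consuming 1024 bytes of the dynamic FIFO RAM.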
| 1169 | |||
| 1170 | static struct fifo_cfg __initdata ep0_cfg = { | ||
| 1171 | .style = FIFO_RXTX, .maxpacket = 64, | ||
| 1172 | }; | ||
| 1173 | |||
| 1174 | static int __init ep_config_from_table(struct musb *musb) | ||
| 1175 | { | ||
| 1176 | const struct fifo_cfg *cfg; | ||
| 1177 | unsigned i, n; | ||
| 1178 | int offset; | ||
| 1179 | struct musb_hw_ep *hw_ep = musb->endpoints; | ||
| 1180 | |||
| 1181 | switch (fifo_mode) { | ||
| 1182 | default: | ||
| 1183 | fifo_mode = 0; | ||
| 1184 | /* FALLTHROUGH */ | ||
| 1185 | case 0: | ||
| 1186 | cfg = mode_0_cfg; | ||
| 1187 | n = ARRAY_SIZE(mode_0_cfg); | ||
| 1188 | break; | ||
| 1189 | case 1: | ||
| 1190 | cfg = mode_1_cfg; | ||
| 1191 | n = ARRAY_SIZE(mode_1_cfg); | ||
| 1192 | break; | ||
| 1193 | case 2: | ||
| 1194 | cfg = mode_2_cfg; | ||
| 1195 | n = ARRAY_SIZE(mode_2_cfg); | ||
| 1196 | break; | ||
| 1197 | case 3: | ||
| 1198 | cfg = mode_3_cfg; | ||
| 1199 | n = ARRAY_SIZE(mode_3_cfg); | ||
| 1200 | break; | ||
| 1201 | case 4: | ||
| 1202 | cfg = mode_4_cfg; | ||
| 1203 | n = ARRAY_SIZE(mode_4_cfg); | ||
| 1204 | break; | ||
| 1205 | } | ||
| 1206 | |||
| 1207 | printk(KERN_DEBUG "%s: setup fifo_mode %d\n", | ||
| 1208 | musb_driver_name, fifo_mode); | ||
| 1209 | |||
| 1210 | |||
| 1211 | offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0); | ||
| 1212 | /* assert(offset > 0) */ | ||
| 1213 | |||
| 1214 | /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would | ||
| 1215 | * be better than static musb->config->num_eps and DYN_FIFO_SIZE... | ||
| 1216 | */ | ||
| 1217 | |||
| 1218 | for (i = 0; i < n; i++) { | ||
| 1219 | u8 epn = cfg->hw_ep_num; | ||
| 1220 | |||
| 1221 | if (epn >= musb->config->num_eps) { | ||
| 1222 | pr_debug("%s: invalid ep %d\n", | ||
| 1223 | musb_driver_name, epn); | ||
| 1224 | continue; | ||
| 1225 | } | ||
| 1226 | offset = fifo_setup(musb, hw_ep + epn, cfg++, offset); | ||
| 1227 | if (offset < 0) { | ||
| 1228 | pr_debug("%s: mem overrun, ep %d\n", | ||
| 1229 | musb_driver_name, epn); | ||
| 1230 | return -EINVAL; | ||
| 1231 | } | ||
| 1232 | epn++; | ||
| 1233 | musb->nr_endpoints = max(epn, musb->nr_endpoints); | ||
| 1234 | } | ||
| 1235 | |||
| 1236 | printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n", | ||
| 1237 | musb_driver_name, | ||
| 1238 | n + 1, musb->config->num_eps * 2 - 1, | ||
| 1239 | offset, (1 << (musb->config->ram_bits + 2))); | ||
| 1240 | |||
| 1241 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 1242 | if (!musb->bulk_ep) { | ||
| 1243 | pr_debug("%s: missing bulk\n", musb_driver_name); | ||
| 1244 | return -EINVAL; | ||
| 1245 | } | ||
| 1246 | #endif | ||
| 1247 | |||
| 1248 | return 0; | ||
| 1249 | } | ||
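As a sanity check of the "fits in 4KB" comment on mode_3_cfg above: the RAM a table consumes is just the sum of its maxpacket values, doubled for BUF_DOUBLE entries, plus 64 bytes for ep0. A standalone sketch of that bookkeeping, with local stand-ins for the driver's fifo_cfg layout, follows; the 4KB figure assumes ram_bits = 10, since total FIFO RAM is 1 << (ram_bits + 2) bytes.

    /* Standalone arithmetic check, not driver code. */
    #include <stdio.h>

    struct cfg { unsigned maxpacket; int double_buffered; };

    int main(void)
    {
    	static const struct cfg mode_3[] = {
    		{ 512, 1 }, { 512, 1 },	/* hw_ep 1 tx/rx, double buffered */
    		{ 512, 0 }, { 512, 0 },	/* hw_ep 2 tx/rx */
    		{ 256, 0 }, { 256, 0 },	/* hw_ep 3 and 4, shared FIFOs */
    	};
    	unsigned i, total = 64;		/* ep0_cfg always takes 64 bytes */

    	for (i = 0; i < sizeof(mode_3) / sizeof(mode_3[0]); i++)
    		total += mode_3[i].maxpacket
    			<< (mode_3[i].double_buffered ? 1 : 0);

    	printf("mode 3 uses %u of %u bytes\n", total, 1u << (10 + 2));
    	return 0;
    }

The result is 3648 of 4096 bytes, so the mode 3 table indeed fits in 4KB with room to spare.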
| 1250 | |||
| 1251 | |||
| 1252 | /* | ||
| 1253 | * ep_config_from_hw - read the endpoint config from silicon registers | ||
| 1254 | * (used when MUSB_C_DYNFIFO_DEF is false); @param musb: the controller | ||
| 1255 | */ | ||
| 1256 | static int __init ep_config_from_hw(struct musb *musb) | ||
| 1257 | { | ||
| 1258 | u8 epnum = 0, reg; | ||
| 1259 | struct musb_hw_ep *hw_ep; | ||
| 1260 | void *mbase = musb->mregs; | ||
| 1261 | |||
| 1262 | DBG(2, "<== static silicon ep config\n"); | ||
| 1263 | |||
| 1264 | /* FIXME pick up ep0 maxpacket size */ | ||
| 1265 | |||
| 1266 | for (epnum = 1; epnum < musb->config->num_eps; epnum++) { | ||
| 1267 | musb_ep_select(mbase, epnum); | ||
| 1268 | hw_ep = musb->endpoints + epnum; | ||
| 1269 | |||
| 1270 | /* read from core using indexed model */ | ||
| 1271 | reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE); | ||
| 1272 | if (!reg) { | ||
| 1273 | /* 0's returned when no more endpoints */ | ||
| 1274 | break; | ||
| 1275 | } | ||
| 1276 | musb->nr_endpoints++; | ||
| 1277 | musb->epmask |= (1 << epnum); | ||
| 1278 | |||
| 1279 | hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f); | ||
| 1280 | |||
| 1281 | /* shared TX/RX FIFO? */ | ||
| 1282 | if ((reg & 0xf0) == 0xf0) { | ||
| 1283 | hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx; | ||
| 1284 | hw_ep->is_shared_fifo = true; | ||
| 1285 | continue; | ||
| 1286 | } else { | ||
| 1287 | hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4); | ||
| 1288 | hw_ep->is_shared_fifo = false; | ||
| 1289 | } | ||
| 1290 | |||
| 1291 | /* FIXME set up hw_ep->{rx,tx}_double_buffered */ | ||
| 1292 | |||
| 1293 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 1294 | /* pick an RX/TX endpoint for bulk */ | ||
| 1295 | if (hw_ep->max_packet_sz_tx < 512 | ||
| 1296 | || hw_ep->max_packet_sz_rx < 512) | ||
| 1297 | continue; | ||
| 1298 | |||
| 1299 | /* REVISIT: this algorithm is lazy, we should at least | ||
| 1300 | * try to pick a double buffered endpoint. | ||
| 1301 | */ | ||
| 1302 | if (musb->bulk_ep) | ||
| 1303 | continue; | ||
| 1304 | musb->bulk_ep = hw_ep; | ||
| 1305 | #endif | ||
| 1306 | } | ||
| 1307 | |||
| 1308 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 1309 | if (!musb->bulk_ep) { | ||
| 1310 | pr_debug("%s: missing bulk\n", musb_driver_name); | ||
| 1311 | return -EINVAL; | ||
| 1312 | } | ||
| 1313 | #endif | ||
| 1314 | |||
| 1315 | return 0; | ||
| 1316 | } | ||
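For reference, the FIFOSIZE register decoded in the loop above packs two log2 sizes into one byte: the low nibble gives the TX FIFO size, the high nibble the RX FIFO size, and an all-ones high nibble flags a shared TX/RX FIFO. A tiny standalone illustration of that decoding, using an example register value, not driver code:

    #include <stdio.h>

    int main(void)
    {
    	unsigned char reg = 0xf9;	/* example readback: shared 512-byte FIFO */
    	unsigned tx = 1u << (reg & 0x0f);

    	if ((reg & 0xf0) == 0xf0)
    		printf("shared fifo, %u bytes each direction\n", tx);
    	else
    		printf("tx %u bytes, rx %u bytes\n", tx, 1u << (reg >> 4));
    	return 0;
    }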
| 1317 | |||
| 1318 | enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, }; | ||
| 1319 | |||
| 1320 | /* Initialize MUSB (M)HDRC part of the USB hardware subsystem; | ||
| 1321 | * configure endpoints, or take their config from silicon | ||
| 1322 | */ | ||
| 1323 | static int __init musb_core_init(u16 musb_type, struct musb *musb) | ||
| 1324 | { | ||
| 1325 | #ifdef MUSB_AHB_ID | ||
| 1326 | u32 data; | ||
| 1327 | #endif | ||
| 1328 | u8 reg; | ||
| 1329 | char *type; | ||
| 1330 | u16 hwvers, rev_major, rev_minor; | ||
| 1331 | char aInfo[78], aRevision[32], aDate[12]; | ||
| 1332 | void __iomem *mbase = musb->mregs; | ||
| 1333 | int status = 0; | ||
| 1334 | int i; | ||
| 1335 | |||
| 1336 | /* log core options (read using indexed model) */ | ||
| 1337 | musb_ep_select(mbase, 0); | ||
| 1338 | reg = musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); | ||
| 1339 | |||
| 1340 | strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); | ||
| 1341 | if (reg & MUSB_CONFIGDATA_DYNFIFO) | ||
| 1342 | strcat(aInfo, ", dyn FIFOs"); | ||
| 1343 | if (reg & MUSB_CONFIGDATA_MPRXE) { | ||
| 1344 | strcat(aInfo, ", bulk combine"); | ||
| 1345 | #ifdef C_MP_RX | ||
| 1346 | musb->bulk_combine = true; | ||
| 1347 | #else | ||
| 1348 | strcat(aInfo, " (X)"); /* no driver support */ | ||
| 1349 | #endif | ||
| 1350 | } | ||
| 1351 | if (reg & MUSB_CONFIGDATA_MPTXE) { | ||
| 1352 | strcat(aInfo, ", bulk split"); | ||
| 1353 | #ifdef C_MP_TX | ||
| 1354 | musb->bulk_split = true; | ||
| 1355 | #else | ||
| 1356 | strcat(aInfo, " (X)"); /* no driver support */ | ||
| 1357 | #endif | ||
| 1358 | } | ||
| 1359 | if (reg & MUSB_CONFIGDATA_HBRXE) { | ||
| 1360 | strcat(aInfo, ", HB-ISO Rx"); | ||
| 1361 | strcat(aInfo, " (X)"); /* no driver support */ | ||
| 1362 | } | ||
| 1363 | if (reg & MUSB_CONFIGDATA_HBTXE) { | ||
| 1364 | strcat(aInfo, ", HB-ISO Tx"); | ||
| 1365 | strcat(aInfo, " (X)"); /* no driver support */ | ||
| 1366 | } | ||
| 1367 | if (reg & MUSB_CONFIGDATA_SOFTCONE) | ||
| 1368 | strcat(aInfo, ", SoftConn"); | ||
| 1369 | |||
| 1370 | printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n", | ||
| 1371 | musb_driver_name, reg, aInfo); | ||
| 1372 | |||
| 1373 | #ifdef MUSB_AHB_ID | ||
| 1374 | data = musb_readl(mbase, 0x404); | ||
| 1375 | sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff), | ||
| 1376 | (data >> 16) & 0xff, (data >> 24) & 0xff); | ||
| 1377 | /* FIXME ID2 and ID3 are unused */ | ||
| 1378 | data = musb_readl(mbase, 0x408); | ||
| 1379 | printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data); | ||
| 1380 | data = musb_readl(mbase, 0x40c); | ||
| 1381 | printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data); | ||
| 1382 | reg = musb_readb(mbase, 0x400); | ||
| 1383 | musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC; | ||
| 1384 | #else | ||
| 1385 | aDate[0] = 0; | ||
| 1386 | #endif | ||
| 1387 | if (MUSB_CONTROLLER_MHDRC == musb_type) { | ||
| 1388 | musb->is_multipoint = 1; | ||
| 1389 | type = "M"; | ||
| 1390 | } else { | ||
| 1391 | musb->is_multipoint = 0; | ||
| 1392 | type = ""; | ||
| 1393 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 1394 | #ifndef CONFIG_USB_OTG_BLACKLIST_HUB | ||
| 1395 | printk(KERN_ERR | ||
| 1396 | "%s: kernel must blacklist external hubs\n", | ||
| 1397 | musb_driver_name); | ||
| 1398 | #endif | ||
| 1399 | #endif | ||
| 1400 | } | ||
| 1401 | |||
| 1402 | /* log release info */ | ||
| 1403 | hwvers = musb_readw(mbase, MUSB_HWVERS); | ||
| 1404 | rev_major = (hwvers >> 10) & 0x1f; | ||
| 1405 | rev_minor = hwvers & 0x3ff; | ||
| 1406 | snprintf(aRevision, 32, "%d.%d%s", rev_major, | ||
| 1407 | rev_minor, (hwvers & 0x8000) ? "RC" : ""); | ||
| 1408 | printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n", | ||
| 1409 | musb_driver_name, type, aRevision, aDate); | ||
| 1410 | |||
| 1411 | /* configure ep0 */ | ||
| 1412 | musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE; | ||
| 1413 | musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE; | ||
| 1414 | |||
| 1415 | /* discover endpoint configuration */ | ||
| 1416 | musb->nr_endpoints = 1; | ||
| 1417 | musb->epmask = 1; | ||
| 1418 | |||
| 1419 | if (reg & MUSB_CONFIGDATA_DYNFIFO) { | ||
| 1420 | if (musb->config->dyn_fifo) | ||
| 1421 | status = ep_config_from_table(musb); | ||
| 1422 | else { | ||
| 1423 | ERR("reconfigure software for Dynamic FIFOs\n"); | ||
| 1424 | status = -ENODEV; | ||
| 1425 | } | ||
| 1426 | } else { | ||
| 1427 | if (!musb->config->dyn_fifo) | ||
| 1428 | status = ep_config_from_hw(musb); | ||
| 1429 | else { | ||
| 1430 | ERR("reconfigure software for static FIFOs\n"); | ||
| 1431 | return -ENODEV; | ||
| 1432 | } | ||
| 1433 | } | ||
| 1434 | |||
| 1435 | if (status < 0) | ||
| 1436 | return status; | ||
| 1437 | |||
| 1438 | /* finish init, and print endpoint config */ | ||
| 1439 | for (i = 0; i < musb->nr_endpoints; i++) { | ||
| 1440 | struct musb_hw_ep *hw_ep = musb->endpoints + i; | ||
| 1441 | |||
| 1442 | hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase; | ||
| 1443 | #ifdef CONFIG_USB_TUSB6010 | ||
| 1444 | hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i); | ||
| 1445 | hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i); | ||
| 1446 | hw_ep->fifo_sync_va = | ||
| 1447 | musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i); | ||
| 1448 | |||
| 1449 | if (i == 0) | ||
| 1450 | hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF; | ||
| 1451 | else | ||
| 1452 | hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2); | ||
| 1453 | #endif | ||
| 1454 | |||
| 1455 | hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase; | ||
| 1456 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 1457 | hw_ep->target_regs = MUSB_BUSCTL_OFFSET(i, 0) + mbase; | ||
| 1458 | hw_ep->rx_reinit = 1; | ||
| 1459 | hw_ep->tx_reinit = 1; | ||
| 1460 | #endif | ||
| 1461 | |||
| 1462 | if (hw_ep->max_packet_sz_tx) { | ||
| 1463 | printk(KERN_DEBUG | ||
| 1464 | "%s: hw_ep %d%s, %smax %d\n", | ||
| 1465 | musb_driver_name, i, | ||
| 1466 | hw_ep->is_shared_fifo ? "shared" : "tx", | ||
| 1467 | hw_ep->tx_double_buffered | ||
| 1468 | ? "doublebuffer, " : "", | ||
| 1469 | hw_ep->max_packet_sz_tx); | ||
| 1470 | } | ||
| 1471 | if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) { | ||
| 1472 | printk(KERN_DEBUG | ||
| 1473 | "%s: hw_ep %d%s, %smax %d\n", | ||
| 1474 | musb_driver_name, i, | ||
| 1475 | "rx", | ||
| 1476 | hw_ep->rx_double_buffered | ||
| 1477 | ? "doublebuffer, " : "", | ||
| 1478 | hw_ep->max_packet_sz_rx); | ||
| 1479 | } | ||
| 1480 | if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx)) | ||
| 1481 | DBG(1, "hw_ep %d not configured\n", i); | ||
| 1482 | } | ||
| 1483 | |||
| 1484 | return 0; | ||
| 1485 | } | ||
| 1486 | |||
| 1487 | /*-------------------------------------------------------------------------*/ | ||
| 1488 | |||
| 1489 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) | ||
| 1490 | |||
| 1491 | static irqreturn_t generic_interrupt(int irq, void *__hci) | ||
| 1492 | { | ||
| 1493 | unsigned long flags; | ||
| 1494 | irqreturn_t retval = IRQ_NONE; | ||
| 1495 | struct musb *musb = __hci; | ||
| 1496 | |||
| 1497 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1498 | |||
| 1499 | musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB); | ||
| 1500 | musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); | ||
| 1501 | musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); | ||
| 1502 | |||
| 1503 | if (musb->int_usb || musb->int_tx || musb->int_rx) | ||
| 1504 | retval = musb_interrupt(musb); | ||
| 1505 | |||
| 1506 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1507 | |||
| 1508 | /* REVISIT we sometimes get spurious IRQs on g_ep0; | ||
| 1509 | * it's not clear why... | ||
| 1510 | */ | ||
| 1511 | if (retval != IRQ_HANDLED) | ||
| 1512 | DBG(5, "spurious?\n"); | ||
| 1513 | |||
| 1514 | return IRQ_HANDLED; | ||
| 1515 | } | ||
| 1516 | |||
| 1517 | #else | ||
| 1518 | #define generic_interrupt NULL | ||
| 1519 | #endif | ||
| 1520 | |||
| 1521 | /* | ||
| 1522 | * handle all the irqs defined by the HDRC core. for now we expect: other | ||
| 1523 | * irq sources (phy, dma, etc) will be handled first, musb->int_* values | ||
| 1524 | * will be assigned, and the irq will already have been acked. | ||
| 1525 | * | ||
| 1526 | * called in irq context with spinlock held, irqs blocked | ||
| 1527 | */ | ||
| 1528 | irqreturn_t musb_interrupt(struct musb *musb) | ||
| 1529 | { | ||
| 1530 | irqreturn_t retval = IRQ_NONE; | ||
| 1531 | u8 devctl, power; | ||
| 1532 | int ep_num; | ||
| 1533 | u32 reg; | ||
| 1534 | |||
| 1535 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 1536 | power = musb_readb(musb->mregs, MUSB_POWER); | ||
| 1537 | |||
| 1538 | DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n", | ||
| 1539 | (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral", | ||
| 1540 | musb->int_usb, musb->int_tx, musb->int_rx); | ||
| 1541 | |||
| 1542 | /* the core can interrupt us for multiple reasons; docs have | ||
| 1543 | * a generic interrupt flowchart to follow | ||
| 1544 | */ | ||
| 1545 | if (musb->int_usb & STAGE0_MASK) | ||
| 1546 | retval |= musb_stage0_irq(musb, musb->int_usb, | ||
| 1547 | devctl, power); | ||
| 1548 | |||
| 1549 | /* "stage 1" is handling endpoint irqs */ | ||
| 1550 | |||
| 1551 | /* handle endpoint 0 first */ | ||
| 1552 | if (musb->int_tx & 1) { | ||
| 1553 | if (devctl & MUSB_DEVCTL_HM) | ||
| 1554 | retval |= musb_h_ep0_irq(musb); | ||
| 1555 | else | ||
| 1556 | retval |= musb_g_ep0_irq(musb); | ||
| 1557 | } | ||
| 1558 | |||
| 1559 | /* RX on endpoints 1-15 */ | ||
| 1560 | reg = musb->int_rx >> 1; | ||
| 1561 | ep_num = 1; | ||
| 1562 | while (reg) { | ||
| 1563 | if (reg & 1) { | ||
| 1564 | /* musb_ep_select(musb->mregs, ep_num); */ | ||
| 1565 | /* REVISIT just retval = ep->rx_irq(...) */ | ||
| 1566 | retval = IRQ_HANDLED; | ||
| 1567 | if (devctl & MUSB_DEVCTL_HM) { | ||
| 1568 | if (is_host_capable()) | ||
| 1569 | musb_host_rx(musb, ep_num); | ||
| 1570 | } else { | ||
| 1571 | if (is_peripheral_capable()) | ||
| 1572 | musb_g_rx(musb, ep_num); | ||
| 1573 | } | ||
| 1574 | } | ||
| 1575 | |||
| 1576 | reg >>= 1; | ||
| 1577 | ep_num++; | ||
| 1578 | } | ||
| 1579 | |||
| 1580 | /* TX on endpoints 1-15 */ | ||
| 1581 | reg = musb->int_tx >> 1; | ||
| 1582 | ep_num = 1; | ||
| 1583 | while (reg) { | ||
| 1584 | if (reg & 1) { | ||
| 1585 | /* musb_ep_select(musb->mregs, ep_num); */ | ||
| 1586 | /* REVISIT just retval |= ep->tx_irq(...) */ | ||
| 1587 | retval = IRQ_HANDLED; | ||
| 1588 | if (devctl & MUSB_DEVCTL_HM) { | ||
| 1589 | if (is_host_capable()) | ||
| 1590 | musb_host_tx(musb, ep_num); | ||
| 1591 | } else { | ||
| 1592 | if (is_peripheral_capable()) | ||
| 1593 | musb_g_tx(musb, ep_num); | ||
| 1594 | } | ||
| 1595 | } | ||
| 1596 | reg >>= 1; | ||
| 1597 | ep_num++; | ||
| 1598 | } | ||
| 1599 | |||
| 1600 | /* finish handling "global" interrupts after handling fifos */ | ||
| 1601 | if (musb->int_usb) | ||
| 1602 | retval |= musb_stage2_irq(musb, | ||
| 1603 | musb->int_usb, devctl, power); | ||
| 1604 | |||
| 1605 | return retval; | ||
| 1606 | } | ||
| 1607 | |||
| 1608 | |||
| 1609 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
| 1610 | static int __initdata use_dma = 1; | ||
| 1611 | |||
| 1612 | /* "modprobe ... use_dma=0" etc */ | ||
| 1613 | module_param(use_dma, bool, 0); | ||
| 1614 | MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); | ||
| 1615 | |||
| 1616 | void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) | ||
| 1617 | { | ||
| 1618 | u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 1619 | |||
| 1620 | /* called with controller lock already held */ | ||
| 1621 | |||
| 1622 | if (!epnum) { | ||
| 1623 | #ifndef CONFIG_USB_TUSB_OMAP_DMA | ||
| 1624 | if (!is_cppi_enabled()) { | ||
| 1625 | /* endpoint 0 */ | ||
| 1626 | if (devctl & MUSB_DEVCTL_HM) | ||
| 1627 | musb_h_ep0_irq(musb); | ||
| 1628 | else | ||
| 1629 | musb_g_ep0_irq(musb); | ||
| 1630 | } | ||
| 1631 | #endif | ||
| 1632 | } else { | ||
| 1633 | /* endpoints 1..15 */ | ||
| 1634 | if (transmit) { | ||
| 1635 | if (devctl & MUSB_DEVCTL_HM) { | ||
| 1636 | if (is_host_capable()) | ||
| 1637 | musb_host_tx(musb, epnum); | ||
| 1638 | } else { | ||
| 1639 | if (is_peripheral_capable()) | ||
| 1640 | musb_g_tx(musb, epnum); | ||
| 1641 | } | ||
| 1642 | } else { | ||
| 1643 | /* receive */ | ||
| 1644 | if (devctl & MUSB_DEVCTL_HM) { | ||
| 1645 | if (is_host_capable()) | ||
| 1646 | musb_host_rx(musb, epnum); | ||
| 1647 | } else { | ||
| 1648 | if (is_peripheral_capable()) | ||
| 1649 | musb_g_rx(musb, epnum); | ||
| 1650 | } | ||
| 1651 | } | ||
| 1652 | } | ||
| 1653 | } | ||
| 1654 | |||
| 1655 | #else | ||
| 1656 | #define use_dma 0 | ||
| 1657 | #endif | ||
| 1658 | |||
| 1659 | /*-------------------------------------------------------------------------*/ | ||
| 1660 | |||
| 1661 | #ifdef CONFIG_SYSFS | ||
| 1662 | |||
| 1663 | static ssize_t | ||
| 1664 | musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
| 1665 | { | ||
| 1666 | struct musb *musb = dev_to_musb(dev); | ||
| 1667 | unsigned long flags; | ||
| 1668 | int ret = -EINVAL; | ||
| 1669 | |||
| 1670 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1671 | ret = sprintf(buf, "%s\n", otg_state_string(musb)); | ||
| 1672 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1673 | |||
| 1674 | return ret; | ||
| 1675 | } | ||
| 1676 | |||
| 1677 | static ssize_t | ||
| 1678 | musb_mode_store(struct device *dev, struct device_attribute *attr, | ||
| 1679 | const char *buf, size_t n) | ||
| 1680 | { | ||
| 1681 | struct musb *musb = dev_to_musb(dev); | ||
| 1682 | unsigned long flags; | ||
| 1683 | |||
| 1684 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1685 | if (!strncmp(buf, "host", 4)) | ||
| 1686 | musb_platform_set_mode(musb, MUSB_HOST); | ||
| 1687 | if (!strncmp(buf, "peripheral", 10)) | ||
| 1688 | musb_platform_set_mode(musb, MUSB_PERIPHERAL); | ||
| 1689 | if (!strncmp(buf, "otg", 3)) | ||
| 1690 | musb_platform_set_mode(musb, MUSB_OTG); | ||
| 1691 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1692 | |||
| 1693 | return n; | ||
| 1694 | } | ||
| 1695 | static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store); | ||
| 1696 | |||
| 1697 | static ssize_t | ||
| 1698 | musb_vbus_store(struct device *dev, struct device_attribute *attr, | ||
| 1699 | const char *buf, size_t n) | ||
| 1700 | { | ||
| 1701 | struct musb *musb = dev_to_musb(dev); | ||
| 1702 | unsigned long flags; | ||
| 1703 | unsigned long val; | ||
| 1704 | |||
| 1705 | if (sscanf(buf, "%lu", &val) < 1) { | ||
| 1706 | printk(KERN_ERR "Invalid VBUS timeout ms value\n"); | ||
| 1707 | return -EINVAL; | ||
| 1708 | } | ||
| 1709 | |||
| 1710 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1711 | musb->a_wait_bcon = val; | ||
| 1712 | if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON) | ||
| 1713 | musb->is_active = 0; | ||
| 1714 | musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val)); | ||
| 1715 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1716 | |||
| 1717 | return n; | ||
| 1718 | } | ||
| 1719 | |||
| 1720 | static ssize_t | ||
| 1721 | musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
| 1722 | { | ||
| 1723 | struct musb *musb = dev_to_musb(dev); | ||
| 1724 | unsigned long flags; | ||
| 1725 | unsigned long val; | ||
| 1726 | int vbus; | ||
| 1727 | |||
| 1728 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1729 | val = musb->a_wait_bcon; | ||
| 1730 | vbus = musb_platform_get_vbus_status(musb); | ||
| 1731 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1732 | |||
| 1733 | return sprintf(buf, "Vbus %s, timeout %lu\n", | ||
| 1734 | vbus ? "on" : "off", val); | ||
| 1735 | } | ||
| 1736 | static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store); | ||
| 1737 | |||
| 1738 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 1739 | |||
| 1740 | /* Gadget drivers can't know when a host is connected, so they can't decide | ||
| 1741 | * when to start SRP; users can. This attribute lets userspace trigger SRP. | ||
| 1742 | */ | ||
| 1743 | static ssize_t | ||
| 1744 | musb_srp_store(struct device *dev, struct device_attribute *attr, | ||
| 1745 | const char *buf, size_t n) | ||
| 1746 | { | ||
| 1747 | struct musb *musb = dev_to_musb(dev); | ||
| 1748 | unsigned short srp; | ||
| 1749 | |||
| 1750 | if (sscanf(buf, "%hu", &srp) != 1 | ||
| 1751 | || (srp != 1)) { | ||
| 1752 | printk(KERN_ERR "SRP: Value must be 1\n"); | ||
| 1753 | return -EINVAL; | ||
| 1754 | } | ||
| 1755 | |||
| 1756 | if (srp == 1) | ||
| 1757 | musb_g_wakeup(musb); | ||
| 1758 | |||
| 1759 | return n; | ||
| 1760 | } | ||
| 1761 | static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store); | ||
| 1762 | |||
| 1763 | #endif /* CONFIG_USB_GADGET_MUSB_HDRC */ | ||
| 1764 | |||
| 1765 | #endif /* sysfs */ | ||
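The mode, vbus and srp attributes above are plain sysfs files, so they can be driven from userspace with ordinary writes. A hedged sketch follows; the sysfs path is only an example, since the actual directory depends on the platform device name on your board.

    /* Userspace sketch, not driver code; adjust the path for your board. */
    #include <stdio.h>

    int main(void)
    {
    	const char *path = "/sys/devices/platform/musb_hdrc/mode"; /* example */
    	FILE *f = fopen(path, "w");

    	if (!f) {
    		perror(path);
    		return 1;
    	}
    	fputs("otg\n", f);	/* musb_mode_store() also accepts "host" and "peripheral" */
    	return fclose(f) ? 1 : 0;
    }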
| 1766 | |||
| 1767 | /* Only used to provide driver mode change events */ | ||
| 1768 | static void musb_irq_work(struct work_struct *data) | ||
| 1769 | { | ||
| 1770 | struct musb *musb = container_of(data, struct musb, irq_work); | ||
| 1771 | static int old_state; | ||
| 1772 | |||
| 1773 | if (musb->xceiv.state != old_state) { | ||
| 1774 | old_state = musb->xceiv.state; | ||
| 1775 | sysfs_notify(&musb->controller->kobj, NULL, "mode"); | ||
| 1776 | } | ||
| 1777 | } | ||
| 1778 | |||
| 1779 | /* -------------------------------------------------------------------------- | ||
| 1780 | * Init support | ||
| 1781 | */ | ||
| 1782 | |||
| 1783 | static struct musb *__init | ||
| 1784 | allocate_instance(struct device *dev, | ||
| 1785 | struct musb_hdrc_config *config, void __iomem *mbase) | ||
| 1786 | { | ||
| 1787 | struct musb *musb; | ||
| 1788 | struct musb_hw_ep *ep; | ||
| 1789 | int epnum; | ||
| 1790 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 1791 | struct usb_hcd *hcd; | ||
| 1792 | |||
| 1793 | hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id); | ||
| 1794 | if (!hcd) | ||
| 1795 | return NULL; | ||
| 1796 | /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */ | ||
| 1797 | |||
| 1798 | musb = hcd_to_musb(hcd); | ||
| 1799 | INIT_LIST_HEAD(&musb->control); | ||
| 1800 | INIT_LIST_HEAD(&musb->in_bulk); | ||
| 1801 | INIT_LIST_HEAD(&musb->out_bulk); | ||
| 1802 | |||
| 1803 | hcd->uses_new_polling = 1; | ||
| 1804 | |||
| 1805 | musb->vbuserr_retry = VBUSERR_RETRY_COUNT; | ||
| 1806 | #else | ||
| 1807 | musb = kzalloc(sizeof *musb, GFP_KERNEL); | ||
| 1808 | if (!musb) | ||
| 1809 | return NULL; | ||
| 1810 | dev_set_drvdata(dev, musb); | ||
| 1811 | |||
| 1812 | #endif | ||
| 1813 | |||
| 1814 | musb->mregs = mbase; | ||
| 1815 | musb->ctrl_base = mbase; | ||
| 1816 | musb->nIrq = -ENODEV; | ||
| 1817 | musb->config = config; | ||
| 1818 | for (epnum = 0, ep = musb->endpoints; | ||
| 1819 | epnum < musb->config->num_eps; | ||
| 1820 | epnum++, ep++) { | ||
| 1821 | |||
| 1822 | ep->musb = musb; | ||
| 1823 | ep->epnum = epnum; | ||
| 1824 | } | ||
| 1825 | |||
| 1826 | musb->controller = dev; | ||
| 1827 | return musb; | ||
| 1828 | } | ||
| 1829 | |||
| 1830 | static void musb_free(struct musb *musb) | ||
| 1831 | { | ||
| 1832 | /* this has multiple entry modes. it handles fault cleanup after | ||
| 1833 | * probe(), where things may be partially set up, as well as rmmod | ||
| 1834 | * cleanup after everything's been de-activated. | ||
| 1835 | */ | ||
| 1836 | |||
| 1837 | #ifdef CONFIG_SYSFS | ||
| 1838 | device_remove_file(musb->controller, &dev_attr_mode); | ||
| 1839 | device_remove_file(musb->controller, &dev_attr_vbus); | ||
| 1840 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 1841 | device_remove_file(musb->controller, &dev_attr_srp); | ||
| 1842 | #endif | ||
| 1843 | #endif | ||
| 1844 | |||
| 1845 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 1846 | musb_gadget_cleanup(musb); | ||
| 1847 | #endif | ||
| 1848 | |||
| 1849 | if (musb->nIrq >= 0) { | ||
| 1850 | disable_irq_wake(musb->nIrq); | ||
| 1851 | free_irq(musb->nIrq, musb); | ||
| 1852 | } | ||
| 1853 | if (is_dma_capable() && musb->dma_controller) { | ||
| 1854 | struct dma_controller *c = musb->dma_controller; | ||
| 1855 | |||
| 1856 | (void) c->stop(c); | ||
| 1857 | dma_controller_destroy(c); | ||
| 1858 | } | ||
| 1859 | |||
| 1860 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
| 1861 | musb_platform_exit(musb); | ||
| 1862 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
| 1863 | |||
| 1864 | if (musb->clock) { | ||
| 1865 | clk_disable(musb->clock); | ||
| 1866 | clk_put(musb->clock); | ||
| 1867 | } | ||
| 1868 | |||
| 1869 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 1870 | put_device(musb->xceiv.dev); | ||
| 1871 | #endif | ||
| 1872 | |||
| 1873 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 1874 | usb_put_hcd(musb_to_hcd(musb)); | ||
| 1875 | #else | ||
| 1876 | kfree(musb); | ||
| 1877 | #endif | ||
| 1878 | } | ||
| 1879 | |||
| 1880 | /* | ||
| 1881 | * Perform generic per-controller initialization. | ||
| 1882 | * | ||
| 1883 | * @dev: the controller (already clocked, etc) | ||
| 1884 | * @nIrq: irq number | ||
| 1885 | * @ctrl: virtual address of controller registers, | ||
| 1886 | * not yet corrected for platform-specific offsets | ||
| 1887 | */ | ||
| 1888 | static int __init | ||
| 1889 | musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | ||
| 1890 | { | ||
| 1891 | int status; | ||
| 1892 | struct musb *musb; | ||
| 1893 | struct musb_hdrc_platform_data *plat = dev->platform_data; | ||
| 1894 | |||
| 1895 | /* The driver might handle more features than the board; OK. | ||
| 1896 | * Fail when the board needs a feature that's not enabled. | ||
| 1897 | */ | ||
| 1898 | if (!plat) { | ||
| 1899 | dev_dbg(dev, "no platform_data?\n"); | ||
| 1900 | return -ENODEV; | ||
| 1901 | } | ||
| 1902 | switch (plat->mode) { | ||
| 1903 | case MUSB_HOST: | ||
| 1904 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 1905 | break; | ||
| 1906 | #else | ||
| 1907 | goto bad_config; | ||
| 1908 | #endif | ||
| 1909 | case MUSB_PERIPHERAL: | ||
| 1910 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 1911 | break; | ||
| 1912 | #else | ||
| 1913 | goto bad_config; | ||
| 1914 | #endif | ||
| 1915 | case MUSB_OTG: | ||
| 1916 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 1917 | break; | ||
| 1918 | #else | ||
| 1919 | bad_config: | ||
| 1920 | #endif | ||
| 1921 | default: | ||
| 1922 | dev_err(dev, "incompatible Kconfig role setting\n"); | ||
| 1923 | return -EINVAL; | ||
| 1924 | } | ||
| 1925 | |||
| 1926 | /* allocate */ | ||
| 1927 | musb = allocate_instance(dev, plat->config, ctrl); | ||
| 1928 | if (!musb) | ||
| 1929 | return -ENOMEM; | ||
| 1930 | |||
| 1931 | spin_lock_init(&musb->lock); | ||
| 1932 | musb->board_mode = plat->mode; | ||
| 1933 | musb->board_set_power = plat->set_power; | ||
| 1934 | musb->set_clock = plat->set_clock; | ||
| 1935 | musb->min_power = plat->min_power; | ||
| 1936 | |||
| 1937 | /* Clock usage is chip-specific ... functional clock (DaVinci, | ||
| 1938 | * OMAP2430), or PHY ref (some TUSB6010 boards). All this core | ||
| 1939 | * code does is make sure a clock handle is available; platform | ||
| 1940 | * code manages it during start/stop and suspend/resume. | ||
| 1941 | */ | ||
| 1942 | if (plat->clock) { | ||
| 1943 | musb->clock = clk_get(dev, plat->clock); | ||
| 1944 | if (IS_ERR(musb->clock)) { | ||
| 1945 | status = PTR_ERR(musb->clock); | ||
| 1946 | musb->clock = NULL; | ||
| 1947 | goto fail; | ||
| 1948 | } | ||
| 1949 | } | ||
| 1950 | |||
| 1951 | /* assume vbus is off */ | ||
| 1952 | |||
| 1953 | /* platform adjusts musb->mregs and musb->isr if needed, | ||
| 1954 | * and activates clocks | ||
| 1955 | */ | ||
| 1956 | musb->isr = generic_interrupt; | ||
| 1957 | status = musb_platform_init(musb); | ||
| 1958 | |||
| 1959 | if (status < 0) | ||
| 1960 | goto fail; | ||
| 1961 | if (!musb->isr) { | ||
| 1962 | status = -ENODEV; | ||
| 1963 | goto fail2; | ||
| 1964 | } | ||
| 1965 | |||
| 1966 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
| 1967 | if (use_dma && dev->dma_mask) { | ||
| 1968 | struct dma_controller *c; | ||
| 1969 | |||
| 1970 | c = dma_controller_create(musb, musb->mregs); | ||
| 1971 | musb->dma_controller = c; | ||
| 1972 | if (c) | ||
| 1973 | (void) c->start(c); | ||
| 1974 | } | ||
| 1975 | #endif | ||
| 1976 | /* ideally this would be abstracted in platform setup */ | ||
| 1977 | if (!is_dma_capable() || !musb->dma_controller) | ||
| 1978 | dev->dma_mask = NULL; | ||
| 1979 | |||
| 1980 | /* be sure interrupts are disabled before connecting ISR */ | ||
| 1981 | musb_platform_disable(musb); | ||
| 1982 | musb_generic_disable(musb); | ||
| 1983 | |||
| 1984 | /* setup musb parts of the core (especially endpoints) */ | ||
| 1985 | status = musb_core_init(plat->config->multipoint | ||
| 1986 | ? MUSB_CONTROLLER_MHDRC | ||
| 1987 | : MUSB_CONTROLLER_HDRC, musb); | ||
| 1988 | if (status < 0) | ||
| 1989 | goto fail2; | ||
| 1990 | |||
| 1991 | /* Init IRQ workqueue before request_irq */ | ||
| 1992 | INIT_WORK(&musb->irq_work, musb_irq_work); | ||
| 1993 | |||
| 1994 | /* attach to the IRQ */ | ||
| 1995 | if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) { | ||
| 1996 | dev_err(dev, "request_irq %d failed!\n", nIrq); | ||
| 1997 | status = -ENODEV; | ||
| 1998 | goto fail2; | ||
| 1999 | } | ||
| 2000 | musb->nIrq = nIrq; | ||
| 2001 | /* FIXME this handles wakeup irqs wrong */ | ||
| 2002 | if (enable_irq_wake(nIrq) == 0) | ||
| 2003 | device_init_wakeup(dev, 1); | ||
| 2004 | |||
| 2005 | pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n", | ||
| 2006 | musb_driver_name, | ||
| 2007 | ({char *s; | ||
| 2008 | switch (musb->board_mode) { | ||
| 2009 | case MUSB_HOST: s = "Host"; break; | ||
| 2010 | case MUSB_PERIPHERAL: s = "Peripheral"; break; | ||
| 2011 | default: s = "OTG"; break; | ||
| 2012 | }; s; }), | ||
| 2013 | ctrl, | ||
| 2014 | (is_dma_capable() && musb->dma_controller) | ||
| 2015 | ? "DMA" : "PIO", | ||
| 2016 | musb->nIrq); | ||
| 2017 | |||
| 2018 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 2019 | /* host side needs more setup, except for no-host modes */ | ||
| 2020 | if (musb->board_mode != MUSB_PERIPHERAL) { | ||
| 2021 | struct usb_hcd *hcd = musb_to_hcd(musb); | ||
| 2022 | |||
| 2023 | if (musb->board_mode == MUSB_OTG) | ||
| 2024 | hcd->self.otg_port = 1; | ||
| 2025 | musb->xceiv.host = &hcd->self; | ||
| 2026 | hcd->power_budget = 2 * (plat->power ? : 250); | ||
| 2027 | } | ||
| 2028 | #endif /* CONFIG_USB_MUSB_HDRC_HCD */ | ||
| 2029 | |||
| 2030 | /* For the host-only role, we can activate right away. | ||
| 2031 | * (We expect the ID pin to be forcibly grounded!!) | ||
| 2032 | * Otherwise, wait till the gadget driver hooks up. | ||
| 2033 | */ | ||
| 2034 | if (!is_otg_enabled(musb) && is_host_enabled(musb)) { | ||
| 2035 | MUSB_HST_MODE(musb); | ||
| 2036 | musb->xceiv.default_a = 1; | ||
| 2037 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
| 2038 | |||
| 2039 | status = usb_add_hcd(musb_to_hcd(musb), -1, 0); | ||
| 2040 | |||
| 2041 | DBG(1, "%s mode, status %d, devctl %02x %c\n", | ||
| 2042 | "HOST", status, | ||
| 2043 | musb_readb(musb->mregs, MUSB_DEVCTL), | ||
| 2044 | (musb_readb(musb->mregs, MUSB_DEVCTL) | ||
| 2045 | & MUSB_DEVCTL_BDEVICE | ||
| 2046 | ? 'B' : 'A')); | ||
| 2047 | |||
| 2048 | } else /* peripheral is enabled */ { | ||
| 2049 | MUSB_DEV_MODE(musb); | ||
| 2050 | musb->xceiv.default_a = 0; | ||
| 2051 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
| 2052 | |||
| 2053 | status = musb_gadget_setup(musb); | ||
| 2054 | |||
| 2055 | DBG(1, "%s mode, status %d, dev%02x\n", | ||
| 2056 | is_otg_enabled(musb) ? "OTG" : "PERIPHERAL", | ||
| 2057 | status, | ||
| 2058 | musb_readb(musb->mregs, MUSB_DEVCTL)); | ||
| 2059 | |||
| 2060 | } | ||
| 2061 | |||
| 2062 | if (status == 0) | ||
| 2063 | musb_debug_create("driver/musb_hdrc", musb); | ||
| 2064 | else { | ||
| 2065 | fail: | ||
| 2066 | if (musb->clock) | ||
| 2067 | clk_put(musb->clock); | ||
| 2068 | device_init_wakeup(dev, 0); | ||
| 2069 | musb_free(musb); | ||
| 2070 | return status; | ||
| 2071 | } | ||
| 2072 | |||
| 2073 | #ifdef CONFIG_SYSFS | ||
| 2074 | status = device_create_file(dev, &dev_attr_mode); | ||
| 2075 | status = device_create_file(dev, &dev_attr_vbus); | ||
| 2076 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 2077 | status = device_create_file(dev, &dev_attr_srp); | ||
| 2078 | #endif /* CONFIG_USB_GADGET_MUSB_HDRC */ | ||
| 2079 | status = 0; | ||
| 2080 | #endif | ||
| 2081 | |||
| 2082 | return status; | ||
| 2083 | |||
| 2084 | fail2: | ||
| 2085 | musb_platform_exit(musb); | ||
| 2086 | goto fail; | ||
| 2087 | } | ||
| 2088 | |||
| 2089 | /*-------------------------------------------------------------------------*/ | ||
| 2090 | |||
| 2091 | /* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just | ||
| 2092 | * bridge to a platform device; this driver then suffices. | ||
| 2093 | */ | ||
| 2094 | |||
| 2095 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
| 2096 | static u64 *orig_dma_mask; | ||
| 2097 | #endif | ||
| 2098 | |||
| 2099 | static int __init musb_probe(struct platform_device *pdev) | ||
| 2100 | { | ||
| 2101 | struct device *dev = &pdev->dev; | ||
| 2102 | int irq = platform_get_irq(pdev, 0); | ||
| 2103 | struct resource *iomem; | ||
| 2104 | void __iomem *base; | ||
| 2105 | |||
| 2106 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 2107 | if (!iomem || irq == 0) | ||
| 2108 | return -ENODEV; | ||
| 2109 | |||
| 2110 | base = ioremap(iomem->start, iomem->end - iomem->start + 1); | ||
| 2111 | if (!base) { | ||
| 2112 | dev_err(dev, "ioremap failed\n"); | ||
| 2113 | return -ENOMEM; | ||
| 2114 | } | ||
| 2115 | |||
| 2116 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
| 2117 | /* clobbered by use_dma=n */ | ||
| 2118 | orig_dma_mask = dev->dma_mask; | ||
| 2119 | #endif | ||
| 2120 | return musb_init_controller(dev, irq, base); | ||
| 2121 | } | ||
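The comment above notes that bus glue (PCI bridge to FPGA, VLYNQ, on-chip interconnect) is expected to register a platform device that musb_probe() then binds to. Below is a hedged sketch of that board-side registration: the device name is assumed to match musb_driver_name (taken to be "musb_hdrc", following the "driver/musb_hdrc" debug name used earlier in this file), and the register base and IRQ number are placeholders.

    /* Board/glue-side sketch only: supplies the MEM and IRQ resources that
     * musb_probe() looks up via platform_get_resource()/platform_get_irq().
     * A real board would also hang a struct musb_hdrc_platform_data off
     * .dev.platform_data, since musb_init_controller() fails without it.
     */
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    static struct resource board_musb_resources[] = {
    	{
    		.start	= 0x5c040000,		/* placeholder register base */
    		.end	= 0x5c040000 + 0x3ff,
    		.flags	= IORESOURCE_MEM,
    	},
    	{
    		.start	= 92,			/* placeholder IRQ number */
    		.end	= 92,
    		.flags	= IORESOURCE_IRQ,
    	},
    };

    static struct platform_device board_musb_device = {
    	.name		= "musb_hdrc",		/* assumed to match musb_driver_name */
    	.id		= -1,
    	.resource	= board_musb_resources,
    	.num_resources	= ARRAY_SIZE(board_musb_resources),
    };

    static int __init board_musb_init(void)
    {
    	return platform_device_register(&board_musb_device);
    }
    device_initcall(board_musb_init);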
| 2122 | |||
| 2123 | static int __devexit musb_remove(struct platform_device *pdev) | ||
| 2124 | { | ||
| 2125 | struct musb *musb = dev_to_musb(&pdev->dev); | ||
| 2126 | void __iomem *ctrl_base = musb->ctrl_base; | ||
| 2127 | |||
| 2128 | /* this gets called on rmmod. | ||
| 2129 | * - Host mode: host may still be active | ||
| 2130 | * - Peripheral mode: peripheral is deactivated (or never-activated) | ||
| 2131 | * - OTG mode: both roles are deactivated (or never-activated) | ||
| 2132 | */ | ||
| 2133 | musb_shutdown(pdev); | ||
| 2134 | musb_debug_delete("driver/musb_hdrc", musb); | ||
| 2135 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 2136 | if (musb->board_mode == MUSB_HOST) | ||
| 2137 | usb_remove_hcd(musb_to_hcd(musb)); | ||
| 2138 | #endif | ||
| 2139 | musb_free(musb); | ||
| 2140 | iounmap(ctrl_base); | ||
| 2141 | device_init_wakeup(&pdev->dev, 0); | ||
| 2142 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
| 2143 | pdev->dev.dma_mask = orig_dma_mask; | ||
| 2144 | #endif | ||
| 2145 | return 0; | ||
| 2146 | } | ||
| 2147 | |||
| 2148 | #ifdef CONFIG_PM | ||
| 2149 | |||
| 2150 | static int musb_suspend(struct platform_device *pdev, pm_message_t message) | ||
| 2151 | { | ||
| 2152 | unsigned long flags; | ||
| 2153 | struct musb *musb = dev_to_musb(&pdev->dev); | ||
| 2154 | |||
| 2155 | if (!musb->clock) | ||
| 2156 | return 0; | ||
| 2157 | |||
| 2158 | spin_lock_irqsave(&musb->lock, flags); | ||
| 2159 | |||
| 2160 | if (is_peripheral_active(musb)) { | ||
| 2161 | /* FIXME force disconnect unless we know USB will wake | ||
| 2162 | * the system up quickly enough to respond ... | ||
| 2163 | */ | ||
| 2164 | } else if (is_host_active(musb)) { | ||
| 2165 | /* we know all the children are suspended; sometimes | ||
| 2166 | * they will even be wakeup-enabled. | ||
| 2167 | */ | ||
| 2168 | } | ||
| 2169 | |||
| 2170 | if (musb->set_clock) | ||
| 2171 | musb->set_clock(musb->clock, 0); | ||
| 2172 | else | ||
| 2173 | clk_disable(musb->clock); | ||
| 2174 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 2175 | return 0; | ||
| 2176 | } | ||
| 2177 | |||
| 2178 | static int musb_resume(struct platform_device *pdev) | ||
| 2179 | { | ||
| 2180 | unsigned long flags; | ||
| 2181 | struct musb *musb = dev_to_musb(&pdev->dev); | ||
| 2182 | |||
| 2183 | if (!musb->clock) | ||
| 2184 | return 0; | ||
| 2185 | |||
| 2186 | spin_lock_irqsave(&musb->lock, flags); | ||
| 2187 | |||
| 2188 | if (musb->set_clock) | ||
| 2189 | musb->set_clock(musb->clock, 1); | ||
| 2190 | else | ||
| 2191 | clk_enable(musb->clock); | ||
| 2192 | |||
| 2193 | /* for static CMOS like DaVinci, register values were preserved | ||
| 2194 | * unless for some reason the whole SoC powered down and we're | ||
| 2195 | * not treating that as a whole-system restart (e.g. swsusp) | ||
| 2196 | */ | ||
| 2197 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 2198 | return 0; | ||
| 2199 | } | ||
| 2200 | |||
| 2201 | #else | ||
| 2202 | #define musb_suspend NULL | ||
| 2203 | #define musb_resume NULL | ||
| 2204 | #endif | ||
| 2205 | |||
| 2206 | static struct platform_driver musb_driver = { | ||
| 2207 | .driver = { | ||
| 2208 | .name = (char *)musb_driver_name, | ||
| 2209 | .bus = &platform_bus_type, | ||
| 2210 | .owner = THIS_MODULE, | ||
| 2211 | }, | ||
| 2212 | .remove = __devexit_p(musb_remove), | ||
| 2213 | .shutdown = musb_shutdown, | ||
| 2214 | .suspend = musb_suspend, | ||
| 2215 | .resume = musb_resume, | ||
| 2216 | }; | ||
| 2217 | |||
| 2218 | /*-------------------------------------------------------------------------*/ | ||
| 2219 | |||
| 2220 | static int __init musb_init(void) | ||
| 2221 | { | ||
| 2222 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 2223 | if (usb_disabled()) | ||
| 2224 | return 0; | ||
| 2225 | #endif | ||
| 2226 | |||
| 2227 | pr_info("%s: version " MUSB_VERSION ", " | ||
| 2228 | #ifdef CONFIG_MUSB_PIO_ONLY | ||
| 2229 | "pio" | ||
| 2230 | #elif defined(CONFIG_USB_TI_CPPI_DMA) | ||
| 2231 | "cppi-dma" | ||
| 2232 | #elif defined(CONFIG_USB_INVENTRA_DMA) | ||
| 2233 | "musb-dma" | ||
| 2234 | #elif defined(CONFIG_USB_TUSB_OMAP_DMA) | ||
| 2235 | "tusb-omap-dma" | ||
| 2236 | #else | ||
| 2237 | "?dma?" | ||
| 2238 | #endif | ||
| 2239 | ", " | ||
| 2240 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 2241 | "otg (peripheral+host)" | ||
| 2242 | #elif defined(CONFIG_USB_GADGET_MUSB_HDRC) | ||
| 2243 | "peripheral" | ||
| 2244 | #elif defined(CONFIG_USB_MUSB_HDRC_HCD) | ||
| 2245 | "host" | ||
| 2246 | #endif | ||
| 2247 | ", debug=%d\n", | ||
| 2248 | musb_driver_name, debug); | ||
| 2249 | return platform_driver_probe(&musb_driver, musb_probe); | ||
| 2250 | } | ||
| 2251 | |||
| 2252 | /* make us init after usbcore and before usb | ||
| 2253 | * gadget and host-side drivers start to register | ||
| 2254 | */ | ||
| 2255 | subsys_initcall(musb_init); | ||
| 2256 | |||
| 2257 | static void __exit musb_cleanup(void) | ||
| 2258 | { | ||
| 2259 | platform_driver_unregister(&musb_driver); | ||
| 2260 | } | ||
| 2261 | module_exit(musb_cleanup); | ||
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h new file mode 100644 index 000000000000..eade46d81708 --- /dev/null +++ b/drivers/usb/musb/musb_core.h | |||
| @@ -0,0 +1,507 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG driver defines | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * version 2 as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 32 | * | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef __MUSB_CORE_H__ | ||
| 36 | #define __MUSB_CORE_H__ | ||
| 37 | |||
| 38 | #include <linux/slab.h> | ||
| 39 | #include <linux/list.h> | ||
| 40 | #include <linux/interrupt.h> | ||
| 41 | #include <linux/smp_lock.h> | ||
| 42 | #include <linux/errno.h> | ||
| 43 | #include <linux/clk.h> | ||
| 44 | #include <linux/device.h> | ||
| 45 | #include <linux/usb/ch9.h> | ||
| 46 | #include <linux/usb/gadget.h> | ||
| 47 | #include <linux/usb.h> | ||
| 48 | #include <linux/usb/otg.h> | ||
| 49 | #include <linux/usb/musb.h> | ||
| 50 | |||
| 51 | struct musb; | ||
| 52 | struct musb_hw_ep; | ||
| 53 | struct musb_ep; | ||
| 54 | |||
| 55 | |||
| 56 | #include "musb_debug.h" | ||
| 57 | #include "musb_dma.h" | ||
| 58 | |||
| 59 | #include "musb_io.h" | ||
| 60 | #include "musb_regs.h" | ||
| 61 | |||
| 62 | #include "musb_gadget.h" | ||
| 63 | #include "../core/hcd.h" | ||
| 64 | #include "musb_host.h" | ||
| 65 | |||
| 66 | |||
| 67 | |||
| 68 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 69 | |||
| 70 | #define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST) | ||
| 71 | #define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL) | ||
| 72 | #define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG) | ||
| 73 | |||
| 74 | /* NOTE: otg and peripheral-only state machines start at B_IDLE. | ||
| 75 | * OTG or host-only go to A_IDLE when ID is sensed. | ||
| 76 | */ | ||
| 77 | #define is_peripheral_active(m) (!(m)->is_host) | ||
| 78 | #define is_host_active(m) ((m)->is_host) | ||
| 79 | |||
| 80 | #else | ||
| 81 | #define is_peripheral_enabled(musb) is_peripheral_capable() | ||
| 82 | #define is_host_enabled(musb) is_host_capable() | ||
| 83 | #define is_otg_enabled(musb) 0 | ||
| 84 | |||
| 85 | #define is_peripheral_active(musb) is_peripheral_capable() | ||
| 86 | #define is_host_active(musb) is_host_capable() | ||
| 87 | #endif | ||
| 88 | |||
| 89 | #if defined(CONFIG_USB_MUSB_OTG) || defined(CONFIG_USB_MUSB_PERIPHERAL) | ||
| 90 | /* for some reason, the "select USB_GADGET_MUSB_HDRC" doesn't always | ||
| 91 | * override the default gadget controller choice (often USB_GADGET_DUMMY_HCD). | ||
| 92 | */ | ||
| 93 | #ifndef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 94 | #error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC | ||
| 95 | #endif | ||
| 96 | #endif /* need MUSB gadget selection */ | ||
| 97 | |||
| 98 | |||
| 99 | #ifdef CONFIG_PROC_FS | ||
| 100 | #include <linux/fs.h> | ||
| 101 | #define MUSB_CONFIG_PROC_FS | ||
| 102 | #endif | ||
| 103 | |||
| 104 | /****************************** PERIPHERAL ROLE *****************************/ | ||
| 105 | |||
| 106 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 107 | |||
| 108 | #define is_peripheral_capable() (1) | ||
| 109 | |||
| 110 | extern irqreturn_t musb_g_ep0_irq(struct musb *); | ||
| 111 | extern void musb_g_tx(struct musb *, u8); | ||
| 112 | extern void musb_g_rx(struct musb *, u8); | ||
| 113 | extern void musb_g_reset(struct musb *); | ||
| 114 | extern void musb_g_suspend(struct musb *); | ||
| 115 | extern void musb_g_resume(struct musb *); | ||
| 116 | extern void musb_g_wakeup(struct musb *); | ||
| 117 | extern void musb_g_disconnect(struct musb *); | ||
| 118 | |||
| 119 | #else | ||
| 120 | |||
| 121 | #define is_peripheral_capable() (0) | ||
| 122 | |||
| 123 | static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; } | ||
| 124 | static inline void musb_g_reset(struct musb *m) {} | ||
| 125 | static inline void musb_g_suspend(struct musb *m) {} | ||
| 126 | static inline void musb_g_resume(struct musb *m) {} | ||
| 127 | static inline void musb_g_wakeup(struct musb *m) {} | ||
| 128 | static inline void musb_g_disconnect(struct musb *m) {} | ||
| 129 | |||
| 130 | #endif | ||
| 131 | |||
| 132 | /****************************** HOST ROLE ***********************************/ | ||
| 133 | |||
| 134 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 135 | |||
| 136 | #define is_host_capable() (1) | ||
| 137 | |||
| 138 | extern irqreturn_t musb_h_ep0_irq(struct musb *); | ||
| 139 | extern void musb_host_tx(struct musb *, u8); | ||
| 140 | extern void musb_host_rx(struct musb *, u8); | ||
| 141 | |||
| 142 | #else | ||
| 143 | |||
| 144 | #define is_host_capable() (0) | ||
| 145 | |||
| 146 | static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; } | ||
| 147 | static inline void musb_host_tx(struct musb *m, u8 e) {} | ||
| 148 | static inline void musb_host_rx(struct musb *m, u8 e) {} | ||
| 149 | |||
| 150 | #endif | ||
| 151 | |||
| 152 | |||
| 153 | /****************************** CONSTANTS ********************************/ | ||
| 154 | |||
| 155 | #ifndef MUSB_C_NUM_EPS | ||
| 156 | #define MUSB_C_NUM_EPS ((u8)16) | ||
| 157 | #endif | ||
| 158 | |||
| 159 | #ifndef MUSB_MAX_END0_PACKET | ||
| 160 | #define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE) | ||
| 161 | #endif | ||
| 162 | |||
| 163 | /* host side ep0 states */ | ||
| 164 | enum musb_h_ep0_state { | ||
| 165 | MUSB_EP0_IDLE, | ||
| 166 | MUSB_EP0_START, /* expect ack of setup */ | ||
| 167 | MUSB_EP0_IN, /* expect IN DATA */ | ||
| 168 | MUSB_EP0_OUT, /* expect ack of OUT DATA */ | ||
| 169 | MUSB_EP0_STATUS, /* expect ack of STATUS */ | ||
| 170 | } __attribute__ ((packed)); | ||
| 171 | |||
| 172 | /* peripheral side ep0 states */ | ||
| 173 | enum musb_g_ep0_state { | ||
| 174 | MUSB_EP0_STAGE_SETUP, /* idle, waiting for setup */ | ||
| 175 | MUSB_EP0_STAGE_TX, /* IN data */ | ||
| 176 | MUSB_EP0_STAGE_RX, /* OUT data */ | ||
| 177 | MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */ | ||
| 178 | MUSB_EP0_STAGE_STATUSOUT, /* (after IN data) */ | ||
| 179 | MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */ | ||
| 180 | } __attribute__ ((packed)); | ||
| 181 | |||
| 182 | /* OTG protocol constants */ | ||
| 183 | #define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */ | ||
| 184 | #define OTG_TIME_A_WAIT_BCON 0 /* 0=infinite; min 1000 msec */ | ||
| 185 | #define OTG_TIME_A_IDLE_BDIS 200 /* msec (min) */ | ||
| 186 | |||
| 187 | /*************************** REGISTER ACCESS ********************************/ | ||
| 188 | |||
| 189 | /* Endpoint registers (other than dynfifo setup) can be accessed either | ||
| 190 | * directly with the "flat" model, or after setting up an index register. | ||
| 191 | */ | ||
| 192 | |||
| 193 | #if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \ | ||
| 194 | || defined(CONFIG_ARCH_OMAP3430) | ||
| 195 | /* REVISIT indexed access seemed to | ||
| 196 | * misbehave (on DaVinci) for at least peripheral IN ... | ||
| 197 | */ | ||
| 198 | #define MUSB_FLAT_REG | ||
| 199 | #endif | ||
| 200 | |||
| 201 | /* TUSB mapping: "flat" plus ep0 special cases */ | ||
| 202 | #if defined(CONFIG_USB_TUSB6010) | ||
| 203 | #define musb_ep_select(_mbase, _epnum) \ | ||
| 204 | musb_writeb((_mbase), MUSB_INDEX, (_epnum)) | ||
| 205 | #define MUSB_EP_OFFSET MUSB_TUSB_OFFSET | ||
| 206 | |||
| 207 | /* "flat" mapping: each endpoint has its own i/o address */ | ||
| 208 | #elif defined(MUSB_FLAT_REG) | ||
| 209 | #define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum))) | ||
| 210 | #define MUSB_EP_OFFSET MUSB_FLAT_OFFSET | ||
| 211 | |||
| 212 | /* "indexed" mapping: INDEX register controls register bank select */ | ||
| 213 | #else | ||
| 214 | #define musb_ep_select(_mbase, _epnum) \ | ||
| 215 | musb_writeb((_mbase), MUSB_INDEX, (_epnum)) | ||
| 216 | #define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET | ||
| 217 | #endif | ||
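One way to read this mapping: callers always pair musb_ep_select() with a MUSB_EP_OFFSET()-based access, and the macros collapse to whatever the platform needs (a real INDEX write, a pure address computation for the flat model, or the TUSB variant). A hedged sketch of that intended usage, assuming MUSB_TXMAXP from musb_regs.h as the example endpoint register; this helper is not part of the header.

    static inline u16 musb_read_txmaxp(void __iomem *mbase, u8 epnum)
    {
    	/* a no-op for the flat model, an INDEX write otherwise */
    	musb_ep_select(mbase, epnum);
    	return musb_readw(mbase, MUSB_EP_OFFSET(epnum, MUSB_TXMAXP));
    }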
| 218 | |||
| 219 | /****************************** FUNCTIONS ********************************/ | ||
| 220 | |||
| 221 | #define MUSB_HST_MODE(_musb)\ | ||
| 222 | { (_musb)->is_host = true; } | ||
| 223 | #define MUSB_DEV_MODE(_musb) \ | ||
| 224 | { (_musb)->is_host = false; } | ||
| 225 | |||
| 226 | #define test_devctl_hst_mode(_x) \ | ||
| 227 | (musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM) | ||
| 228 | |||
| 229 | #define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral") | ||
| 230 | |||
| 231 | /******************************** TYPES *************************************/ | ||
| 232 | |||
| 233 | /* | ||
| 234 | * struct musb_hw_ep - endpoint hardware (bidirectional) | ||
| 235 | * | ||
| 236 | * Ordered slightly for better cacheline locality. | ||
| 237 | */ | ||
| 238 | struct musb_hw_ep { | ||
| 239 | struct musb *musb; | ||
| 240 | void __iomem *fifo; | ||
| 241 | void __iomem *regs; | ||
| 242 | |||
| 243 | #ifdef CONFIG_USB_TUSB6010 | ||
| 244 | void __iomem *conf; | ||
| 245 | #endif | ||
| 246 | |||
| 247 | /* index in musb->endpoints[] */ | ||
| 248 | u8 epnum; | ||
| 249 | |||
| 250 | /* hardware configuration, possibly dynamic */ | ||
| 251 | bool is_shared_fifo; | ||
| 252 | bool tx_double_buffered; | ||
| 253 | bool rx_double_buffered; | ||
| 254 | u16 max_packet_sz_tx; | ||
| 255 | u16 max_packet_sz_rx; | ||
| 256 | |||
| 257 | struct dma_channel *tx_channel; | ||
| 258 | struct dma_channel *rx_channel; | ||
| 259 | |||
| 260 | #ifdef CONFIG_USB_TUSB6010 | ||
| 261 | /* TUSB has "asynchronous" and "synchronous" dma modes */ | ||
| 262 | dma_addr_t fifo_async; | ||
| 263 | dma_addr_t fifo_sync; | ||
| 264 | void __iomem *fifo_sync_va; | ||
| 265 | #endif | ||
| 266 | |||
| 267 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 268 | void __iomem *target_regs; | ||
| 269 | |||
| 270 | /* currently scheduled peripheral endpoint */ | ||
| 271 | struct musb_qh *in_qh; | ||
| 272 | struct musb_qh *out_qh; | ||
| 273 | |||
| 274 | u8 rx_reinit; | ||
| 275 | u8 tx_reinit; | ||
| 276 | #endif | ||
| 277 | |||
| 278 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 279 | /* peripheral side */ | ||
| 280 | struct musb_ep ep_in; /* TX */ | ||
| 281 | struct musb_ep ep_out; /* RX */ | ||
| 282 | #endif | ||
| 283 | }; | ||
| 284 | |||
| 285 | static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep) | ||
| 286 | { | ||
| 287 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 288 | return next_request(&hw_ep->ep_in); | ||
| 289 | #else | ||
| 290 | return NULL; | ||
| 291 | #endif | ||
| 292 | } | ||
| 293 | |||
| 294 | static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep) | ||
| 295 | { | ||
| 296 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 297 | return next_request(&hw_ep->ep_out); | ||
| 298 | #else | ||
| 299 | return NULL; | ||
| 300 | #endif | ||
| 301 | } | ||
| 302 | |||
| 303 | /* | ||
| 304 | * struct musb - Driver instance data. | ||
| 305 | */ | ||
| 306 | struct musb { | ||
| 307 | /* device lock */ | ||
| 308 | spinlock_t lock; | ||
| 309 | struct clk *clock; | ||
| 310 | irqreturn_t (*isr)(int, void *); | ||
| 311 | struct work_struct irq_work; | ||
| 312 | |||
| 313 | /* this hub status bit is reserved by USB 2.0 and not seen by usbcore */ | ||
| 314 | #define MUSB_PORT_STAT_RESUME (1 << 31) | ||
| 315 | |||
| 316 | u32 port1_status; | ||
| 317 | |||
| 318 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 319 | unsigned long rh_timer; | ||
| 320 | |||
| 321 | enum musb_h_ep0_state ep0_stage; | ||
| 322 | |||
| 323 | /* bulk traffic normally dedicates endpoint hardware, and each | ||
| 324 | * direction has its own ring of host side endpoints. | ||
| 325 | * we try to progress the transfer at the head of each endpoint's | ||
| 326 | * queue until it completes or NAKs too much; then we try the next | ||
| 327 | * endpoint. | ||
| 328 | */ | ||
| 329 | struct musb_hw_ep *bulk_ep; | ||
| 330 | |||
| 331 | struct list_head control; /* of musb_qh */ | ||
| 332 | struct list_head in_bulk; /* of musb_qh */ | ||
| 333 | struct list_head out_bulk; /* of musb_qh */ | ||
| 334 | struct musb_qh *periodic[32]; /* tree of interrupt+iso */ | ||
| 335 | #endif | ||
| 336 | |||
| 337 | /* called with IRQs blocked; ON/nonzero implies starting a session, | ||
| 338 | * and waiting at least a_wait_vrise_tmout. | ||
| 339 | */ | ||
| 340 | void (*board_set_vbus)(struct musb *, int is_on); | ||
| 341 | |||
| 342 | struct dma_controller *dma_controller; | ||
| 343 | |||
| 344 | struct device *controller; | ||
| 345 | void __iomem *ctrl_base; | ||
| 346 | void __iomem *mregs; | ||
| 347 | |||
| 348 | #ifdef CONFIG_USB_TUSB6010 | ||
| 349 | dma_addr_t async; | ||
| 350 | dma_addr_t sync; | ||
| 351 | void __iomem *sync_va; | ||
| 352 | #endif | ||
| 353 | |||
| 354 | /* passed down from chip/board specific irq handlers */ | ||
| 355 | u8 int_usb; | ||
| 356 | u16 int_rx; | ||
| 357 | u16 int_tx; | ||
| 358 | |||
| 359 | struct otg_transceiver xceiv; | ||
| 360 | |||
| 361 | int nIrq; | ||
| 362 | |||
| 363 | struct musb_hw_ep endpoints[MUSB_C_NUM_EPS]; | ||
| 364 | #define control_ep endpoints | ||
| 365 | |||
| 366 | #define VBUSERR_RETRY_COUNT 3 | ||
| 367 | u16 vbuserr_retry; | ||
| 368 | u16 epmask; | ||
| 369 | u8 nr_endpoints; | ||
| 370 | |||
| 371 | u8 board_mode; /* enum musb_mode */ | ||
| 372 | int (*board_set_power)(int state); | ||
| 373 | |||
| 374 | int (*set_clock)(struct clk *clk, int is_active); | ||
| 375 | |||
| 376 | u8 min_power; /* vbus for periph, in mA/2 */ | ||
| 377 | |||
| 378 | bool is_host; | ||
| 379 | |||
| 380 | int a_wait_bcon; /* VBUS timeout in msecs */ | ||
| 381 | unsigned long idle_timeout; /* Next timeout in jiffies */ | ||
| 382 | |||
| 383 | /* active means connected and not suspended */ | ||
| 384 | unsigned is_active:1; | ||
| 385 | |||
| 386 | unsigned is_multipoint:1; | ||
| 387 | unsigned ignore_disconnect:1; /* during bus resets */ | ||
| 388 | |||
| 389 | #ifdef C_MP_TX | ||
| 390 | unsigned bulk_split:1; | ||
| 391 | #define can_bulk_split(musb,type) \ | ||
| 392 | (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split) | ||
| 393 | #else | ||
| 394 | #define can_bulk_split(musb, type) 0 | ||
| 395 | #endif | ||
| 396 | |||
| 397 | #ifdef C_MP_RX | ||
| 398 | unsigned bulk_combine:1; | ||
| 399 | #define can_bulk_combine(musb,type) \ | ||
| 400 | (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine) | ||
| 401 | #else | ||
| 402 | #define can_bulk_combine(musb, type) 0 | ||
| 403 | #endif | ||
| 404 | |||
| 405 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 406 | /* is_suspended means USB B_PERIPHERAL suspend */ | ||
| 407 | unsigned is_suspended:1; | ||
| 408 | |||
| 409 | /* may_wakeup means remote wakeup is enabled */ | ||
| 410 | unsigned may_wakeup:1; | ||
| 411 | |||
| 412 | /* is_self_powered is reported in device status and the | ||
| 413 | * config descriptor. is_bus_powered means B_PERIPHERAL | ||
| 414 | * draws some VBUS current; both can be true. | ||
| 415 | */ | ||
| 416 | unsigned is_self_powered:1; | ||
| 417 | unsigned is_bus_powered:1; | ||
| 418 | |||
| 419 | unsigned set_address:1; | ||
| 420 | unsigned test_mode:1; | ||
| 421 | unsigned softconnect:1; | ||
| 422 | |||
| 423 | u8 address; | ||
| 424 | u8 test_mode_nr; | ||
| 425 | u16 ackpend; /* ep0 */ | ||
| 426 | enum musb_g_ep0_state ep0_state; | ||
| 427 | struct usb_gadget g; /* the gadget */ | ||
| 428 | struct usb_gadget_driver *gadget_driver; /* its driver */ | ||
| 429 | #endif | ||
| 430 | |||
| 431 | struct musb_hdrc_config *config; | ||
| 432 | |||
| 433 | #ifdef MUSB_CONFIG_PROC_FS | ||
| 434 | struct proc_dir_entry *proc_entry; | ||
| 435 | #endif | ||
| 436 | }; | ||
| 437 | |||
| 438 | static inline void musb_set_vbus(struct musb *musb, int is_on) | ||
| 439 | { | ||
| 440 | musb->board_set_vbus(musb, is_on); | ||
| 441 | } | ||
| 442 | |||
| 443 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 444 | static inline struct musb *gadget_to_musb(struct usb_gadget *g) | ||
| 445 | { | ||
| 446 | return container_of(g, struct musb, g); | ||
| 447 | } | ||
| 448 | #endif | ||
| 449 | |||
| 450 | |||
| 451 | /***************************** Glue it together *****************************/ | ||
| 452 | |||
| 453 | extern const char musb_driver_name[]; | ||
| 454 | |||
| 455 | extern void musb_start(struct musb *musb); | ||
| 456 | extern void musb_stop(struct musb *musb); | ||
| 457 | |||
| 458 | extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src); | ||
| 459 | extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst); | ||
| 460 | |||
| 461 | extern void musb_load_testpacket(struct musb *); | ||
| 462 | |||
| 463 | extern irqreturn_t musb_interrupt(struct musb *); | ||
| 464 | |||
| 465 | extern void musb_platform_enable(struct musb *musb); | ||
| 466 | extern void musb_platform_disable(struct musb *musb); | ||
| 467 | |||
| 468 | extern void musb_hnp_stop(struct musb *musb); | ||
| 469 | |||
| 470 | extern void musb_platform_set_mode(struct musb *musb, u8 musb_mode); | ||
| 471 | |||
| 472 | #if defined(CONFIG_USB_TUSB6010) || \ | ||
| 473 | defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) | ||
| 474 | extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout); | ||
| 475 | #else | ||
| 476 | #define musb_platform_try_idle(x, y) do {} while (0) | ||
| 477 | #endif | ||
| 478 | |||
| 479 | #ifdef CONFIG_USB_TUSB6010 | ||
| 480 | extern int musb_platform_get_vbus_status(struct musb *musb); | ||
| 481 | #else | ||
| 482 | #define musb_platform_get_vbus_status(x) 0 | ||
| 483 | #endif | ||
| 484 | |||
| 485 | extern int __init musb_platform_init(struct musb *musb); | ||
| 486 | extern int musb_platform_exit(struct musb *musb); | ||
| 487 | |||
| 488 | /*-------------------------- ProcFS definitions ---------------------*/ | ||
| 489 | |||
| 490 | struct proc_dir_entry; | ||
| 491 | |||
| 492 | #if (MUSB_DEBUG > 0) && defined(MUSB_CONFIG_PROC_FS) | ||
| 493 | extern struct proc_dir_entry *musb_debug_create(char *name, struct musb *data); | ||
| 494 | extern void musb_debug_delete(char *name, struct musb *data); | ||
| 495 | |||
| 496 | #else | ||
| 497 | static inline struct proc_dir_entry * | ||
| 498 | musb_debug_create(char *name, struct musb *data) | ||
| 499 | { | ||
| 500 | return NULL; | ||
| 501 | } | ||
| 502 | static inline void musb_debug_delete(char *name, struct musb *data) | ||
| 503 | { | ||
| 504 | } | ||
| 505 | #endif | ||
| 506 | |||
| 507 | #endif /* __MUSB_CORE_H__ */ | ||
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h new file mode 100644 index 000000000000..3bdb311e820d --- /dev/null +++ b/drivers/usb/musb/musb_debug.h | |||
| @@ -0,0 +1,66 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG driver debug defines | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * version 2 as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 32 | * | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef __MUSB_LINUX_DEBUG_H__ | ||
| 36 | #define __MUSB_LINUX_DEBUG_H__ | ||
| 37 | |||
| 38 | #define yprintk(facility, format, args...) \ | ||
| 39 | do { printk(facility "%s %d: " format , \ | ||
| 40 | __func__, __LINE__ , ## args); } while (0) | ||
| 41 | #define WARNING(fmt, args...) yprintk(KERN_WARNING, fmt, ## args) | ||
| 42 | #define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args) | ||
| 43 | #define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args) | ||
| 44 | |||
| 45 | #define xprintk(level, facility, format, args...) do { \ | ||
| 46 | if (_dbg_level(level)) { \ | ||
| 47 | printk(facility "%s %d: " format , \ | ||
| 48 | __func__, __LINE__ , ## args); \ | ||
| 49 | } } while (0) | ||
| 50 | |||
| 51 | #if MUSB_DEBUG > 0 | ||
| 52 | extern unsigned debug; | ||
| 53 | #else | ||
| 54 | #define debug 0 | ||
| 55 | #endif | ||
| 56 | |||
| 57 | static inline int _dbg_level(unsigned l) | ||
| 58 | { | ||
| 59 | return debug >= l; | ||
| 60 | } | ||
| 61 | |||
| 62 | #define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args) | ||
| 63 | |||
| 64 | extern const char *otg_state_string(struct musb *); | ||
| 65 | |||
| 66 | #endif /* __MUSB_LINUX_DEBUG_H__ */ | ||
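A minimal usage sketch for the macros above (the helper name and its epnum parameter are illustrative assumptions, not part of this header): DBG(level, ...) prints only when the runtime debug value is at least level, while WARNING()/INFO()/ERR() always print at their facility.

/* sketch only: in a build with MUSB_DEBUG > 0 and debug set to 3, the
 * first two messages reach the log and the third is filtered by _dbg_level().
 */
static void musb_example_trace(int epnum)		/* hypothetical helper */
{
	WARNING("always printed, at KERN_WARNING\n");
	DBG(2, "setup detail for ep%d, printed when debug >= 2\n", epnum);
	DBG(6, "per-packet noise for ep%d, printed only when debug >= 6\n", epnum);
}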
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h new file mode 100644 index 000000000000..0a2c4e3602c1 --- /dev/null +++ b/drivers/usb/musb/musb_dma.h | |||
| @@ -0,0 +1,172 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG driver DMA controller abstraction | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * version 2 as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 32 | * | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef __MUSB_DMA_H__ | ||
| 36 | #define __MUSB_DMA_H__ | ||
| 37 | |||
| 38 | struct musb_hw_ep; | ||
| 39 | |||
| 40 | /* | ||
| 41 | * DMA Controller Abstraction | ||
| 42 | * | ||
| 43 | * DMA Controllers are abstracted to allow use of a variety of different | ||
| 44 | * implementations of DMA, as allowed by the Inventra USB cores. On the | ||
| 45 | * host side, usbcore sets up the DMA mappings and flushes caches; on the | ||
| 46 | * peripheral side, the gadget controller driver does. Responsibilities | ||
| 47 | * of a DMA controller driver include: | ||
| 48 | * | ||
| 49 | * - Handling the details of moving multiple USB packets | ||
| 50 | * in cooperation with the Inventra USB core, including especially | ||
| 51 | * the correct RX side treatment of short packets and buffer-full | ||
| 52 | * states (both of which terminate transfers). | ||
| 53 | * | ||
| 54 | * - Knowing the correlation between dma channels and the | ||
| 55 | * Inventra core's local endpoint resources and data direction. | ||
| 56 | * | ||
| 57 | * - Maintaining a list of allocated/available channels. | ||
| 58 | * | ||
| 59 | * - Updating channel status on interrupts, | ||
| 60 | * whether shared with the Inventra core or separate. | ||
| 61 | */ | ||
| 62 | |||
| 63 | #define DMA_ADDR_INVALID (~(dma_addr_t)0) | ||
| 64 | |||
| 65 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
| 66 | #define is_dma_capable() (1) | ||
| 67 | #else | ||
| 68 | #define is_dma_capable() (0) | ||
| 69 | #endif | ||
| 70 | |||
| 71 | #ifdef CONFIG_USB_TI_CPPI_DMA | ||
| 72 | #define is_cppi_enabled() 1 | ||
| 73 | #else | ||
| 74 | #define is_cppi_enabled() 0 | ||
| 75 | #endif | ||
| 76 | |||
| 77 | #ifdef CONFIG_USB_TUSB_OMAP_DMA | ||
| 78 | #define tusb_dma_omap() 1 | ||
| 79 | #else | ||
| 80 | #define tusb_dma_omap() 0 | ||
| 81 | #endif | ||
| 82 | |||
| 83 | /* | ||
| 84 | * DMA channel status ... updated by the dma controller driver whenever that | ||
| 85 | * status changes, and protected by the overall controller spinlock. | ||
| 86 | */ | ||
| 87 | enum dma_channel_status { | ||
| 88 | /* unallocated */ | ||
| 89 | MUSB_DMA_STATUS_UNKNOWN, | ||
| 90 | /* allocated ... but not busy, no errors */ | ||
| 91 | MUSB_DMA_STATUS_FREE, | ||
| 92 | /* busy ... transactions are active */ | ||
| 93 | MUSB_DMA_STATUS_BUSY, | ||
| 94 | /* transaction(s) aborted due to ... dma or memory bus error */ | ||
| 95 | MUSB_DMA_STATUS_BUS_ABORT, | ||
| 96 | /* transaction(s) aborted due to ... core error or USB fault */ | ||
| 97 | MUSB_DMA_STATUS_CORE_ABORT | ||
| 98 | }; | ||
| 99 | |||
| 100 | struct dma_controller; | ||
| 101 | |||
| 102 | /** | ||
| 103 | * struct dma_channel - A DMA channel. | ||
| 104 | * @private_data: channel-private data | ||
| 105 | * @max_len: the maximum number of bytes the channel can move in one | ||
| 106 | * transaction (typically representing many USB maximum-sized packets) | ||
| 107 | * @actual_len: how many bytes have been transferred | ||
| 108 | * @status: current channel status (updated e.g. on interrupt) | ||
| 109 | * @desired_mode: true if mode 1 is desired; false if mode 0 is desired | ||
| 110 | * | ||
| 111 | * channels are associated with an endpoint for the duration of at least | ||
| 112 | * one usb transfer. | ||
| 113 | */ | ||
| 114 | struct dma_channel { | ||
| 115 | void *private_data; | ||
| 116 | /* FIXME not void* private_data, but a dma_controller * */ | ||
| 117 | size_t max_len; | ||
| 118 | size_t actual_len; | ||
| 119 | enum dma_channel_status status; | ||
| 120 | bool desired_mode; | ||
| 121 | }; | ||
| 122 | |||
| 123 | /* | ||
| 124 | * dma_channel_status - return status of dma channel | ||
| 125 | * @c: the channel | ||
| 126 | * | ||
| 127 | * Returns the software's view of the channel status. If that status is BUSY | ||
| 128 | * then it's possible that the hardware has completed (or aborted) a transfer, | ||
| 129 | * so the driver needs to update that status. | ||
| 130 | */ | ||
| 131 | static inline enum dma_channel_status | ||
| 132 | dma_channel_status(struct dma_channel *c) | ||
| 133 | { | ||
| 134 | return (is_dma_capable() && c) ? c->status : MUSB_DMA_STATUS_UNKNOWN; | ||
| 135 | } | ||
| 136 | |||
| 137 | /** | ||
| 138 | * struct dma_controller - A DMA Controller. | ||
| 139 | * @start: call this to start a DMA controller; | ||
| 140 | * return 0 on success, else negative errno | ||
| 141 | * @stop: call this to stop a DMA controller | ||
| 142 | * return 0 on success, else negative errno | ||
| 143 | * @channel_alloc: call this to allocate a DMA channel | ||
| 144 | * @channel_release: call this to release a DMA channel | ||
| 145 | * @channel_abort: call this to abort a pending DMA transaction, | ||
| 146 | * returning it to FREE (but allocated) state | ||
| 147 | * | ||
| 148 | * Controllers manage dma channels. | ||
| 149 | */ | ||
| 150 | struct dma_controller { | ||
| 151 | int (*start)(struct dma_controller *); | ||
| 152 | int (*stop)(struct dma_controller *); | ||
| 153 | struct dma_channel *(*channel_alloc)(struct dma_controller *, | ||
| 154 | struct musb_hw_ep *, u8 is_tx); | ||
| 155 | void (*channel_release)(struct dma_channel *); | ||
| 156 | int (*channel_program)(struct dma_channel *channel, | ||
| 157 | u16 maxpacket, u8 mode, | ||
| 158 | dma_addr_t dma_addr, | ||
| 159 | u32 length); | ||
| 160 | int (*channel_abort)(struct dma_channel *); | ||
| 161 | }; | ||
| 162 | |||
| 163 | /* called after channel_program(), may indicate a fault */ | ||
| 164 | extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit); | ||
| 165 | |||
| 166 | |||
| 167 | extern struct dma_controller *__init | ||
| 168 | dma_controller_create(struct musb *, void __iomem *); | ||
| 169 | |||
| 170 | extern void dma_controller_destroy(struct dma_controller *); | ||
| 171 | |||
| 172 | #endif /* __MUSB_DMA_H__ */ | ||
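A sketch of how endpoint code might drive the controller ops declared above; the helper name, bool return, and PIO-fallback policy are assumptions for illustration, not part of this interface. In this patch the gadget side allocates a channel once at endpoint enable time (musb_gadget_enable()) and then calls channel_program() per request from txstate()/rxstate(); completion or a fault is reported back through musb_dma_completion().

/* sketch only: claim a channel, program one transfer, and fall back to
 * PIO if anything is missing; the controller driver updates ch->status
 * and the core learns of completion via musb_dma_completion().
 */
static inline bool musb_try_dma_sketch(struct dma_controller *c,	/* hypothetical */
		struct musb_hw_ep *hw_ep, u8 is_tx,
		u16 maxpacket, dma_addr_t buf, u32 len)
{
	struct dma_channel *ch;

	if (!is_dma_capable() || !c)
		return false;		/* PIO-only build, or no controller registered */

	ch = c->channel_alloc(c, hw_ep, is_tx);
	if (!ch)
		return false;		/* out of channels: caller uses PIO */

	/* mode 1 is only worthwhile for multi-packet transfers */
	if (!c->channel_program(ch, maxpacket, len > maxpacket, buf, len)) {
		c->channel_release(ch);
		return false;
	}
	return true;
}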
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c new file mode 100644 index 000000000000..d6a802c224fa --- /dev/null +++ b/drivers/usb/musb/musb_gadget.c | |||
| @@ -0,0 +1,2031 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG driver peripheral support | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * version 2 as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 32 | * | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/kernel.h> | ||
| 36 | #include <linux/list.h> | ||
| 37 | #include <linux/timer.h> | ||
| 38 | #include <linux/module.h> | ||
| 39 | #include <linux/smp.h> | ||
| 40 | #include <linux/spinlock.h> | ||
| 41 | #include <linux/delay.h> | ||
| 42 | #include <linux/moduleparam.h> | ||
| 43 | #include <linux/stat.h> | ||
| 44 | #include <linux/dma-mapping.h> | ||
| 45 | |||
| 46 | #include "musb_core.h" | ||
| 47 | |||
| 48 | |||
| 49 | /* MUSB PERIPHERAL status 3-mar-2006: | ||
| 50 | * | ||
| 51 | * - EP0 seems solid. It passes both USBCV and usbtest control cases. | ||
| 52 | * Minor glitches: | ||
| 53 | * | ||
| 54 | * + remote wakeup to Linux hosts works, but USBCV failures were | ||
| 55 | * seen in one test run (operator error?) | ||
| 56 | * + endpoint halt tests -- in both usbtest and usbcv -- seem | ||
| 57 | * to break when dma is enabled ... is something wrongly | ||
| 58 | * clearing SENDSTALL? | ||
| 59 | * | ||
| 60 | * - Mass storage behaved ok when last tested. Network traffic patterns | ||
| 61 | * (with lots of short transfers etc) need retesting; they turn up the | ||
| 62 | * worst cases of the DMA, since short packets are typical but are not | ||
| 63 | * required. | ||
| 64 | * | ||
| 65 | * - TX/IN | ||
| 66 | * + both pio and dma behave well with network and g_zero tests | ||
| 67 | * + no cppi throughput issues other than no-hw-queueing | ||
| 68 | * + failed with FLAT_REG (DaVinci) | ||
| 69 | * + seems to behave with double buffering, PIO -and- CPPI | ||
| 70 | * + with gadgetfs + AIO, requests got lost? | ||
| 71 | * | ||
| 72 | * - RX/OUT | ||
| 73 | * + both pio and dma behave well with network and g_zero tests | ||
| 74 | * + dma is slow in typical case (short_not_ok is clear) | ||
| 75 | * + double buffering ok with PIO | ||
| 76 | * + double buffering *FAILS* with CPPI, wrong data bytes sometimes | ||
| 77 | * + request lossage observed with gadgetfs | ||
| 78 | * | ||
| 79 | * - ISO not tested ... might work, but only weakly isochronous | ||
| 80 | * | ||
| 81 | * - Gadget driver disabling of softconnect during bind() is ignored; so | ||
| 82 | * drivers can't hold off host requests until userspace is ready. | ||
| 83 | * (Workaround: they can turn it off later.) | ||
| 84 | * | ||
| 85 | * - PORTABILITY (assumes PIO works): | ||
| 86 | * + DaVinci, basically works with cppi dma | ||
| 87 | * + OMAP 2430, ditto with mentor dma | ||
| 88 | * + TUSB 6010, platform-specific dma in the works | ||
| 89 | */ | ||
| 90 | |||
| 91 | /* ----------------------------------------------------------------------- */ | ||
| 92 | |||
| 93 | /* | ||
| 94 | * Immediately complete a request. | ||
| 95 | * | ||
| 96 | * @param request the request to complete | ||
| 97 | * @param status the status to complete the request with | ||
| 98 | * Context: controller locked, IRQs blocked. | ||
| 99 | */ | ||
| 100 | void musb_g_giveback( | ||
| 101 | struct musb_ep *ep, | ||
| 102 | struct usb_request *request, | ||
| 103 | int status) | ||
| 104 | __releases(ep->musb->lock) | ||
| 105 | __acquires(ep->musb->lock) | ||
| 106 | { | ||
| 107 | struct musb_request *req; | ||
| 108 | struct musb *musb; | ||
| 109 | int busy = ep->busy; | ||
| 110 | |||
| 111 | req = to_musb_request(request); | ||
| 112 | |||
| 113 | list_del(&request->list); | ||
| 114 | if (req->request.status == -EINPROGRESS) | ||
| 115 | req->request.status = status; | ||
| 116 | musb = req->musb; | ||
| 117 | |||
| 118 | ep->busy = 1; | ||
| 119 | spin_unlock(&musb->lock); | ||
| 120 | if (is_dma_capable()) { | ||
| 121 | if (req->mapped) { | ||
| 122 | dma_unmap_single(musb->controller, | ||
| 123 | req->request.dma, | ||
| 124 | req->request.length, | ||
| 125 | req->tx | ||
| 126 | ? DMA_TO_DEVICE | ||
| 127 | : DMA_FROM_DEVICE); | ||
| 128 | req->request.dma = DMA_ADDR_INVALID; | ||
| 129 | req->mapped = 0; | ||
| 130 | } else if (req->request.dma != DMA_ADDR_INVALID) | ||
| 131 | dma_sync_single_for_cpu(musb->controller, | ||
| 132 | req->request.dma, | ||
| 133 | req->request.length, | ||
| 134 | req->tx | ||
| 135 | ? DMA_TO_DEVICE | ||
| 136 | : DMA_FROM_DEVICE); | ||
| 137 | } | ||
| 138 | if (request->status == 0) | ||
| 139 | DBG(5, "%s done request %p, %d/%d\n", | ||
| 140 | ep->end_point.name, request, | ||
| 141 | req->request.actual, req->request.length); | ||
| 142 | else | ||
| 143 | DBG(2, "%s request %p, %d/%d fault %d\n", | ||
| 144 | ep->end_point.name, request, | ||
| 145 | req->request.actual, req->request.length, | ||
| 146 | request->status); | ||
| 147 | req->request.complete(&req->ep->end_point, &req->request); | ||
| 148 | spin_lock(&musb->lock); | ||
| 149 | ep->busy = busy; | ||
| 150 | } | ||
| 151 | |||
| 152 | /* ----------------------------------------------------------------------- */ | ||
| 153 | |||
| 154 | /* | ||
| 155 | * Abort requests queued to an endpoint, using the given status. Synchronous. | ||
| 156 | * The caller has locked the controller, blocked IRQs, and selected this ep. | ||
| 157 | */ | ||
| 158 | static void nuke(struct musb_ep *ep, const int status) | ||
| 159 | { | ||
| 160 | struct musb_request *req = NULL; | ||
| 161 | void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; | ||
| 162 | |||
| 163 | ep->busy = 1; | ||
| 164 | |||
| 165 | if (is_dma_capable() && ep->dma) { | ||
| 166 | struct dma_controller *c = ep->musb->dma_controller; | ||
| 167 | int value; | ||
| 168 | if (ep->is_in) { | ||
| 169 | musb_writew(epio, MUSB_TXCSR, | ||
| 170 | 0 | MUSB_TXCSR_FLUSHFIFO); | ||
| 171 | musb_writew(epio, MUSB_TXCSR, | ||
| 172 | 0 | MUSB_TXCSR_FLUSHFIFO); | ||
| 173 | } else { | ||
| 174 | musb_writew(epio, MUSB_RXCSR, | ||
| 175 | 0 | MUSB_RXCSR_FLUSHFIFO); | ||
| 176 | musb_writew(epio, MUSB_RXCSR, | ||
| 177 | 0 | MUSB_RXCSR_FLUSHFIFO); | ||
| 178 | } | ||
| 179 | |||
| 180 | value = c->channel_abort(ep->dma); | ||
| 181 | DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value); | ||
| 182 | c->channel_release(ep->dma); | ||
| 183 | ep->dma = NULL; | ||
| 184 | } | ||
| 185 | |||
| 186 | while (!list_empty(&(ep->req_list))) { | ||
| 187 | req = container_of(ep->req_list.next, struct musb_request, | ||
| 188 | request.list); | ||
| 189 | musb_g_giveback(ep, &req->request, status); | ||
| 190 | } | ||
| 191 | } | ||
| 192 | |||
| 193 | /* ----------------------------------------------------------------------- */ | ||
| 194 | |||
| 195 | /* Data transfers - pure PIO, pure DMA, or mixed mode */ | ||
| 196 | |||
| 197 | /* | ||
| 198 | * This assumes the separate CPPI engine is responding to DMA requests | ||
| 199 | * from the usb core ... sequenced a bit differently from mentor dma. | ||
| 200 | */ | ||
| 201 | |||
| 202 | static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) | ||
| 203 | { | ||
| 204 | if (can_bulk_split(musb, ep->type)) | ||
| 205 | return ep->hw_ep->max_packet_sz_tx; | ||
| 206 | else | ||
| 207 | return ep->packet_sz; | ||
| 208 | } | ||
| 209 | |||
| 210 | |||
| 211 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
| 212 | |||
| 213 | /* Peripheral tx (IN) using Mentor DMA works as follows: | ||
| 214 | Only mode 0 is used for transfers <= wPktSize; | ||
| 215 | mode 1 is used for larger transfers. | ||
| 216 | |||
| 217 | One of the following happens: | ||
| 218 | - Host sends IN token which causes an endpoint interrupt | ||
| 219 | -> TxAvail | ||
| 220 | -> if DMA is currently busy, exit. | ||
| 221 | -> if queue is non-empty, txstate(). | ||
| 222 | |||
| 223 | - Request is queued by the gadget driver. | ||
| 224 | -> if queue was previously empty, txstate() | ||
| 225 | |||
| 226 | txstate() | ||
| 227 | -> start | ||
| 228 | /\ -> setup DMA | ||
| 229 | | (data is transferred to the FIFO, then sent out when | ||
| 230 | | IN token(s) are received from the Host.) | ||
| 231 | | -> DMA interrupt on completion | ||
| 232 | | calls TxAvail. | ||
| 233 | | -> stop DMA, ~DmaEenab, | ||
| 234 | | -> set TxPktRdy for last short pkt or zlp | ||
| 235 | | -> Complete Request | ||
| 236 | | -> Continue next request (call txstate) | ||
| 237 | |___________________________________| | ||
| 238 | |||
| 239 | * Non-Mentor DMA engines can of course work differently, such as by | ||
| 240 | * upleveling from irq-per-packet to irq-per-buffer. | ||
| 241 | */ | ||
| 242 | |||
| 243 | #endif | ||
| 244 | |||
| 245 | /* | ||
| 246 | * An endpoint is transmitting data. This can be called either from | ||
| 247 | * the IRQ routine or from ep.queue() to kickstart a request on an | ||
| 248 | * endpoint. | ||
| 249 | * | ||
| 250 | * Context: controller locked, IRQs blocked, endpoint selected | ||
| 251 | */ | ||
| 252 | static void txstate(struct musb *musb, struct musb_request *req) | ||
| 253 | { | ||
| 254 | u8 epnum = req->epnum; | ||
| 255 | struct musb_ep *musb_ep; | ||
| 256 | void __iomem *epio = musb->endpoints[epnum].regs; | ||
| 257 | struct usb_request *request; | ||
| 258 | u16 fifo_count = 0, csr; | ||
| 259 | int use_dma = 0; | ||
| 260 | |||
| 261 | musb_ep = req->ep; | ||
| 262 | |||
| 263 | /* we shouldn't get here while DMA is active ... but we do ... */ | ||
| 264 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { | ||
| 265 | DBG(4, "dma pending...\n"); | ||
| 266 | return; | ||
| 267 | } | ||
| 268 | |||
| 269 | /* read TXCSR before */ | ||
| 270 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 271 | |||
| 272 | request = &req->request; | ||
| 273 | fifo_count = min(max_ep_writesize(musb, musb_ep), | ||
| 274 | (int)(request->length - request->actual)); | ||
| 275 | |||
| 276 | if (csr & MUSB_TXCSR_TXPKTRDY) { | ||
| 277 | DBG(5, "%s old packet still ready, txcsr %03x\n", | ||
| 278 | musb_ep->end_point.name, csr); | ||
| 279 | return; | ||
| 280 | } | ||
| 281 | |||
| 282 | if (csr & MUSB_TXCSR_P_SENDSTALL) { | ||
| 283 | DBG(5, "%s stalling, txcsr %03x\n", | ||
| 284 | musb_ep->end_point.name, csr); | ||
| 285 | return; | ||
| 286 | } | ||
| 287 | |||
| 288 | DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", | ||
| 289 | epnum, musb_ep->packet_sz, fifo_count, | ||
| 290 | csr); | ||
| 291 | |||
| 292 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
| 293 | if (is_dma_capable() && musb_ep->dma) { | ||
| 294 | struct dma_controller *c = musb->dma_controller; | ||
| 295 | |||
| 296 | use_dma = (request->dma != DMA_ADDR_INVALID); | ||
| 297 | |||
| 298 | /* MUSB_TXCSR_P_ISO is still set correctly */ | ||
| 299 | |||
| 300 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
| 301 | { | ||
| 302 | size_t request_size; | ||
| 303 | |||
| 304 | /* setup DMA, then program endpoint CSR */ | ||
| 305 | request_size = min(request->length, | ||
| 306 | musb_ep->dma->max_len); | ||
| 307 | if (request_size <= musb_ep->packet_sz) | ||
| 308 | musb_ep->dma->desired_mode = 0; | ||
| 309 | else | ||
| 310 | musb_ep->dma->desired_mode = 1; | ||
| 311 | |||
| 312 | use_dma = use_dma && c->channel_program( | ||
| 313 | musb_ep->dma, musb_ep->packet_sz, | ||
| 314 | musb_ep->dma->desired_mode, | ||
| 315 | request->dma, request_size); | ||
| 316 | if (use_dma) { | ||
| 317 | if (musb_ep->dma->desired_mode == 0) { | ||
| 318 | /* ASSERT: DMAENAB is clear */ | ||
| 319 | csr &= ~(MUSB_TXCSR_AUTOSET | | ||
| 320 | MUSB_TXCSR_DMAMODE); | ||
| 321 | csr |= (MUSB_TXCSR_DMAENAB | | ||
| 322 | MUSB_TXCSR_MODE); | ||
| 323 | /* against programming guide */ | ||
| 324 | } else | ||
| 325 | csr |= (MUSB_TXCSR_AUTOSET | ||
| 326 | | MUSB_TXCSR_DMAENAB | ||
| 327 | | MUSB_TXCSR_DMAMODE | ||
| 328 | | MUSB_TXCSR_MODE); | ||
| 329 | |||
| 330 | csr &= ~MUSB_TXCSR_P_UNDERRUN; | ||
| 331 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 332 | } | ||
| 333 | } | ||
| 334 | |||
| 335 | #elif defined(CONFIG_USB_TI_CPPI_DMA) | ||
| 336 | /* program endpoint CSR first, then setup DMA */ | ||
| 337 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
| 338 | | MUSB_TXCSR_DMAMODE | ||
| 339 | | MUSB_TXCSR_P_UNDERRUN | ||
| 340 | | MUSB_TXCSR_TXPKTRDY); | ||
| 341 | csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB; | ||
| 342 | musb_writew(epio, MUSB_TXCSR, | ||
| 343 | (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) | ||
| 344 | | csr); | ||
| 345 | |||
| 346 | /* ensure writebuffer is empty */ | ||
| 347 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 348 | |||
| 349 | /* NOTE host side sets DMAENAB later than this; both are | ||
| 350 | * OK since the transfer dma glue (between CPPI and Mentor | ||
| 351 | * fifos) just tells CPPI it could start. Data only moves | ||
| 352 | * to the USB TX fifo when both fifos are ready. | ||
| 353 | */ | ||
| 354 | |||
| 355 | /* "mode" is irrelevant here; handle terminating ZLPs like | ||
| 356 | * PIO does, since the hardware RNDIS mode seems unreliable | ||
| 357 | * except for the last-packet-is-already-short case. | ||
| 358 | */ | ||
| 359 | use_dma = use_dma && c->channel_program( | ||
| 360 | musb_ep->dma, musb_ep->packet_sz, | ||
| 361 | 0, | ||
| 362 | request->dma, | ||
| 363 | request->length); | ||
| 364 | if (!use_dma) { | ||
| 365 | c->channel_release(musb_ep->dma); | ||
| 366 | musb_ep->dma = NULL; | ||
| 367 | /* ASSERT: DMAENAB clear */ | ||
| 368 | csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); | ||
| 369 | /* invariant: request->buf is non-null */ | ||
| 370 | } | ||
| 371 | #elif defined(CONFIG_USB_TUSB_OMAP_DMA) | ||
| 372 | use_dma = use_dma && c->channel_program( | ||
| 373 | musb_ep->dma, musb_ep->packet_sz, | ||
| 374 | request->zero, | ||
| 375 | request->dma, | ||
| 376 | request->length); | ||
| 377 | #endif | ||
| 378 | } | ||
| 379 | #endif | ||
| 380 | |||
| 381 | if (!use_dma) { | ||
| 382 | musb_write_fifo(musb_ep->hw_ep, fifo_count, | ||
| 383 | (u8 *) (request->buf + request->actual)); | ||
| 384 | request->actual += fifo_count; | ||
| 385 | csr |= MUSB_TXCSR_TXPKTRDY; | ||
| 386 | csr &= ~MUSB_TXCSR_P_UNDERRUN; | ||
| 387 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 388 | } | ||
| 389 | |||
| 390 | /* host may already have the data when this message shows... */ | ||
| 391 | DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", | ||
| 392 | musb_ep->end_point.name, use_dma ? "dma" : "pio", | ||
| 393 | request->actual, request->length, | ||
| 394 | musb_readw(epio, MUSB_TXCSR), | ||
| 395 | fifo_count, | ||
| 396 | musb_readw(epio, MUSB_TXMAXP)); | ||
| 397 | } | ||
| 398 | |||
| 399 | /* | ||
| 400 | * FIFO state update (e.g. data ready). | ||
| 401 | * Called from IRQ, with controller locked. | ||
| 402 | */ | ||
| 403 | void musb_g_tx(struct musb *musb, u8 epnum) | ||
| 404 | { | ||
| 405 | u16 csr; | ||
| 406 | struct usb_request *request; | ||
| 407 | u8 __iomem *mbase = musb->mregs; | ||
| 408 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; | ||
| 409 | void __iomem *epio = musb->endpoints[epnum].regs; | ||
| 410 | struct dma_channel *dma; | ||
| 411 | |||
| 412 | musb_ep_select(mbase, epnum); | ||
| 413 | request = next_request(musb_ep); | ||
| 414 | |||
| 415 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 416 | DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); | ||
| 417 | |||
| 418 | dma = is_dma_capable() ? musb_ep->dma : NULL; | ||
| 419 | do { | ||
| 420 | /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX | ||
| 421 | * probably rates reporting as a host error | ||
| 422 | */ | ||
| 423 | if (csr & MUSB_TXCSR_P_SENTSTALL) { | ||
| 424 | csr |= MUSB_TXCSR_P_WZC_BITS; | ||
| 425 | csr &= ~MUSB_TXCSR_P_SENTSTALL; | ||
| 426 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 427 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
| 428 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
| 429 | musb->dma_controller->channel_abort(dma); | ||
| 430 | } | ||
| 431 | |||
| 432 | if (request) | ||
| 433 | musb_g_giveback(musb_ep, request, -EPIPE); | ||
| 434 | |||
| 435 | break; | ||
| 436 | } | ||
| 437 | |||
| 438 | if (csr & MUSB_TXCSR_P_UNDERRUN) { | ||
| 439 | /* we NAKed, no big deal ... little reason to care */ | ||
| 440 | csr |= MUSB_TXCSR_P_WZC_BITS; | ||
| 441 | csr &= ~(MUSB_TXCSR_P_UNDERRUN | ||
| 442 | | MUSB_TXCSR_TXPKTRDY); | ||
| 443 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 444 | DBG(20, "underrun on ep%d, req %p\n", epnum, request); | ||
| 445 | } | ||
| 446 | |||
| 447 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
| 448 | /* SHOULD NOT HAPPEN ... has with cppi though, after | ||
| 449 | * changing SENDSTALL (and other cases); harmless? | ||
| 450 | */ | ||
| 451 | DBG(5, "%s dma still busy?\n", musb_ep->end_point.name); | ||
| 452 | break; | ||
| 453 | } | ||
| 454 | |||
| 455 | if (request) { | ||
| 456 | u8 is_dma = 0; | ||
| 457 | |||
| 458 | if (dma && (csr & MUSB_TXCSR_DMAENAB)) { | ||
| 459 | is_dma = 1; | ||
| 460 | csr |= MUSB_TXCSR_P_WZC_BITS; | ||
| 461 | csr &= ~(MUSB_TXCSR_DMAENAB | ||
| 462 | | MUSB_TXCSR_P_UNDERRUN | ||
| 463 | | MUSB_TXCSR_TXPKTRDY); | ||
| 464 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 465 | /* ensure writebuffer is empty */ | ||
| 466 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 467 | request->actual += musb_ep->dma->actual_len; | ||
| 468 | DBG(4, "TXCSR%d %04x, dma off, " | ||
| 469 | "len %zu, req %p\n", | ||
| 470 | epnum, csr, | ||
| 471 | musb_ep->dma->actual_len, | ||
| 472 | request); | ||
| 473 | } | ||
| 474 | |||
| 475 | if (is_dma || request->actual == request->length) { | ||
| 476 | |||
| 477 | /* First, maybe a terminating short packet. | ||
| 478 | * Some DMA engines might handle this by | ||
| 479 | * themselves. | ||
| 480 | */ | ||
| 481 | if ((request->zero | ||
| 482 | && request->length | ||
| 483 | && (request->length | ||
| 484 | % musb_ep->packet_sz) | ||
| 485 | == 0) | ||
| 486 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
| 487 | || (is_dma && | ||
| 488 | ((!dma->desired_mode) || | ||
| 489 | (request->actual & | ||
| 490 | (musb_ep->packet_sz - 1)))) | ||
| 491 | #endif | ||
| 492 | ) { | ||
| 493 | /* on dma completion, fifo may not | ||
| 494 | * be available yet ... | ||
| 495 | */ | ||
| 496 | if (csr & MUSB_TXCSR_TXPKTRDY) | ||
| 497 | break; | ||
| 498 | |||
| 499 | DBG(4, "sending zero pkt\n"); | ||
| 500 | musb_writew(epio, MUSB_TXCSR, | ||
| 501 | MUSB_TXCSR_MODE | ||
| 502 | | MUSB_TXCSR_TXPKTRDY); | ||
| 503 | request->zero = 0; | ||
| 504 | } | ||
| 505 | |||
| 506 | /* ... or if not, then complete it */ | ||
| 507 | musb_g_giveback(musb_ep, request, 0); | ||
| 508 | |||
| 509 | /* kickstart next transfer if appropriate; | ||
| 510 | * the packet that just completed might not | ||
| 511 | * be transmitted for hours or days. | ||
| 512 | * REVISIT for double buffering... | ||
| 513 | * FIXME revisit for stalls too... | ||
| 514 | */ | ||
| 515 | musb_ep_select(mbase, epnum); | ||
| 516 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 517 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) | ||
| 518 | break; | ||
| 519 | request = musb_ep->desc | ||
| 520 | ? next_request(musb_ep) | ||
| 521 | : NULL; | ||
| 522 | if (!request) { | ||
| 523 | DBG(4, "%s idle now\n", | ||
| 524 | musb_ep->end_point.name); | ||
| 525 | break; | ||
| 526 | } | ||
| 527 | } | ||
| 528 | |||
| 529 | txstate(musb, to_musb_request(request)); | ||
| 530 | } | ||
| 531 | |||
| 532 | } while (0); | ||
| 533 | } | ||
| 534 | |||
| 535 | /* ------------------------------------------------------------ */ | ||
| 536 | |||
| 537 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
| 538 | |||
| 539 | /* Peripheral rx (OUT) using Mentor DMA works as follows: | ||
| 540 | - Only mode 0 is used. | ||
| 541 | |||
| 542 | - Request is queued by the gadget class driver. | ||
| 543 | -> if queue was previously empty, rxstate() | ||
| 544 | |||
| 545 | - Host sends OUT token which causes an endpoint interrupt | ||
| 546 | /\ -> RxReady | ||
| 547 | | -> if request queued, call rxstate | ||
| 548 | | /\ -> setup DMA | ||
| 549 | | | -> DMA interrupt on completion | ||
| 550 | | | -> RxReady | ||
| 551 | | | -> stop DMA | ||
| 552 | | | -> ack the read | ||
| 553 | | | -> if data recd = max expected | ||
| 554 | | | by the request, or host | ||
| 555 | | | sent a short packet, | ||
| 556 | | | complete the request, | ||
| 557 | | | and start the next one. | ||
| 558 | | |_____________________________________| | ||
| 559 | | else just wait for the host | ||
| 560 | | to send the next OUT token. | ||
| 561 | |__________________________________________________| | ||
| 562 | |||
| 563 | * Non-Mentor DMA engines can of course work differently. | ||
| 564 | */ | ||
| 565 | |||
| 566 | #endif | ||
| 567 | |||
| 568 | /* | ||
| 569 | * Context: controller locked, IRQs blocked, endpoint selected | ||
| 570 | */ | ||
| 571 | static void rxstate(struct musb *musb, struct musb_request *req) | ||
| 572 | { | ||
| 573 | u16 csr = 0; | ||
| 574 | const u8 epnum = req->epnum; | ||
| 575 | struct usb_request *request = &req->request; | ||
| 576 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; | ||
| 577 | void __iomem *epio = musb->endpoints[epnum].regs; | ||
| 578 | u16 fifo_count = 0; | ||
| 579 | u16 len = musb_ep->packet_sz; | ||
| 580 | |||
| 581 | csr = musb_readw(epio, MUSB_RXCSR); | ||
| 582 | |||
| 583 | if (is_cppi_enabled() && musb_ep->dma) { | ||
| 584 | struct dma_controller *c = musb->dma_controller; | ||
| 585 | struct dma_channel *channel = musb_ep->dma; | ||
| 586 | |||
| 587 | /* NOTE: CPPI won't actually stop advancing the DMA | ||
| 588 | * queue after short packet transfers, so this is almost | ||
| 589 | * always going to run as IRQ-per-packet DMA so that | ||
| 590 | * faults will be handled correctly. | ||
| 591 | */ | ||
| 592 | if (c->channel_program(channel, | ||
| 593 | musb_ep->packet_sz, | ||
| 594 | !request->short_not_ok, | ||
| 595 | request->dma + request->actual, | ||
| 596 | request->length - request->actual)) { | ||
| 597 | |||
| 598 | /* make sure that if an rxpkt arrived after the irq, | ||
| 599 | * the cppi engine will be ready to take it as soon | ||
| 600 | * as DMA is enabled | ||
| 601 | */ | ||
| 602 | csr &= ~(MUSB_RXCSR_AUTOCLEAR | ||
| 603 | | MUSB_RXCSR_DMAMODE); | ||
| 604 | csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; | ||
| 605 | musb_writew(epio, MUSB_RXCSR, csr); | ||
| 606 | return; | ||
| 607 | } | ||
| 608 | } | ||
| 609 | |||
| 610 | if (csr & MUSB_RXCSR_RXPKTRDY) { | ||
| 611 | len = musb_readw(epio, MUSB_RXCOUNT); | ||
| 612 | if (request->actual < request->length) { | ||
| 613 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
| 614 | if (is_dma_capable() && musb_ep->dma) { | ||
| 615 | struct dma_controller *c; | ||
| 616 | struct dma_channel *channel; | ||
| 617 | int use_dma = 0; | ||
| 618 | |||
| 619 | c = musb->dma_controller; | ||
| 620 | channel = musb_ep->dma; | ||
| 621 | |||
| 622 | /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in | ||
| 623 | * mode 0 only. So we do not get endpoint interrupts due to DMA | ||
| 624 | * completion. We only get interrupts from DMA controller. | ||
| 625 | * | ||
| 626 | * We could operate in DMA mode 1 if we knew the size of the transfer | ||
| 627 | * in advance. For mass storage class, request->length = what the host | ||
| 628 | * sends, so that'd work. But for pretty much everything else, | ||
| 629 | * request->length is routinely more than what the host sends. For most | ||
| 630 | * of these gadgets, the end of transfer is signified either by a short | ||
| 631 | * packet, or by filling the last byte of the buffer. (Sending extra | ||
| 632 | * data in that last packet should trigger an overflow fault.) But in | ||
| 633 | * mode 1, we don't get a DMA completion interrupt for short packets. | ||
| 634 | * | ||
| 635 | * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), | ||
| 636 | * to get endpoint interrupt on every DMA req, but that didn't seem | ||
| 637 | * to work reliably. | ||
| 638 | * | ||
| 639 | * REVISIT an updated g_file_storage can set req->short_not_ok, which | ||
| 640 | * then becomes usable as a runtime "use mode 1" hint... | ||
| 641 | */ | ||
| 642 | |||
| 643 | csr |= MUSB_RXCSR_DMAENAB; | ||
| 644 | #ifdef USE_MODE1 | ||
| 645 | csr |= MUSB_RXCSR_AUTOCLEAR; | ||
| 646 | /* csr |= MUSB_RXCSR_DMAMODE; */ | ||
| 647 | |||
| 648 | /* this special sequence (enabling and then | ||
| 649 | * disabling MUSB_RXCSR_DMAMODE) is required | ||
| 650 | * to get DMAReq to activate | ||
| 651 | */ | ||
| 652 | musb_writew(epio, MUSB_RXCSR, | ||
| 653 | csr | MUSB_RXCSR_DMAMODE); | ||
| 654 | #endif | ||
| 655 | musb_writew(epio, MUSB_RXCSR, csr); | ||
| 656 | |||
| 657 | if (request->actual < request->length) { | ||
| 658 | int transfer_size = 0; | ||
| 659 | #ifdef USE_MODE1 | ||
| 660 | transfer_size = min(request->length, | ||
| 661 | channel->max_len); | ||
| 662 | #else | ||
| 663 | transfer_size = len; | ||
| 664 | #endif | ||
| 665 | if (transfer_size <= musb_ep->packet_sz) | ||
| 666 | musb_ep->dma->desired_mode = 0; | ||
| 667 | else | ||
| 668 | musb_ep->dma->desired_mode = 1; | ||
| 669 | |||
| 670 | use_dma = c->channel_program( | ||
| 671 | channel, | ||
| 672 | musb_ep->packet_sz, | ||
| 673 | channel->desired_mode, | ||
| 674 | request->dma | ||
| 675 | + request->actual, | ||
| 676 | transfer_size); | ||
| 677 | } | ||
| 678 | |||
| 679 | if (use_dma) | ||
| 680 | return; | ||
| 681 | } | ||
| 682 | #endif /* Mentor's DMA */ | ||
| 683 | |||
| 684 | fifo_count = request->length - request->actual; | ||
| 685 | DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", | ||
| 686 | musb_ep->end_point.name, | ||
| 687 | len, fifo_count, | ||
| 688 | musb_ep->packet_sz); | ||
| 689 | |||
| 690 | fifo_count = min(len, fifo_count); | ||
| 691 | |||
| 692 | #ifdef CONFIG_USB_TUSB_OMAP_DMA | ||
| 693 | if (tusb_dma_omap() && musb_ep->dma) { | ||
| 694 | struct dma_controller *c = musb->dma_controller; | ||
| 695 | struct dma_channel *channel = musb_ep->dma; | ||
| 696 | u32 dma_addr = request->dma + request->actual; | ||
| 697 | int ret; | ||
| 698 | |||
| 699 | ret = c->channel_program(channel, | ||
| 700 | musb_ep->packet_sz, | ||
| 701 | channel->desired_mode, | ||
| 702 | dma_addr, | ||
| 703 | fifo_count); | ||
| 704 | if (ret) | ||
| 705 | return; | ||
| 706 | } | ||
| 707 | #endif | ||
| 708 | |||
| 709 | musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) | ||
| 710 | (request->buf + request->actual)); | ||
| 711 | request->actual += fifo_count; | ||
| 712 | |||
| 713 | /* REVISIT if we left anything in the fifo, flush | ||
| 714 | * it and report -EOVERFLOW | ||
| 715 | */ | ||
| 716 | |||
| 717 | /* ack the read! */ | ||
| 718 | csr |= MUSB_RXCSR_P_WZC_BITS; | ||
| 719 | csr &= ~MUSB_RXCSR_RXPKTRDY; | ||
| 720 | musb_writew(epio, MUSB_RXCSR, csr); | ||
| 721 | } | ||
| 722 | } | ||
| 723 | |||
| 724 | /* reached the end, or a short packet was detected */ | ||
| 725 | if (request->actual == request->length || len < musb_ep->packet_sz) | ||
| 726 | musb_g_giveback(musb_ep, request, 0); | ||
| 727 | } | ||
| 728 | |||
| 729 | /* | ||
| 730 | * Data ready for a request; called from IRQ | ||
| 731 | */ | ||
| 732 | void musb_g_rx(struct musb *musb, u8 epnum) | ||
| 733 | { | ||
| 734 | u16 csr; | ||
| 735 | struct usb_request *request; | ||
| 736 | void __iomem *mbase = musb->mregs; | ||
| 737 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; | ||
| 738 | void __iomem *epio = musb->endpoints[epnum].regs; | ||
| 739 | struct dma_channel *dma; | ||
| 740 | |||
| 741 | musb_ep_select(mbase, epnum); | ||
| 742 | |||
| 743 | request = next_request(musb_ep); | ||
| 744 | |||
| 745 | csr = musb_readw(epio, MUSB_RXCSR); | ||
| 746 | dma = is_dma_capable() ? musb_ep->dma : NULL; | ||
| 747 | |||
| 748 | DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, | ||
| 749 | csr, dma ? " (dma)" : "", request); | ||
| 750 | |||
| 751 | if (csr & MUSB_RXCSR_P_SENTSTALL) { | ||
| 752 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
| 753 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
| 754 | (void) musb->dma_controller->channel_abort(dma); | ||
| 755 | request->actual += musb_ep->dma->actual_len; | ||
| 756 | } | ||
| 757 | |||
| 758 | csr |= MUSB_RXCSR_P_WZC_BITS; | ||
| 759 | csr &= ~MUSB_RXCSR_P_SENTSTALL; | ||
| 760 | musb_writew(epio, MUSB_RXCSR, csr); | ||
| 761 | |||
| 762 | if (request) | ||
| 763 | musb_g_giveback(musb_ep, request, -EPIPE); | ||
| 764 | goto done; | ||
| 765 | } | ||
| 766 | |||
| 767 | if (csr & MUSB_RXCSR_P_OVERRUN) { | ||
| 768 | /* csr |= MUSB_RXCSR_P_WZC_BITS; */ | ||
| 769 | csr &= ~MUSB_RXCSR_P_OVERRUN; | ||
| 770 | musb_writew(epio, MUSB_RXCSR, csr); | ||
| 771 | |||
| 772 | DBG(3, "%s iso overrun on %p\n", musb_ep->name, request); | ||
| 773 | if (request && request->status == -EINPROGRESS) | ||
| 774 | request->status = -EOVERFLOW; | ||
| 775 | } | ||
| 776 | if (csr & MUSB_RXCSR_INCOMPRX) { | ||
| 777 | /* REVISIT not necessarily an error */ | ||
| 778 | DBG(4, "%s, incomprx\n", musb_ep->end_point.name); | ||
| 779 | } | ||
| 780 | |||
| 781 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
| 782 | /* "should not happen"; likely RXPKTRDY pending for DMA */ | ||
| 783 | DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1, | ||
| 784 | "%s busy, csr %04x\n", | ||
| 785 | musb_ep->end_point.name, csr); | ||
| 786 | goto done; | ||
| 787 | } | ||
| 788 | |||
| 789 | if (dma && (csr & MUSB_RXCSR_DMAENAB)) { | ||
| 790 | csr &= ~(MUSB_RXCSR_AUTOCLEAR | ||
| 791 | | MUSB_RXCSR_DMAENAB | ||
| 792 | | MUSB_RXCSR_DMAMODE); | ||
| 793 | musb_writew(epio, MUSB_RXCSR, | ||
| 794 | MUSB_RXCSR_P_WZC_BITS | csr); | ||
| 795 | |||
| 796 | request->actual += musb_ep->dma->actual_len; | ||
| 797 | |||
| 798 | DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", | ||
| 799 | epnum, csr, | ||
| 800 | musb_readw(epio, MUSB_RXCSR), | ||
| 801 | musb_ep->dma->actual_len, request); | ||
| 802 | |||
| 803 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) | ||
| 804 | /* Autoclear doesn't clear RxPktRdy for short packets */ | ||
| 805 | if ((dma->desired_mode == 0) | ||
| 806 | || (dma->actual_len | ||
| 807 | & (musb_ep->packet_sz - 1))) { | ||
| 808 | /* ack the read! */ | ||
| 809 | csr &= ~MUSB_RXCSR_RXPKTRDY; | ||
| 810 | musb_writew(epio, MUSB_RXCSR, csr); | ||
| 811 | } | ||
| 812 | |||
| 813 | /* incomplete, and not short? wait for next IN packet */ | ||
| 814 | if ((request->actual < request->length) | ||
| 815 | && (musb_ep->dma->actual_len | ||
| 816 | == musb_ep->packet_sz)) | ||
| 817 | goto done; | ||
| 818 | #endif | ||
| 819 | musb_g_giveback(musb_ep, request, 0); | ||
| 820 | |||
| 821 | request = next_request(musb_ep); | ||
| 822 | if (!request) | ||
| 823 | goto done; | ||
| 824 | |||
| 825 | /* don't start more i/o till the stall clears */ | ||
| 826 | musb_ep_select(mbase, epnum); | ||
| 827 | csr = musb_readw(epio, MUSB_RXCSR); | ||
| 828 | if (csr & MUSB_RXCSR_P_SENDSTALL) | ||
| 829 | goto done; | ||
| 830 | } | ||
| 831 | |||
| 832 | |||
| 833 | /* analyze request if the ep is hot */ | ||
| 834 | if (request) | ||
| 835 | rxstate(musb, to_musb_request(request)); | ||
| 836 | else | ||
| 837 | DBG(3, "packet waiting for %s%s request\n", | ||
| 838 | musb_ep->desc ? "" : "inactive ", | ||
| 839 | musb_ep->end_point.name); | ||
| 840 | |||
| 841 | done: | ||
| 842 | return; | ||
| 843 | } | ||
| 844 | |||
| 845 | /* ------------------------------------------------------------ */ | ||
| 846 | |||
| 847 | static int musb_gadget_enable(struct usb_ep *ep, | ||
| 848 | const struct usb_endpoint_descriptor *desc) | ||
| 849 | { | ||
| 850 | unsigned long flags; | ||
| 851 | struct musb_ep *musb_ep; | ||
| 852 | struct musb_hw_ep *hw_ep; | ||
| 853 | void __iomem *regs; | ||
| 854 | struct musb *musb; | ||
| 855 | void __iomem *mbase; | ||
| 856 | u8 epnum; | ||
| 857 | u16 csr; | ||
| 858 | unsigned tmp; | ||
| 859 | int status = -EINVAL; | ||
| 860 | |||
| 861 | if (!ep || !desc) | ||
| 862 | return -EINVAL; | ||
| 863 | |||
| 864 | musb_ep = to_musb_ep(ep); | ||
| 865 | hw_ep = musb_ep->hw_ep; | ||
| 866 | regs = hw_ep->regs; | ||
| 867 | musb = musb_ep->musb; | ||
| 868 | mbase = musb->mregs; | ||
| 869 | epnum = musb_ep->current_epnum; | ||
| 870 | |||
| 871 | spin_lock_irqsave(&musb->lock, flags); | ||
| 872 | |||
| 873 | if (musb_ep->desc) { | ||
| 874 | status = -EBUSY; | ||
| 875 | goto fail; | ||
| 876 | } | ||
| 877 | musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; | ||
| 878 | |||
| 879 | /* check direction and (later) maxpacket size against endpoint */ | ||
| 880 | if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum) | ||
| 881 | goto fail; | ||
| 882 | |||
| 883 | /* REVISIT this rules out high bandwidth periodic transfers */ | ||
| 884 | tmp = le16_to_cpu(desc->wMaxPacketSize); | ||
| 885 | if (tmp & ~0x07ff) | ||
| 886 | goto fail; | ||
| 887 | musb_ep->packet_sz = tmp; | ||
| 888 | |||
| 889 | /* enable the interrupts for the endpoint, set the endpoint | ||
| 890 | * packet size (or fail), set the mode, clear the fifo | ||
| 891 | */ | ||
| 892 | musb_ep_select(mbase, epnum); | ||
| 893 | if (desc->bEndpointAddress & USB_DIR_IN) { | ||
| 894 | u16 int_txe = musb_readw(mbase, MUSB_INTRTXE); | ||
| 895 | |||
| 896 | if (hw_ep->is_shared_fifo) | ||
| 897 | musb_ep->is_in = 1; | ||
| 898 | if (!musb_ep->is_in) | ||
| 899 | goto fail; | ||
| 900 | if (tmp > hw_ep->max_packet_sz_tx) | ||
| 901 | goto fail; | ||
| 902 | |||
| 903 | int_txe |= (1 << epnum); | ||
| 904 | musb_writew(mbase, MUSB_INTRTXE, int_txe); | ||
| 905 | |||
| 906 | /* REVISIT if can_bulk_split(), use by updating "tmp"; | ||
| 907 | * likewise high bandwidth periodic tx | ||
| 908 | */ | ||
| 909 | musb_writew(regs, MUSB_TXMAXP, tmp); | ||
| 910 | |||
| 911 | csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; | ||
| 912 | if (musb_readw(regs, MUSB_TXCSR) | ||
| 913 | & MUSB_TXCSR_FIFONOTEMPTY) | ||
| 914 | csr |= MUSB_TXCSR_FLUSHFIFO; | ||
| 915 | if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) | ||
| 916 | csr |= MUSB_TXCSR_P_ISO; | ||
| 917 | |||
| 918 | /* set twice in case of double buffering */ | ||
| 919 | musb_writew(regs, MUSB_TXCSR, csr); | ||
| 920 | /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ | ||
| 921 | musb_writew(regs, MUSB_TXCSR, csr); | ||
| 922 | |||
| 923 | } else { | ||
| 924 | u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE); | ||
| 925 | |||
| 926 | if (hw_ep->is_shared_fifo) | ||
| 927 | musb_ep->is_in = 0; | ||
| 928 | if (musb_ep->is_in) | ||
| 929 | goto fail; | ||
| 930 | if (tmp > hw_ep->max_packet_sz_rx) | ||
| 931 | goto fail; | ||
| 932 | |||
| 933 | int_rxe |= (1 << epnum); | ||
| 934 | musb_writew(mbase, MUSB_INTRRXE, int_rxe); | ||
| 935 | |||
| 936 | /* REVISIT if can_bulk_combine() use by updating "tmp" | ||
| 937 | * likewise high bandwidth periodic rx | ||
| 938 | */ | ||
| 939 | musb_writew(regs, MUSB_RXMAXP, tmp); | ||
| 940 | |||
| 941 | /* force shared fifo to OUT-only mode */ | ||
| 942 | if (hw_ep->is_shared_fifo) { | ||
| 943 | csr = musb_readw(regs, MUSB_TXCSR); | ||
| 944 | csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); | ||
| 945 | musb_writew(regs, MUSB_TXCSR, csr); | ||
| 946 | } | ||
| 947 | |||
| 948 | csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; | ||
| 949 | if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) | ||
| 950 | csr |= MUSB_RXCSR_P_ISO; | ||
| 951 | else if (musb_ep->type == USB_ENDPOINT_XFER_INT) | ||
| 952 | csr |= MUSB_RXCSR_DISNYET; | ||
| 953 | |||
| 954 | /* set twice in case of double buffering */ | ||
| 955 | musb_writew(regs, MUSB_RXCSR, csr); | ||
| 956 | musb_writew(regs, MUSB_RXCSR, csr); | ||
| 957 | } | ||
| 958 | |||
| 959 | /* NOTE: all the I/O code _should_ work fine without DMA, in case | ||
| 960 | * for some reason you run out of channels here. | ||
| 961 | */ | ||
| 962 | if (is_dma_capable() && musb->dma_controller) { | ||
| 963 | struct dma_controller *c = musb->dma_controller; | ||
| 964 | |||
| 965 | musb_ep->dma = c->channel_alloc(c, hw_ep, | ||
| 966 | (desc->bEndpointAddress & USB_DIR_IN)); | ||
| 967 | } else | ||
| 968 | musb_ep->dma = NULL; | ||
| 969 | |||
| 970 | musb_ep->desc = desc; | ||
| 971 | musb_ep->busy = 0; | ||
| 972 | status = 0; | ||
| 973 | |||
| 974 | pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", | ||
| 975 | musb_driver_name, musb_ep->end_point.name, | ||
| 976 | ({ char *s; switch (musb_ep->type) { | ||
| 977 | case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; | ||
| 978 | case USB_ENDPOINT_XFER_INT: s = "int"; break; | ||
| 979 | default: s = "iso"; break; | ||
| 980 | }; s; }), | ||
| 981 | musb_ep->is_in ? "IN" : "OUT", | ||
| 982 | musb_ep->dma ? "dma, " : "", | ||
| 983 | musb_ep->packet_sz); | ||
| 984 | |||
| 985 | schedule_work(&musb->irq_work); | ||
| 986 | |||
| 987 | fail: | ||
| 988 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 989 | return status; | ||
| 990 | } | ||
| 991 | |||
| 992 | /* | ||
| 993 | * Disable an endpoint, flushing all queued requests. | ||
| 994 | */ | ||
| 995 | static int musb_gadget_disable(struct usb_ep *ep) | ||
| 996 | { | ||
| 997 | unsigned long flags; | ||
| 998 | struct musb *musb; | ||
| 999 | u8 epnum; | ||
| 1000 | struct musb_ep *musb_ep; | ||
| 1001 | void __iomem *epio; | ||
| 1002 | int status = 0; | ||
| 1003 | |||
| 1004 | musb_ep = to_musb_ep(ep); | ||
| 1005 | musb = musb_ep->musb; | ||
| 1006 | epnum = musb_ep->current_epnum; | ||
| 1007 | epio = musb->endpoints[epnum].regs; | ||
| 1008 | |||
| 1009 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1010 | musb_ep_select(musb->mregs, epnum); | ||
| 1011 | |||
| 1012 | /* zero the endpoint sizes */ | ||
| 1013 | if (musb_ep->is_in) { | ||
| 1014 | u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE); | ||
| 1015 | int_txe &= ~(1 << epnum); | ||
| 1016 | musb_writew(musb->mregs, MUSB_INTRTXE, int_txe); | ||
| 1017 | musb_writew(epio, MUSB_TXMAXP, 0); | ||
| 1018 | } else { | ||
| 1019 | u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE); | ||
| 1020 | int_rxe &= ~(1 << epnum); | ||
| 1021 | musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe); | ||
| 1022 | musb_writew(epio, MUSB_RXMAXP, 0); | ||
| 1023 | } | ||
| 1024 | |||
| 1025 | musb_ep->desc = NULL; | ||
| 1026 | |||
| 1027 | /* abort all pending DMA and requests */ | ||
| 1028 | nuke(musb_ep, -ESHUTDOWN); | ||
| 1029 | |||
| 1030 | schedule_work(&musb->irq_work); | ||
| 1031 | |||
| 1032 | spin_unlock_irqrestore(&(musb->lock), flags); | ||
| 1033 | |||
| 1034 | DBG(2, "%s\n", musb_ep->end_point.name); | ||
| 1035 | |||
| 1036 | return status; | ||
| 1037 | } | ||
| 1038 | |||
| 1039 | /* | ||
| 1040 | * Allocate a request for an endpoint. | ||
| 1041 | * Reused by ep0 code. | ||
| 1042 | */ | ||
| 1043 | struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) | ||
| 1044 | { | ||
| 1045 | struct musb_ep *musb_ep = to_musb_ep(ep); | ||
| 1046 | struct musb_request *request = NULL; | ||
| 1047 | |||
| 1048 | request = kzalloc(sizeof *request, gfp_flags); | ||
| 1049 | if (!request) | ||
| 1050 | return NULL; | ||
| 1051 | INIT_LIST_HEAD(&request->request.list); | ||
| 1052 | request->request.dma = DMA_ADDR_INVALID; | ||
| 1053 | request->epnum = musb_ep->current_epnum; | ||
| 1054 | request->ep = musb_ep; | ||
| 1055 | |||
| 1056 | return &request->request; | ||
| 1057 | } | ||
| 1058 | |||
| 1059 | /* | ||
| 1060 | * Free a request | ||
| 1061 | * Reused by ep0 code. | ||
| 1062 | */ | ||
| 1063 | void musb_free_request(struct usb_ep *ep, struct usb_request *req) | ||
| 1064 | { | ||
| 1065 | kfree(to_musb_request(req)); | ||
| 1066 | } | ||
| 1067 | |||
| 1068 | static LIST_HEAD(buffers); | ||
| 1069 | |||
| 1070 | struct free_record { | ||
| 1071 | struct list_head list; | ||
| 1072 | struct device *dev; | ||
| 1073 | unsigned bytes; | ||
| 1074 | dma_addr_t dma; | ||
| 1075 | }; | ||
| 1076 | |||
| 1077 | /* | ||
| 1078 | * Context: controller locked, IRQs blocked. | ||
| 1079 | */ | ||
| 1080 | static void musb_ep_restart(struct musb *musb, struct musb_request *req) | ||
| 1081 | { | ||
| 1082 | DBG(3, "<== %s request %p len %u on hw_ep%d\n", | ||
| 1083 | req->tx ? "TX/IN" : "RX/OUT", | ||
| 1084 | &req->request, req->request.length, req->epnum); | ||
| 1085 | |||
| 1086 | musb_ep_select(musb->mregs, req->epnum); | ||
| 1087 | if (req->tx) | ||
| 1088 | txstate(musb, req); | ||
| 1089 | else | ||
| 1090 | rxstate(musb, req); | ||
| 1091 | } | ||
| 1092 | |||
| 1093 | static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, | ||
| 1094 | gfp_t gfp_flags) | ||
| 1095 | { | ||
| 1096 | struct musb_ep *musb_ep; | ||
| 1097 | struct musb_request *request; | ||
| 1098 | struct musb *musb; | ||
| 1099 | int status = 0; | ||
| 1100 | unsigned long lockflags; | ||
| 1101 | |||
| 1102 | if (!ep || !req) | ||
| 1103 | return -EINVAL; | ||
| 1104 | if (!req->buf) | ||
| 1105 | return -ENODATA; | ||
| 1106 | |||
| 1107 | musb_ep = to_musb_ep(ep); | ||
| 1108 | musb = musb_ep->musb; | ||
| 1109 | |||
| 1110 | request = to_musb_request(req); | ||
| 1111 | request->musb = musb; | ||
| 1112 | |||
| 1113 | if (request->ep != musb_ep) | ||
| 1114 | return -EINVAL; | ||
| 1115 | |||
| 1116 | DBG(4, "<== to %s request=%p\n", ep->name, req); | ||
| 1117 | |||
| 1118 | /* request is mine now... */ | ||
| 1119 | request->request.actual = 0; | ||
| 1120 | request->request.status = -EINPROGRESS; | ||
| 1121 | request->epnum = musb_ep->current_epnum; | ||
| 1122 | request->tx = musb_ep->is_in; | ||
| 1123 | |||
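| | /* Prepare the buffer: map it for DMA here if the gadget driver | ||
| | * did not pre-map it; a pre-mapped buffer only needs a sync. | ||
| | */ | ||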
| 1124 | if (is_dma_capable() && musb_ep->dma) { | ||
| 1125 | if (request->request.dma == DMA_ADDR_INVALID) { | ||
| 1126 | request->request.dma = dma_map_single( | ||
| 1127 | musb->controller, | ||
| 1128 | request->request.buf, | ||
| 1129 | request->request.length, | ||
| 1130 | request->tx | ||
| 1131 | ? DMA_TO_DEVICE | ||
| 1132 | : DMA_FROM_DEVICE); | ||
| 1133 | request->mapped = 1; | ||
| 1134 | } else { | ||
| 1135 | dma_sync_single_for_device(musb->controller, | ||
| 1136 | request->request.dma, | ||
| 1137 | request->request.length, | ||
| 1138 | request->tx | ||
| 1139 | ? DMA_TO_DEVICE | ||
| 1140 | : DMA_FROM_DEVICE); | ||
| 1141 | request->mapped = 0; | ||
| 1142 | } | ||
| 1143 | } else { | ||
| 1144 | /* PIO: req->buf (already checked non-NULL above) is used in place */ | ||
| 1145 | request->mapped = 0; | ||
| 1146 | } | ||
| 1147 | |||
| 1148 | spin_lock_irqsave(&musb->lock, lockflags); | ||
| 1149 | |||
| 1150 | /* don't queue if the ep is down */ | ||
| 1151 | if (!musb_ep->desc) { | ||
| 1152 | DBG(4, "req %p queued to %s while ep %s\n", | ||
| 1153 | req, ep->name, "disabled"); | ||
| 1154 | status = -ESHUTDOWN; | ||
| 1155 | goto cleanup; | ||
| 1156 | } | ||
| 1157 | |||
| 1158 | /* add request to the list */ | ||
| 1159 | list_add_tail(&(request->request.list), &(musb_ep->req_list)); | ||
| 1160 | |||
| 1161 | /* if this is the head of the queue, start i/o ... */ | ||
| 1162 | if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next) | ||
| 1163 | musb_ep_restart(musb, request); | ||
| 1164 | |||
| 1165 | cleanup: | ||
| 1166 | spin_unlock_irqrestore(&musb->lock, lockflags); | ||
| 1167 | return status; | ||
| 1168 | } | ||
| 1169 | |||
| 1170 | static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) | ||
| 1171 | { | ||
| 1172 | struct musb_ep *musb_ep = to_musb_ep(ep); | ||
| 1173 | struct usb_request *r; | ||
| 1174 | unsigned long flags; | ||
| 1175 | int status = 0; | ||
| 1176 | struct musb *musb = musb_ep->musb; | ||
| 1177 | |||
| 1178 | if (!ep || !request || to_musb_request(request)->ep != musb_ep) | ||
| 1179 | return -EINVAL; | ||
| 1180 | |||
| 1181 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1182 | |||
| 1183 | list_for_each_entry(r, &musb_ep->req_list, list) { | ||
| 1184 | if (r == request) | ||
| 1185 | break; | ||
| 1186 | } | ||
| 1187 | if (r != request) { | ||
| 1188 | DBG(3, "request %p not queued to %s\n", request, ep->name); | ||
| 1189 | status = -EINVAL; | ||
| 1190 | goto done; | ||
| 1191 | } | ||
| 1192 | |||
| 1193 | /* if the hardware doesn't have the request, easy ... */ | ||
| 1194 | if (musb_ep->req_list.next != &request->list || musb_ep->busy) | ||
| 1195 | musb_g_giveback(musb_ep, request, -ECONNRESET); | ||
| 1196 | |||
| 1197 | /* ... else abort the dma transfer ... */ | ||
| 1198 | else if (is_dma_capable() && musb_ep->dma) { | ||
| 1199 | struct dma_controller *c = musb->dma_controller; | ||
| 1200 | |||
| 1201 | musb_ep_select(musb->mregs, musb_ep->current_epnum); | ||
| 1202 | if (c->channel_abort) | ||
| 1203 | status = c->channel_abort(musb_ep->dma); | ||
| 1204 | else | ||
| 1205 | status = -EBUSY; | ||
| 1206 | if (status == 0) | ||
| 1207 | musb_g_giveback(musb_ep, request, -ECONNRESET); | ||
| 1208 | } else { | ||
| 1209 | /* NOTE: by sticking to easily tested hardware/driver states, | ||
| 1210 | * we leave counting of in-flight packets imprecise. | ||
| 1211 | */ | ||
| 1212 | musb_g_giveback(musb_ep, request, -ECONNRESET); | ||
| 1213 | } | ||
| 1214 | |||
| 1215 | done: | ||
| 1216 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1217 | return status; | ||
| 1218 | } | ||
| 1219 | |||
| 1220 | /* | ||
| 1221 | * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any | ||
| 1222 | * data but will queue requests. | ||
| 1223 | * | ||
| 1224 | * exported to ep0 code | ||
| 1225 | */ | ||
| 1226 | int musb_gadget_set_halt(struct usb_ep *ep, int value) | ||
| 1227 | { | ||
| 1228 | struct musb_ep *musb_ep = to_musb_ep(ep); | ||
| 1229 | u8 epnum = musb_ep->current_epnum; | ||
| 1230 | struct musb *musb = musb_ep->musb; | ||
| 1231 | void __iomem *epio = musb->endpoints[epnum].regs; | ||
| 1232 | void __iomem *mbase; | ||
| 1233 | unsigned long flags; | ||
| 1234 | u16 csr; | ||
| 1235 | struct musb_request *request = NULL; | ||
| 1236 | int status = 0; | ||
| 1237 | |||
| 1238 | if (!ep) | ||
| 1239 | return -EINVAL; | ||
| 1240 | mbase = musb->mregs; | ||
| 1241 | |||
| 1242 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1243 | |||
| 1244 | if (USB_ENDPOINT_XFER_ISOC == musb_ep->type) { | ||
| 1245 | status = -EINVAL; | ||
| 1246 | goto done; | ||
| 1247 | } | ||
| 1248 | |||
| 1249 | musb_ep_select(mbase, epnum); | ||
| 1250 | |||
| 1251 | /* cannot portably stall with non-empty FIFO */ | ||
| 1252 | request = to_musb_request(next_request(musb_ep)); | ||
| 1253 | if (value && musb_ep->is_in) { | ||
| 1254 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 1255 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { | ||
| 1256 | DBG(3, "%s fifo busy, cannot halt\n", ep->name); | ||
| 1257 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1258 | return -EAGAIN; | ||
| 1259 | } | ||
| 1260 | |||
| 1261 | } | ||
| 1262 | |||
| 1263 | /* set/clear the stall and toggle bits */ | ||
| 1264 | DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear"); | ||
| 1265 | if (musb_ep->is_in) { | ||
| 1266 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 1267 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) | ||
| 1268 | csr |= MUSB_TXCSR_FLUSHFIFO; | ||
| 1269 | csr |= MUSB_TXCSR_P_WZC_BITS | ||
| 1270 | | MUSB_TXCSR_CLRDATATOG; | ||
| 1271 | if (value) | ||
| 1272 | csr |= MUSB_TXCSR_P_SENDSTALL; | ||
| 1273 | else | ||
| 1274 | csr &= ~(MUSB_TXCSR_P_SENDSTALL | ||
| 1275 | | MUSB_TXCSR_P_SENTSTALL); | ||
| 1276 | csr &= ~MUSB_TXCSR_TXPKTRDY; | ||
| 1277 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 1278 | } else { | ||
| 1279 | csr = musb_readw(epio, MUSB_RXCSR); | ||
| 1280 | csr |= MUSB_RXCSR_P_WZC_BITS | ||
| 1281 | | MUSB_RXCSR_FLUSHFIFO | ||
| 1282 | | MUSB_RXCSR_CLRDATATOG; | ||
| 1283 | if (value) | ||
| 1284 | csr |= MUSB_RXCSR_P_SENDSTALL; | ||
| 1285 | else | ||
| 1286 | csr &= ~(MUSB_RXCSR_P_SENDSTALL | ||
| 1287 | | MUSB_RXCSR_P_SENTSTALL); | ||
| 1288 | musb_writew(epio, MUSB_RXCSR, csr); | ||
| 1289 | } | ||
| 1290 | |||
| 1291 | done: | ||
| 1292 | |||
| 1293 | /* maybe start the first request in the queue */ | ||
| 1294 | if (!musb_ep->busy && !value && request) { | ||
| 1295 | DBG(3, "restarting the request\n"); | ||
| 1296 | musb_ep_restart(musb, request); | ||
| 1297 | } | ||
| 1298 | |||
| 1299 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1300 | return status; | ||
| 1301 | } | ||
| 1302 | |||
| 1303 | static int musb_gadget_fifo_status(struct usb_ep *ep) | ||
| 1304 | { | ||
| 1305 | struct musb_ep *musb_ep = to_musb_ep(ep); | ||
| 1306 | void __iomem *epio = musb_ep->hw_ep->regs; | ||
| 1307 | int retval = -EINVAL; | ||
| 1308 | |||
| 1309 | if (musb_ep->desc && !musb_ep->is_in) { | ||
| 1310 | struct musb *musb = musb_ep->musb; | ||
| 1311 | int epnum = musb_ep->current_epnum; | ||
| 1312 | void __iomem *mbase = musb->mregs; | ||
| 1313 | unsigned long flags; | ||
| 1314 | |||
| 1315 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1316 | |||
| 1317 | musb_ep_select(mbase, epnum); | ||
| 1318 | /* FIXME return zero unless RXPKTRDY is set */ | ||
| 1319 | retval = musb_readw(epio, MUSB_RXCOUNT); | ||
| 1320 | |||
| 1321 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1322 | } | ||
| 1323 | return retval; | ||
| 1324 | } | ||
| 1325 | |||
| 1326 | static void musb_gadget_fifo_flush(struct usb_ep *ep) | ||
| 1327 | { | ||
| 1328 | struct musb_ep *musb_ep = to_musb_ep(ep); | ||
| 1329 | struct musb *musb = musb_ep->musb; | ||
| 1330 | u8 epnum = musb_ep->current_epnum; | ||
| 1331 | void __iomem *epio = musb->endpoints[epnum].regs; | ||
| 1332 | void __iomem *mbase; | ||
| 1333 | unsigned long flags; | ||
| 1334 | u16 csr, int_txe; | ||
| 1335 | |||
| 1336 | mbase = musb->mregs; | ||
| 1337 | |||
| 1338 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1339 | musb_ep_select(mbase, (u8) epnum); | ||
| 1340 | |||
| 1341 | /* disable interrupts */ | ||
| 1342 | int_txe = musb_readw(mbase, MUSB_INTRTXE); | ||
| 1343 | musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); | ||
| 1344 | |||
| 1345 | if (musb_ep->is_in) { | ||
| 1346 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 1347 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { | ||
| 1348 | csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; | ||
| 1349 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 1350 | /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ | ||
| 1351 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 1352 | } | ||
| 1353 | } else { | ||
| 1354 | csr = musb_readw(epio, MUSB_RXCSR); | ||
| 1355 | csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; | ||
| 1356 | musb_writew(epio, MUSB_RXCSR, csr); | ||
| 1357 | musb_writew(epio, MUSB_RXCSR, csr); | ||
| 1358 | } | ||
| 1359 | |||
| 1360 | /* re-enable interrupt */ | ||
| 1361 | musb_writew(mbase, MUSB_INTRTXE, int_txe); | ||
| 1362 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1363 | } | ||
| 1364 | |||
| 1365 | static const struct usb_ep_ops musb_ep_ops = { | ||
| 1366 | .enable = musb_gadget_enable, | ||
| 1367 | .disable = musb_gadget_disable, | ||
| 1368 | .alloc_request = musb_alloc_request, | ||
| 1369 | .free_request = musb_free_request, | ||
| 1370 | .queue = musb_gadget_queue, | ||
| 1371 | .dequeue = musb_gadget_dequeue, | ||
| 1372 | .set_halt = musb_gadget_set_halt, | ||
| 1373 | .fifo_status = musb_gadget_fifo_status, | ||
| 1374 | .fifo_flush = musb_gadget_fifo_flush | ||
| 1375 | }; | ||
| 1376 | |||
| 1377 | /* ----------------------------------------------------------------------- */ | ||
| 1378 | |||
| 1379 | static int musb_gadget_get_frame(struct usb_gadget *gadget) | ||
| 1380 | { | ||
| 1381 | struct musb *musb = gadget_to_musb(gadget); | ||
| 1382 | |||
| 1383 | return (int)musb_readw(musb->mregs, MUSB_FRAME); | ||
| 1384 | } | ||
| 1385 | |||
| 1386 | static int musb_gadget_wakeup(struct usb_gadget *gadget) | ||
| 1387 | { | ||
| 1388 | struct musb *musb = gadget_to_musb(gadget); | ||
| 1389 | void __iomem *mregs = musb->mregs; | ||
| 1390 | unsigned long flags; | ||
| 1391 | int status = -EINVAL; | ||
| 1392 | u8 power, devctl; | ||
| 1393 | int retries; | ||
| 1394 | |||
| 1395 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1396 | |||
| 1397 | switch (musb->xceiv.state) { | ||
| 1398 | case OTG_STATE_B_PERIPHERAL: | ||
| 1399 | /* NOTE: OTG state machine doesn't include B_SUSPENDED; | ||
| 1400 | * that's part of the standard usb 1.1 state machine, and | ||
| 1401 | * doesn't affect OTG transitions. | ||
| 1402 | */ | ||
| 1403 | if (musb->may_wakeup && musb->is_suspended) | ||
| 1404 | break; | ||
| 1405 | goto done; | ||
| 1406 | case OTG_STATE_B_IDLE: | ||
| 1407 | /* Start SRP ... OTG not required. */ | ||
| 1408 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
| 1409 | DBG(2, "Sending SRP: devctl: %02x\n", devctl); | ||
| 1410 | devctl |= MUSB_DEVCTL_SESSION; | ||
| 1411 | musb_writeb(mregs, MUSB_DEVCTL, devctl); | ||
| 1412 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
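| | /* bounded polls: first wait for SESSION to latch, then for the | ||
| | * controller to drop it again once the SRP pulse is done | ||
| | */ | ||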
| 1413 | retries = 100; | ||
| 1414 | while (!(devctl & MUSB_DEVCTL_SESSION)) { | ||
| 1415 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
| 1416 | if (retries-- < 1) | ||
| 1417 | break; | ||
| 1418 | } | ||
| 1419 | retries = 10000; | ||
| 1420 | while (devctl & MUSB_DEVCTL_SESSION) { | ||
| 1421 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
| 1422 | if (retries-- < 1) | ||
| 1423 | break; | ||
| 1424 | } | ||
| 1425 | |||
| 1426 | /* Block idling for at least 1s */ | ||
| 1427 | musb_platform_try_idle(musb, | ||
| 1428 | jiffies + msecs_to_jiffies(1000)); | ||
| 1429 | |||
| 1430 | status = 0; | ||
| 1431 | goto done; | ||
| 1432 | default: | ||
| 1433 | DBG(2, "Unhandled wake: %s\n", otg_state_string(musb)); | ||
| 1434 | goto done; | ||
| 1435 | } | ||
| 1436 | |||
| 1437 | status = 0; | ||
| 1438 | |||
| 1439 | power = musb_readb(mregs, MUSB_POWER); | ||
| 1440 | power |= MUSB_POWER_RESUME; | ||
| 1441 | musb_writeb(mregs, MUSB_POWER, power); | ||
| 1442 | DBG(2, "issue wakeup\n"); | ||
| 1443 | |||
| 1444 | /* FIXME do this next chunk in a timer callback, avoiding mdelay */ | ||
| 1445 | mdelay(2); | ||
| 1446 | |||
| 1447 | power = musb_readb(mregs, MUSB_POWER); | ||
| 1448 | power &= ~MUSB_POWER_RESUME; | ||
| 1449 | musb_writeb(mregs, MUSB_POWER, power); | ||
| 1450 | done: | ||
| 1451 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1452 | return status; | ||
| 1453 | } | ||
| 1454 | |||
| 1455 | static int | ||
| 1456 | musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered) | ||
| 1457 | { | ||
| 1458 | struct musb *musb = gadget_to_musb(gadget); | ||
| 1459 | |||
| 1460 | musb->is_self_powered = !!is_selfpowered; | ||
| 1461 | return 0; | ||
| 1462 | } | ||
| 1463 | |||
| 1464 | static void musb_pullup(struct musb *musb, int is_on) | ||
| 1465 | { | ||
| 1466 | u8 power; | ||
| 1467 | |||
| 1468 | power = musb_readb(musb->mregs, MUSB_POWER); | ||
| 1469 | if (is_on) | ||
| 1470 | power |= MUSB_POWER_SOFTCONN; | ||
| 1471 | else | ||
| 1472 | power &= ~MUSB_POWER_SOFTCONN; | ||
| 1473 | |||
| 1474 | /* FIXME if on, HdrcStart; if off, HdrcStop */ | ||
| 1475 | |||
| 1476 | DBG(3, "gadget %s D+ pullup %s\n", | ||
| 1477 | musb->gadget_driver->function, is_on ? "on" : "off"); | ||
| 1478 | musb_writeb(musb->mregs, MUSB_POWER, power); | ||
| 1479 | } | ||
| 1480 | |||
| 1481 | #if 0 | ||
| 1482 | static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) | ||
| 1483 | { | ||
| 1484 | DBG(2, "<= %s =>\n", __func__); | ||
| 1485 | |||
| 1486 | /* | ||
| 1487 | * FIXME iff driver's softconnect flag is set (as it is during probe, | ||
| 1488 | * though that can clear it), just musb_pullup(). | ||
| 1489 | */ | ||
| 1490 | |||
| 1491 | return -EINVAL; | ||
| 1492 | } | ||
| 1493 | #endif | ||
| 1494 | |||
| 1495 | static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) | ||
| 1496 | { | ||
| 1497 | struct musb *musb = gadget_to_musb(gadget); | ||
| 1498 | |||
| 1499 | if (!musb->xceiv.set_power) | ||
| 1500 | return -EOPNOTSUPP; | ||
| 1501 | return otg_set_power(&musb->xceiv, mA); | ||
| 1502 | } | ||
| 1503 | |||
| 1504 | static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) | ||
| 1505 | { | ||
| 1506 | struct musb *musb = gadget_to_musb(gadget); | ||
| 1507 | unsigned long flags; | ||
| 1508 | |||
| 1509 | is_on = !!is_on; | ||
| 1510 | |||
| 1511 | /* NOTE: this assumes we are sensing vbus; we'd rather | ||
| 1512 | * not pullup unless the B-session is active. | ||
| 1513 | */ | ||
| 1514 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1515 | if (is_on != musb->softconnect) { | ||
| 1516 | musb->softconnect = is_on; | ||
| 1517 | musb_pullup(musb, is_on); | ||
| 1518 | } | ||
| 1519 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1520 | return 0; | ||
| 1521 | } | ||
| 1522 | |||
| 1523 | static const struct usb_gadget_ops musb_gadget_operations = { | ||
| 1524 | .get_frame = musb_gadget_get_frame, | ||
| 1525 | .wakeup = musb_gadget_wakeup, | ||
| 1526 | .set_selfpowered = musb_gadget_set_self_powered, | ||
| 1527 | /* .vbus_session = musb_gadget_vbus_session, */ | ||
| 1528 | .vbus_draw = musb_gadget_vbus_draw, | ||
| 1529 | .pullup = musb_gadget_pullup, | ||
| 1530 | }; | ||
| 1531 | |||
| 1532 | /* ----------------------------------------------------------------------- */ | ||
| 1533 | |||
| 1534 | /* Registration */ | ||
| 1535 | |||
| 1536 | /* Only this registration code "knows" the rule (from USB standards) | ||
| 1537 | * about there being only one external upstream port. It assumes | ||
| 1538 | * all peripheral ports are external... | ||
| 1539 | */ | ||
| 1540 | static struct musb *the_gadget; | ||
| 1541 | |||
| 1542 | static void musb_gadget_release(struct device *dev) | ||
| 1543 | { | ||
| 1544 | /* kref_put(WHAT) */ | ||
| 1545 | dev_dbg(dev, "%s\n", __func__); | ||
| 1546 | } | ||
| 1547 | |||
| 1548 | |||
| 1549 | static void __init | ||
| 1550 | init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) | ||
| 1551 | { | ||
| 1552 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
| 1553 | |||
| 1554 | memset(ep, 0, sizeof *ep); | ||
| 1555 | |||
| 1556 | ep->current_epnum = epnum; | ||
| 1557 | ep->musb = musb; | ||
| 1558 | ep->hw_ep = hw_ep; | ||
| 1559 | ep->is_in = is_in; | ||
| 1560 | |||
| 1561 | INIT_LIST_HEAD(&ep->req_list); | ||
| 1562 | |||
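| | /* ep0 and shared-FIFO endpoints get direction-less names ("ep1"); | ||
| | * the rest are named "ep1in"/"ep1out" and so on | ||
| | */ | ||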
| 1563 | sprintf(ep->name, "ep%d%s", epnum, | ||
| 1564 | (!epnum || hw_ep->is_shared_fifo) ? "" : ( | ||
| 1565 | is_in ? "in" : "out")); | ||
| 1566 | ep->end_point.name = ep->name; | ||
| 1567 | INIT_LIST_HEAD(&ep->end_point.ep_list); | ||
| 1568 | if (!epnum) { | ||
| 1569 | ep->end_point.maxpacket = 64; | ||
| 1570 | ep->end_point.ops = &musb_g_ep0_ops; | ||
| 1571 | musb->g.ep0 = &ep->end_point; | ||
| 1572 | } else { | ||
| 1573 | if (is_in) | ||
| 1574 | ep->end_point.maxpacket = hw_ep->max_packet_sz_tx; | ||
| 1575 | else | ||
| 1576 | ep->end_point.maxpacket = hw_ep->max_packet_sz_rx; | ||
| 1577 | ep->end_point.ops = &musb_ep_ops; | ||
| 1578 | list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); | ||
| 1579 | } | ||
| 1580 | } | ||
| 1581 | |||
| 1582 | /* | ||
| 1583 | * Initialize the endpoints exposed to peripheral drivers, with backlinks | ||
| 1584 | * to the rest of the driver state. | ||
| 1585 | */ | ||
| 1586 | static inline void __init musb_g_init_endpoints(struct musb *musb) | ||
| 1587 | { | ||
| 1588 | u8 epnum; | ||
| 1589 | struct musb_hw_ep *hw_ep; | ||
| 1590 | unsigned count = 0; | ||
| 1591 | |||
| 1592 | /* initialize endpoint list just once */ | ||
| 1593 | INIT_LIST_HEAD(&(musb->g.ep_list)); | ||
| 1594 | |||
| 1595 | for (epnum = 0, hw_ep = musb->endpoints; | ||
| 1596 | epnum < musb->nr_endpoints; | ||
| 1597 | epnum++, hw_ep++) { | ||
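| | /* a shared FIFO is exposed as a single endpoint object, usable in either direction */ | ||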
| 1598 | if (hw_ep->is_shared_fifo /* || !epnum */) { | ||
| 1599 | init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); | ||
| 1600 | count++; | ||
| 1601 | } else { | ||
| 1602 | if (hw_ep->max_packet_sz_tx) { | ||
| 1603 | init_peripheral_ep(musb, &hw_ep->ep_in, | ||
| 1604 | epnum, 1); | ||
| 1605 | count++; | ||
| 1606 | } | ||
| 1607 | if (hw_ep->max_packet_sz_rx) { | ||
| 1608 | init_peripheral_ep(musb, &hw_ep->ep_out, | ||
| 1609 | epnum, 0); | ||
| 1610 | count++; | ||
| 1611 | } | ||
| 1612 | } | ||
| 1613 | } | ||
| 1614 | } | ||
| 1615 | |||
| 1616 | /* called once during driver setup to initialize and link into | ||
| 1617 | * the driver model; memory is zeroed. | ||
| 1618 | */ | ||
| 1619 | int __init musb_gadget_setup(struct musb *musb) | ||
| 1620 | { | ||
| 1621 | int status; | ||
| 1622 | |||
| 1623 | /* REVISIT minor race: if (erroneously) setting up two | ||
| 1624 | * musb peripherals at the same time, only the bus lock | ||
| 1625 | * is probably held. | ||
| 1626 | */ | ||
| 1627 | if (the_gadget) | ||
| 1628 | return -EBUSY; | ||
| 1629 | the_gadget = musb; | ||
| 1630 | |||
| 1631 | musb->g.ops = &musb_gadget_operations; | ||
| 1632 | musb->g.is_dualspeed = 1; | ||
| 1633 | musb->g.speed = USB_SPEED_UNKNOWN; | ||
| 1634 | |||
| 1635 | /* this "gadget" abstracts/virtualizes the controller */ | ||
| 1636 | strcpy(musb->g.dev.bus_id, "gadget"); | ||
| 1637 | musb->g.dev.parent = musb->controller; | ||
| 1638 | musb->g.dev.dma_mask = musb->controller->dma_mask; | ||
| 1639 | musb->g.dev.release = musb_gadget_release; | ||
| 1640 | musb->g.name = musb_driver_name; | ||
| 1641 | |||
| 1642 | if (is_otg_enabled(musb)) | ||
| 1643 | musb->g.is_otg = 1; | ||
| 1644 | |||
| 1645 | musb_g_init_endpoints(musb); | ||
| 1646 | |||
| 1647 | musb->is_active = 0; | ||
| 1648 | musb_platform_try_idle(musb, 0); | ||
| 1649 | |||
| 1650 | status = device_register(&musb->g.dev); | ||
| 1651 | if (status != 0) | ||
| 1652 | the_gadget = NULL; | ||
| 1653 | return status; | ||
| 1654 | } | ||
| 1655 | |||
| 1656 | void musb_gadget_cleanup(struct musb *musb) | ||
| 1657 | { | ||
| 1658 | if (musb != the_gadget) | ||
| 1659 | return; | ||
| 1660 | |||
| 1661 | device_unregister(&musb->g.dev); | ||
| 1662 | the_gadget = NULL; | ||
| 1663 | } | ||
| 1664 | |||
| 1665 | /* | ||
| 1666 | * Register the gadget driver. Used by gadget drivers when | ||
| 1667 | * registering themselves with the controller. | ||
| 1668 | * | ||
| 1669 | * -EINVAL something went wrong (not driver) | ||
| 1670 | * -EBUSY another gadget is already using the controller | ||
| 1671 | * -ENOMEM no memory to perform the operation | ||
| 1672 | * | ||
| 1673 | * @param driver the gadget driver | ||
| 1674 | * @return <0 if error, 0 if everything is fine | ||
| 1675 | */ | ||
| 1676 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) | ||
| 1677 | { | ||
| 1678 | int retval; | ||
| 1679 | unsigned long flags; | ||
| 1680 | struct musb *musb = the_gadget; | ||
| 1681 | |||
| 1682 | if (!driver | ||
| 1683 | || driver->speed != USB_SPEED_HIGH | ||
| 1684 | || !driver->bind | ||
| 1685 | || !driver->setup) | ||
| 1686 | return -EINVAL; | ||
| 1687 | |||
| 1688 | /* driver must be initialized to support peripheral mode */ | ||
| 1689 | /* REVISIT: also reject boards whose board_mode cannot do peripheral */ | ||
| 1690 | if (!musb) { | ||
| 1691 | DBG(1, "%s, no dev??\n", __func__); | ||
| 1692 | return -ENODEV; | ||
| 1693 | } | ||
| 1694 | |||
| 1695 | DBG(3, "registering driver %s\n", driver->function); | ||
| 1696 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1697 | |||
| 1698 | if (musb->gadget_driver) { | ||
| 1699 | DBG(1, "%s is already bound to %s\n", | ||
| 1700 | musb_driver_name, | ||
| 1701 | musb->gadget_driver->driver.name); | ||
| 1702 | retval = -EBUSY; | ||
| 1703 | } else { | ||
| 1704 | musb->gadget_driver = driver; | ||
| 1705 | musb->g.dev.driver = &driver->driver; | ||
| 1706 | driver->driver.bus = NULL; | ||
| 1707 | musb->softconnect = 1; | ||
| 1708 | retval = 0; | ||
| 1709 | } | ||
| 1710 | |||
| 1711 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1712 | |||
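| | /* bind() is called with the controller lock dropped; gadget bind callbacks may sleep */ | ||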
| 1713 | if (retval == 0) { | ||
| 1714 | retval = driver->bind(&musb->g); | ||
| 1715 | if (retval != 0) { | ||
| 1716 | DBG(3, "bind to driver %s failed --> %d\n", | ||
| 1717 | driver->driver.name, retval); | ||
| 1718 | musb->gadget_driver = NULL; | ||
| 1719 | musb->g.dev.driver = NULL; | ||
| 1720 | } | ||
| 1721 | |||
| 1722 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1723 | |||
| 1724 | /* REVISIT always use otg_set_peripheral(), handling | ||
| 1725 | * issues including the root hub one below ... | ||
| 1726 | */ | ||
| 1727 | musb->xceiv.gadget = &musb->g; | ||
| 1728 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
| 1729 | musb->is_active = 1; | ||
| 1730 | |||
| 1731 | /* FIXME this ignores the softconnect flag. Drivers are | ||
| 1732 | * allowed to hold the peripheral inactive until, for example, | ||
| 1733 | * userspace hooks up printer hardware or DSP codecs, so | ||
| 1734 | * hosts only see fully functional devices. | ||
| 1735 | */ | ||
| 1736 | |||
| 1737 | if (!is_otg_enabled(musb)) | ||
| 1738 | musb_start(musb); | ||
| 1739 | |||
| 1740 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1741 | |||
| 1742 | if (is_otg_enabled(musb)) { | ||
| 1743 | DBG(3, "OTG startup...\n"); | ||
| 1744 | |||
| 1745 | /* REVISIT: funcall to other code, which also | ||
| 1746 | * handles power budgeting ... this way also | ||
| 1747 | * ensures HdrcStart is indirectly called. | ||
| 1748 | */ | ||
| 1749 | retval = usb_add_hcd(musb_to_hcd(musb), -1, 0); | ||
| 1750 | if (retval < 0) { | ||
| 1751 | DBG(1, "add_hcd failed, %d\n", retval); | ||
| 1752 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1753 | musb->xceiv.gadget = NULL; | ||
| 1754 | musb->xceiv.state = OTG_STATE_UNDEFINED; | ||
| 1755 | musb->gadget_driver = NULL; | ||
| 1756 | musb->g.dev.driver = NULL; | ||
| 1757 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1758 | } | ||
| 1759 | } | ||
| 1760 | } | ||
| 1761 | |||
| 1762 | return retval; | ||
| 1763 | } | ||
| 1764 | EXPORT_SYMBOL(usb_gadget_register_driver); | ||
| 1765 | |||
| 1766 | static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) | ||
| 1767 | { | ||
| 1768 | int i; | ||
| 1769 | struct musb_hw_ep *hw_ep; | ||
| 1770 | |||
| 1771 | /* don't disconnect if it's not connected */ | ||
| 1772 | if (musb->g.speed == USB_SPEED_UNKNOWN) | ||
| 1773 | driver = NULL; | ||
| 1774 | else | ||
| 1775 | musb->g.speed = USB_SPEED_UNKNOWN; | ||
| 1776 | |||
| 1777 | /* deactivate the hardware */ | ||
| 1778 | if (musb->softconnect) { | ||
| 1779 | musb->softconnect = 0; | ||
| 1780 | musb_pullup(musb, 0); | ||
| 1781 | } | ||
| 1782 | musb_stop(musb); | ||
| 1783 | |||
| 1784 | /* killing any outstanding requests will quiesce the driver; | ||
| 1785 | * then report disconnect | ||
| 1786 | */ | ||
| 1787 | if (driver) { | ||
| 1788 | for (i = 0, hw_ep = musb->endpoints; | ||
| 1789 | i < musb->nr_endpoints; | ||
| 1790 | i++, hw_ep++) { | ||
| 1791 | musb_ep_select(musb->mregs, i); | ||
| 1792 | if (hw_ep->is_shared_fifo /* || !epnum */) { | ||
| 1793 | nuke(&hw_ep->ep_in, -ESHUTDOWN); | ||
| 1794 | } else { | ||
| 1795 | if (hw_ep->max_packet_sz_tx) | ||
| 1796 | nuke(&hw_ep->ep_in, -ESHUTDOWN); | ||
| 1797 | if (hw_ep->max_packet_sz_rx) | ||
| 1798 | nuke(&hw_ep->ep_out, -ESHUTDOWN); | ||
| 1799 | } | ||
| 1800 | } | ||
| 1801 | |||
| 1802 | spin_unlock(&musb->lock); | ||
| 1803 | driver->disconnect(&musb->g); | ||
| 1804 | spin_lock(&musb->lock); | ||
| 1805 | } | ||
| 1806 | } | ||
| 1807 | |||
| 1808 | /* | ||
| 1809 | * Unregister the gadget driver. Used by gadget drivers when | ||
| 1810 | * unregistering themselves from the controller. | ||
| 1811 | * | ||
| 1812 | * @param driver the gadget driver to unregister | ||
| 1813 | */ | ||
| 1814 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | ||
| 1815 | { | ||
| 1816 | unsigned long flags; | ||
| 1817 | int retval = 0; | ||
| 1818 | struct musb *musb = the_gadget; | ||
| 1819 | |||
| 1820 | if (!driver || !driver->unbind || !musb) | ||
| 1821 | return -EINVAL; | ||
| 1822 | |||
| 1823 | /* REVISIT always use otg_set_peripheral() here too; | ||
| 1824 | * this needs to shut down the OTG engine. | ||
| 1825 | */ | ||
| 1826 | |||
| 1827 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1828 | |||
| 1829 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 1830 | musb_hnp_stop(musb); | ||
| 1831 | #endif | ||
| 1832 | |||
| 1833 | if (musb->gadget_driver == driver) { | ||
| 1834 | |||
| 1835 | (void) musb_gadget_vbus_draw(&musb->g, 0); | ||
| 1836 | |||
| 1837 | musb->xceiv.state = OTG_STATE_UNDEFINED; | ||
| 1838 | stop_activity(musb, driver); | ||
| 1839 | |||
| 1840 | DBG(3, "unregistering driver %s\n", driver->function); | ||
| 1841 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1842 | driver->unbind(&musb->g); | ||
| 1843 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1844 | |||
| 1845 | musb->gadget_driver = NULL; | ||
| 1846 | musb->g.dev.driver = NULL; | ||
| 1847 | |||
| 1848 | musb->is_active = 0; | ||
| 1849 | musb_platform_try_idle(musb, 0); | ||
| 1850 | } else | ||
| 1851 | retval = -EINVAL; | ||
| 1852 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1853 | |||
| 1854 | if (is_otg_enabled(musb) && retval == 0) { | ||
| 1855 | usb_remove_hcd(musb_to_hcd(musb)); | ||
| 1856 | /* FIXME we need to be able to register another | ||
| 1857 | * gadget driver here and have everything work; | ||
| 1858 | * that currently misbehaves. | ||
| 1859 | */ | ||
| 1860 | } | ||
| 1861 | |||
| 1862 | return retval; | ||
| 1863 | } | ||
| 1864 | EXPORT_SYMBOL(usb_gadget_unregister_driver); | ||
| 1865 | |||
| 1866 | |||
| 1867 | /* ----------------------------------------------------------------------- */ | ||
| 1868 | |||
| 1869 | /* lifecycle operations called through plat_uds.c */ | ||
| 1870 | |||
| 1871 | void musb_g_resume(struct musb *musb) | ||
| 1872 | { | ||
| 1873 | musb->is_suspended = 0; | ||
| 1874 | switch (musb->xceiv.state) { | ||
| 1875 | case OTG_STATE_B_IDLE: | ||
| 1876 | break; | ||
| 1877 | case OTG_STATE_B_WAIT_ACON: | ||
| 1878 | case OTG_STATE_B_PERIPHERAL: | ||
| 1879 | musb->is_active = 1; | ||
| 1880 | if (musb->gadget_driver && musb->gadget_driver->resume) { | ||
| 1881 | spin_unlock(&musb->lock); | ||
| 1882 | musb->gadget_driver->resume(&musb->g); | ||
| 1883 | spin_lock(&musb->lock); | ||
| 1884 | } | ||
| 1885 | break; | ||
| 1886 | default: | ||
| 1887 | WARNING("unhandled RESUME transition (%s)\n", | ||
| 1888 | otg_state_string(musb)); | ||
| 1889 | } | ||
| 1890 | } | ||
| 1891 | |||
| 1892 | /* called when SOF packets stop for 3+ msec */ | ||
| 1893 | void musb_g_suspend(struct musb *musb) | ||
| 1894 | { | ||
| 1895 | u8 devctl; | ||
| 1896 | |||
| 1897 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 1898 | DBG(3, "devctl %02x\n", devctl); | ||
| 1899 | |||
| 1900 | switch (musb->xceiv.state) { | ||
| 1901 | case OTG_STATE_B_IDLE: | ||
| 1902 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) | ||
| 1903 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
| 1904 | break; | ||
| 1905 | case OTG_STATE_B_PERIPHERAL: | ||
| 1906 | musb->is_suspended = 1; | ||
| 1907 | if (musb->gadget_driver && musb->gadget_driver->suspend) { | ||
| 1908 | spin_unlock(&musb->lock); | ||
| 1909 | musb->gadget_driver->suspend(&musb->g); | ||
| 1910 | spin_lock(&musb->lock); | ||
| 1911 | } | ||
| 1912 | break; | ||
| 1913 | default: | ||
| 1914 | /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; | ||
| 1915 | * A_PERIPHERAL may need care too | ||
| 1916 | */ | ||
| 1917 | WARNING("unhandled SUSPEND transition (%s)\n", | ||
| 1918 | otg_state_string(musb)); | ||
| 1919 | } | ||
| 1920 | } | ||
| 1921 | |||
| 1922 | /* Called during SRP */ | ||
| 1923 | void musb_g_wakeup(struct musb *musb) | ||
| 1924 | { | ||
| 1925 | musb_gadget_wakeup(&musb->g); | ||
| 1926 | } | ||
| 1927 | |||
| 1928 | /* called when VBUS drops below session threshold, and in other cases */ | ||
| 1929 | void musb_g_disconnect(struct musb *musb) | ||
| 1930 | { | ||
| 1931 | void __iomem *mregs = musb->mregs; | ||
| 1932 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
| 1933 | |||
| 1934 | DBG(3, "devctl %02x\n", devctl); | ||
| 1935 | |||
| 1936 | /* clear HR */ | ||
| 1937 | musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); | ||
| 1938 | |||
| 1939 | /* don't draw vbus until new b-default session */ | ||
| 1940 | (void) musb_gadget_vbus_draw(&musb->g, 0); | ||
| 1941 | |||
| 1942 | musb->g.speed = USB_SPEED_UNKNOWN; | ||
| 1943 | if (musb->gadget_driver && musb->gadget_driver->disconnect) { | ||
| 1944 | spin_unlock(&musb->lock); | ||
| 1945 | musb->gadget_driver->disconnect(&musb->g); | ||
| 1946 | spin_lock(&musb->lock); | ||
| 1947 | } | ||
| 1948 | |||
| 1949 | switch (musb->xceiv.state) { | ||
| 1950 | default: | ||
| 1951 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 1952 | DBG(2, "Unhandled disconnect %s, setting a_idle\n", | ||
| 1953 | otg_state_string(musb)); | ||
| 1954 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
| 1955 | break; | ||
| 1956 | case OTG_STATE_A_PERIPHERAL: | ||
| 1957 | musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; | ||
| 1958 | break; | ||
| 1959 | case OTG_STATE_B_WAIT_ACON: | ||
| 1960 | case OTG_STATE_B_HOST: | ||
| 1961 | #endif | ||
| 1962 | case OTG_STATE_B_PERIPHERAL: | ||
| 1963 | case OTG_STATE_B_IDLE: | ||
| 1964 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
| 1965 | break; | ||
| 1966 | case OTG_STATE_B_SRP_INIT: | ||
| 1967 | break; | ||
| 1968 | } | ||
| 1969 | |||
| 1970 | musb->is_active = 0; | ||
| 1971 | } | ||
| 1972 | |||
| 1973 | void musb_g_reset(struct musb *musb) | ||
| 1974 | __releases(musb->lock) | ||
| 1975 | __acquires(musb->lock) | ||
| 1976 | { | ||
| 1977 | void __iomem *mbase = musb->mregs; | ||
| 1978 | u8 devctl = musb_readb(mbase, MUSB_DEVCTL); | ||
| 1979 | u8 power; | ||
| 1980 | |||
| 1981 | DBG(3, "<== %s addr=%x driver '%s'\n", | ||
| 1982 | (devctl & MUSB_DEVCTL_BDEVICE) | ||
| 1983 | ? "B-Device" : "A-Device", | ||
| 1984 | musb_readb(mbase, MUSB_FADDR), | ||
| 1985 | musb->gadget_driver | ||
| 1986 | ? musb->gadget_driver->driver.name | ||
| 1987 | : NULL | ||
| 1988 | ); | ||
| 1989 | |||
| 1990 | /* report disconnect, if we didn't already (flushing EP state) */ | ||
| 1991 | if (musb->g.speed != USB_SPEED_UNKNOWN) | ||
| 1992 | musb_g_disconnect(musb); | ||
| 1993 | |||
| 1994 | /* clear HR */ | ||
| 1995 | else if (devctl & MUSB_DEVCTL_HR) | ||
| 1996 | musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); | ||
| 1997 | |||
| 1998 | |||
| 1999 | /* what speed did we negotiate? */ | ||
| 2000 | power = musb_readb(mbase, MUSB_POWER); | ||
| 2001 | musb->g.speed = (power & MUSB_POWER_HSMODE) | ||
| 2002 | ? USB_SPEED_HIGH : USB_SPEED_FULL; | ||
| 2003 | |||
| 2004 | /* start in USB_STATE_DEFAULT */ | ||
| 2005 | musb->is_active = 1; | ||
| 2006 | musb->is_suspended = 0; | ||
| 2007 | MUSB_DEV_MODE(musb); | ||
| 2008 | musb->address = 0; | ||
| 2009 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
| 2010 | |||
| 2011 | musb->may_wakeup = 0; | ||
| 2012 | musb->g.b_hnp_enable = 0; | ||
| 2013 | musb->g.a_alt_hnp_support = 0; | ||
| 2014 | musb->g.a_hnp_support = 0; | ||
| 2015 | |||
| 2016 | /* Normal reset, as B-Device; | ||
| 2017 | * or else after HNP, as A-Device | ||
| 2018 | */ | ||
| 2019 | if (devctl & MUSB_DEVCTL_BDEVICE) { | ||
| 2020 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
| 2021 | musb->g.is_a_peripheral = 0; | ||
| 2022 | } else if (is_otg_enabled(musb)) { | ||
| 2023 | musb->xceiv.state = OTG_STATE_A_PERIPHERAL; | ||
| 2024 | musb->g.is_a_peripheral = 1; | ||
| 2025 | } else | ||
| 2026 | WARN_ON(1); | ||
| 2027 | |||
| 2028 | /* start with default limits on VBUS power draw */ | ||
| 2029 | (void) musb_gadget_vbus_draw(&musb->g, | ||
| 2030 | is_otg_enabled(musb) ? 8 : 100); | ||
| 2031 | } | ||
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h new file mode 100644 index 000000000000..59502da9f739 --- /dev/null +++ b/drivers/usb/musb/musb_gadget.h | |||
| @@ -0,0 +1,108 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG driver peripheral defines | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * version 2 as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 32 | * | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef __MUSB_GADGET_H | ||
| 36 | #define __MUSB_GADGET_H | ||
| 37 | |||
| 38 | struct musb_request { | ||
| 39 | struct usb_request request; | ||
| 40 | struct musb_ep *ep; | ||
| 41 | struct musb *musb; | ||
| 42 | u8 tx; /* endpoint direction */ | ||
| 43 | u8 epnum; | ||
| 44 | u8 mapped; | ||
| 45 | }; | ||
| 46 | |||
| 47 | static inline struct musb_request *to_musb_request(struct usb_request *req) | ||
| 48 | { | ||
| 49 | return req ? container_of(req, struct musb_request, request) : NULL; | ||
| 50 | } | ||
| 51 | |||
| 52 | extern struct usb_request * | ||
| 53 | musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags); | ||
| 54 | extern void musb_free_request(struct usb_ep *ep, struct usb_request *req); | ||
| 55 | |||
| 56 | |||
| 57 | /* | ||
| 58 | * struct musb_ep - peripheral side view of endpoint rx or tx side | ||
| 59 | */ | ||
| 60 | struct musb_ep { | ||
| 61 | /* stuff towards the head is basically write-once. */ | ||
| 62 | struct usb_ep end_point; | ||
| 63 | char name[12]; | ||
| 64 | struct musb_hw_ep *hw_ep; | ||
| 65 | struct musb *musb; | ||
| 66 | u8 current_epnum; | ||
| 67 | |||
| 68 | /* ... when enabled/disabled ... */ | ||
| 69 | u8 type; | ||
| 70 | u8 is_in; | ||
| 71 | u16 packet_sz; | ||
| 72 | const struct usb_endpoint_descriptor *desc; | ||
| 73 | struct dma_channel *dma; | ||
| 74 | |||
| 75 | /* later things are modified based on usage */ | ||
| 76 | struct list_head req_list; | ||
| 77 | |||
| 78 | /* true if lock must be dropped but req_list may not be advanced */ | ||
| 79 | u8 busy; | ||
| 80 | }; | ||
| 81 | |||
| 82 | static inline struct musb_ep *to_musb_ep(struct usb_ep *ep) | ||
| 83 | { | ||
| 84 | return ep ? container_of(ep, struct musb_ep, end_point) : NULL; | ||
| 85 | } | ||
| 86 | |||
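| | /* peek at the request at the head of the endpoint's queue, if any */ | ||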
| 87 | static inline struct usb_request *next_request(struct musb_ep *ep) | ||
| 88 | { | ||
| 89 | struct list_head *queue = &ep->req_list; | ||
| 90 | |||
| 91 | if (list_empty(queue)) | ||
| 92 | return NULL; | ||
| 93 | return container_of(queue->next, struct usb_request, list); | ||
| 94 | } | ||
| 95 | |||
| 96 | extern void musb_g_tx(struct musb *musb, u8 epnum); | ||
| 97 | extern void musb_g_rx(struct musb *musb, u8 epnum); | ||
| 98 | |||
| 99 | extern const struct usb_ep_ops musb_g_ep0_ops; | ||
| 100 | |||
| 101 | extern int musb_gadget_setup(struct musb *); | ||
| 102 | extern void musb_gadget_cleanup(struct musb *); | ||
| 103 | |||
| 104 | extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); | ||
| 105 | |||
| 106 | extern int musb_gadget_set_halt(struct usb_ep *ep, int value); | ||
| 107 | |||
| 108 | #endif /* __MUSB_GADGET_H */ | ||
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c new file mode 100644 index 000000000000..48d7d3ccb243 --- /dev/null +++ b/drivers/usb/musb/musb_gadget_ep0.c | |||
| @@ -0,0 +1,981 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG peripheral driver ep0 handling | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * version 2 as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 32 | * | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/kernel.h> | ||
| 36 | #include <linux/list.h> | ||
| 37 | #include <linux/timer.h> | ||
| 38 | #include <linux/spinlock.h> | ||
| 39 | #include <linux/init.h> | ||
| 40 | #include <linux/device.h> | ||
| 41 | #include <linux/interrupt.h> | ||
| 42 | |||
| 43 | #include "musb_core.h" | ||
| 44 | |||
| 45 | /* ep0 is always musb->endpoints[0].ep_in */ | ||
| 46 | #define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0]) | ||
| 47 | |||
| 48 | /* | ||
| 49 | * locking note: we use only the controller lock, for simpler correctness. | ||
| 50 | * It's always held with IRQs blocked. | ||
| 51 | * | ||
| 52 | * It protects the ep0 request queue as well as ep0_state, not just the | ||
| 53 | * controller and indexed registers. And that lock stays held unless it | ||
| 54 | * needs to be dropped to allow reentering this driver ... like upcalls to | ||
| 55 | * the gadget driver, or adjusting endpoint halt status. | ||
| 56 | */ | ||
| 57 | |||
| 58 | static char *decode_ep0stage(u8 stage) | ||
| 59 | { | ||
| 60 | switch (stage) { | ||
| 61 | case MUSB_EP0_STAGE_SETUP: return "idle"; | ||
| 62 | case MUSB_EP0_STAGE_TX: return "in"; | ||
| 63 | case MUSB_EP0_STAGE_RX: return "out"; | ||
| 64 | case MUSB_EP0_STAGE_ACKWAIT: return "wait"; | ||
| 65 | case MUSB_EP0_STAGE_STATUSIN: return "in/status"; | ||
| 66 | case MUSB_EP0_STAGE_STATUSOUT: return "out/status"; | ||
| 67 | default: return "?"; | ||
| 68 | } | ||
| 69 | } | ||
| 70 | |||
| 71 | /* handle a standard GET_STATUS request | ||
| 72 | * Context: caller holds controller lock | ||
| 73 | */ | ||
| 74 | static int service_tx_status_request( | ||
| 75 | struct musb *musb, | ||
| 76 | const struct usb_ctrlrequest *ctrlrequest) | ||
| 77 | { | ||
| 78 | void __iomem *mbase = musb->mregs; | ||
| 79 | int handled = 1; | ||
| 80 | u8 result[2], epnum = 0; | ||
| 81 | const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; | ||
| 82 | |||
| 83 | result[1] = 0; | ||
| 84 | |||
| 85 | switch (recip) { | ||
| 86 | case USB_RECIP_DEVICE: | ||
| 87 | result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED; | ||
| 88 | result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; | ||
| 89 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 90 | if (musb->g.is_otg) { | ||
| 91 | result[0] |= musb->g.b_hnp_enable | ||
| 92 | << USB_DEVICE_B_HNP_ENABLE; | ||
| 93 | result[0] |= musb->g.a_alt_hnp_support | ||
| 94 | << USB_DEVICE_A_ALT_HNP_SUPPORT; | ||
| 95 | result[0] |= musb->g.a_hnp_support | ||
| 96 | << USB_DEVICE_A_HNP_SUPPORT; | ||
| 97 | } | ||
| 98 | #endif | ||
| 99 | break; | ||
| 100 | |||
| 101 | case USB_RECIP_INTERFACE: | ||
| 102 | result[0] = 0; | ||
| 103 | break; | ||
| 104 | |||
| 105 | case USB_RECIP_ENDPOINT: { | ||
| 106 | int is_in; | ||
| 107 | struct musb_ep *ep; | ||
| 108 | u16 tmp; | ||
| 109 | void __iomem *regs; | ||
| 110 | |||
| 111 | epnum = (u8) ctrlrequest->wIndex; | ||
| 112 | if (!epnum) { | ||
| 113 | result[0] = 0; | ||
| 114 | break; | ||
| 115 | } | ||
| 116 | |||
| 117 | is_in = epnum & USB_DIR_IN; | ||
| 118 | if (is_in) | ||
| 119 | epnum &= 0x0f; | ||
| 120 | if (epnum >= MUSB_C_NUM_EPS) { | ||
| 121 | handled = -EINVAL; | ||
| 122 | break; | ||
| 123 | } | ||
| 124 | ep = is_in ? &musb->endpoints[epnum].ep_in | ||
| 125 | : &musb->endpoints[epnum].ep_out; | ||
| 126 | regs = musb->endpoints[epnum].regs; | ||
| 127 | if (!ep->desc) { | ||
| 128 | handled = -EINVAL; | ||
| 129 | break; | ||
| 130 | } | ||
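| | /* report the halt feature from the selected endpoint's SENDSTALL bit */ | ||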
| 131 | musb_ep_select(mbase, epnum); | ||
| 132 | if (is_in) | ||
| 133 | tmp = musb_readw(regs, MUSB_TXCSR) | ||
| 134 | & MUSB_TXCSR_P_SENDSTALL; | ||
| 135 | else | ||
| 136 | tmp = musb_readw(regs, MUSB_RXCSR) | ||
| 137 | & MUSB_RXCSR_P_SENDSTALL; | ||
| 138 | musb_ep_select(mbase, 0); | ||
| 139 | |||
| 140 | result[0] = tmp ? 1 : 0; | ||
| 141 | } break; | ||
| 142 | |||
| 143 | default: | ||
| 144 | /* class, vendor, etc ... delegate */ | ||
| 145 | handled = 0; | ||
| 146 | break; | ||
| 147 | } | ||
| 148 | |||
| 149 | /* fill up the fifo; caller updates csr0 */ | ||
| 150 | if (handled > 0) { | ||
| 151 | u16 len = le16_to_cpu(ctrlrequest->wLength); | ||
| 152 | |||
| 153 | if (len > 2) | ||
| 154 | len = 2; | ||
| 155 | musb_write_fifo(&musb->endpoints[0], len, result); | ||
| 156 | } | ||
| 157 | |||
| 158 | return handled; | ||
| 159 | } | ||
| 160 | |||
| 161 | /* | ||
| 162 | * Handle a control-IN request; the ep0 buffer contains the current request, | ||
| 163 | * which is supposed to be a standard control request. Assumes the fifo | ||
| 164 | * is at least 2 bytes long. | ||
| 165 | * | ||
| 166 | * @return 0 if the request was NOT HANDLED, | ||
| 167 | * < 0 when error | ||
| 168 | * > 0 when the request is processed | ||
| 169 | * | ||
| 170 | * Context: caller holds controller lock | ||
| 171 | */ | ||
| 172 | static int | ||
| 173 | service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) | ||
| 174 | { | ||
| 175 | int handled = 0; /* not handled */ | ||
| 176 | |||
| 177 | if ((ctrlrequest->bRequestType & USB_TYPE_MASK) | ||
| 178 | == USB_TYPE_STANDARD) { | ||
| 179 | switch (ctrlrequest->bRequest) { | ||
| 180 | case USB_REQ_GET_STATUS: | ||
| 181 | handled = service_tx_status_request(musb, | ||
| 182 | ctrlrequest); | ||
| 183 | break; | ||
| 184 | |||
| 185 | /* case USB_REQ_SYNC_FRAME: */ | ||
| 186 | |||
| 187 | default: | ||
| 188 | break; | ||
| 189 | } | ||
| 190 | } | ||
| 191 | return handled; | ||
| 192 | } | ||
| 193 | |||
| 194 | /* | ||
| 195 | * Context: caller holds controller lock | ||
| 196 | */ | ||
| 197 | static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req) | ||
| 198 | { | ||
| 199 | musb_g_giveback(&musb->endpoints[0].ep_in, req, 0); | ||
| 200 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
| 201 | } | ||
| 202 | |||
| 203 | /* | ||
| 204 | * Tries to start B-device HNP negotiation if enabled via sysfs | ||
| 205 | */ | ||
| 206 | static inline void musb_try_b_hnp_enable(struct musb *musb) | ||
| 207 | { | ||
| 208 | void __iomem *mbase = musb->mregs; | ||
| 209 | u8 devctl; | ||
| 210 | |||
| 211 | DBG(1, "HNP: Setting HR\n"); | ||
| 212 | devctl = musb_readb(mbase, MUSB_DEVCTL); | ||
| 213 | musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR); | ||
| 214 | } | ||
| 215 | |||
| 216 | /* | ||
| 217 | * Handle all control requests with no DATA stage, including standard | ||
| 218 | * requests such as: | ||
| 219 | * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized | ||
| 220 | * always delegated to the gadget driver | ||
| 221 | * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE | ||
| 222 | * always handled here, except for class/vendor/... features | ||
| 223 | * | ||
| 224 | * Context: caller holds controller lock | ||
| 225 | */ | ||
| 226 | static int | ||
| 227 | service_zero_data_request(struct musb *musb, | ||
| 228 | struct usb_ctrlrequest *ctrlrequest) | ||
| 229 | __releases(musb->lock) | ||
| 230 | __acquires(musb->lock) | ||
| 231 | { | ||
| 232 | int handled = -EINVAL; | ||
| 233 | void __iomem *mbase = musb->mregs; | ||
| 234 | const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; | ||
| 235 | |||
| 236 | /* the gadget driver handles everything except what we MUST handle */ | ||
| 237 | if ((ctrlrequest->bRequestType & USB_TYPE_MASK) | ||
| 238 | == USB_TYPE_STANDARD) { | ||
| 239 | switch (ctrlrequest->bRequest) { | ||
| 240 | case USB_REQ_SET_ADDRESS: | ||
| 241 | /* change it after the status stage */ | ||
| 242 | musb->set_address = true; | ||
| 243 | musb->address = (u8) (ctrlrequest->wValue & 0x7f); | ||
| 244 | handled = 1; | ||
| 245 | break; | ||
| 246 | |||
| 247 | case USB_REQ_CLEAR_FEATURE: | ||
| 248 | switch (recip) { | ||
| 249 | case USB_RECIP_DEVICE: | ||
| 250 | if (ctrlrequest->wValue | ||
| 251 | != USB_DEVICE_REMOTE_WAKEUP) | ||
| 252 | break; | ||
| 253 | musb->may_wakeup = 0; | ||
| 254 | handled = 1; | ||
| 255 | break; | ||
| 256 | case USB_RECIP_INTERFACE: | ||
| 257 | break; | ||
| 258 | case USB_RECIP_ENDPOINT:{ | ||
| 259 | const u8 num = ctrlrequest->wIndex & 0x0f; | ||
| 260 | struct musb_ep *musb_ep; | ||
| 261 | |||
| 262 | if (num == 0 | ||
| 263 | || num >= MUSB_C_NUM_EPS | ||
| 264 | || ctrlrequest->wValue | ||
| 265 | != USB_ENDPOINT_HALT) | ||
| 266 | break; | ||
| 267 | |||
| 268 | if (ctrlrequest->wIndex & USB_DIR_IN) | ||
| 269 | musb_ep = &musb->endpoints[num].ep_in; | ||
| 270 | else | ||
| 271 | musb_ep = &musb->endpoints[num].ep_out; | ||
| 272 | if (!musb_ep->desc) | ||
| 273 | break; | ||
| 274 | |||
| 275 | /* REVISIT do it directly, no locking games */ | ||
| 276 | spin_unlock(&musb->lock); | ||
| 277 | musb_gadget_set_halt(&musb_ep->end_point, 0); | ||
| 278 | spin_lock(&musb->lock); | ||
| 279 | |||
| 280 | /* select ep0 again */ | ||
| 281 | musb_ep_select(mbase, 0); | ||
| 282 | handled = 1; | ||
| 283 | } break; | ||
| 284 | default: | ||
| 285 | /* class, vendor, etc ... delegate */ | ||
| 286 | handled = 0; | ||
| 287 | break; | ||
| 288 | } | ||
| 289 | break; | ||
| 290 | |||
| 291 | case USB_REQ_SET_FEATURE: | ||
| 292 | switch (recip) { | ||
| 293 | case USB_RECIP_DEVICE: | ||
| 294 | handled = 1; | ||
| 295 | switch (ctrlrequest->wValue) { | ||
| 296 | case USB_DEVICE_REMOTE_WAKEUP: | ||
| 297 | musb->may_wakeup = 1; | ||
| 298 | break; | ||
| 299 | case USB_DEVICE_TEST_MODE: | ||
| 300 | if (musb->g.speed != USB_SPEED_HIGH) | ||
| 301 | goto stall; | ||
| 302 | if (ctrlrequest->wIndex & 0xff) | ||
| 303 | goto stall; | ||
| 304 | |||
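| | /* the test selector is carried in the high byte of wIndex */ | ||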
| 305 | switch (ctrlrequest->wIndex >> 8) { | ||
| 306 | case 1: | ||
| 307 | pr_debug("TEST_J\n"); | ||
| 308 | /* TEST_J */ | ||
| 309 | musb->test_mode_nr = | ||
| 310 | MUSB_TEST_J; | ||
| 311 | break; | ||
| 312 | case 2: | ||
| 313 | /* TEST_K */ | ||
| 314 | pr_debug("TEST_K\n"); | ||
| 315 | musb->test_mode_nr = | ||
| 316 | MUSB_TEST_K; | ||
| 317 | break; | ||
| 318 | case 3: | ||
| 319 | /* TEST_SE0_NAK */ | ||
| 320 | pr_debug("TEST_SE0_NAK\n"); | ||
| 321 | musb->test_mode_nr = | ||
| 322 | MUSB_TEST_SE0_NAK; | ||
| 323 | break; | ||
| 324 | case 4: | ||
| 325 | /* TEST_PACKET */ | ||
| 326 | pr_debug("TEST_PACKET\n"); | ||
| 327 | musb->test_mode_nr = | ||
| 328 | MUSB_TEST_PACKET; | ||
| 329 | break; | ||
| 330 | default: | ||
| 331 | goto stall; | ||
| 332 | } | ||
| 333 | |||
| 334 | /* enter test mode after irq */ | ||
| 335 | if (handled > 0) | ||
| 336 | musb->test_mode = true; | ||
| 337 | break; | ||
| 338 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 339 | case USB_DEVICE_B_HNP_ENABLE: | ||
| 340 | if (!musb->g.is_otg) | ||
| 341 | goto stall; | ||
| 342 | musb->g.b_hnp_enable = 1; | ||
| 343 | musb_try_b_hnp_enable(musb); | ||
| 344 | break; | ||
| 345 | case USB_DEVICE_A_HNP_SUPPORT: | ||
| 346 | if (!musb->g.is_otg) | ||
| 347 | goto stall; | ||
| 348 | musb->g.a_hnp_support = 1; | ||
| 349 | break; | ||
| 350 | case USB_DEVICE_A_ALT_HNP_SUPPORT: | ||
| 351 | if (!musb->g.is_otg) | ||
| 352 | goto stall; | ||
| 353 | musb->g.a_alt_hnp_support = 1; | ||
| 354 | break; | ||
| 355 | #endif | ||
| 356 | stall: | ||
| 357 | default: | ||
| 358 | handled = -EINVAL; | ||
| 359 | break; | ||
| 360 | } | ||
| 361 | break; | ||
| 362 | |||
| 363 | case USB_RECIP_INTERFACE: | ||
| 364 | break; | ||
| 365 | |||
| 366 | case USB_RECIP_ENDPOINT:{ | ||
| 367 | const u8 epnum = | ||
| 368 | ctrlrequest->wIndex & 0x0f; | ||
| 369 | struct musb_ep *musb_ep; | ||
| 370 | struct musb_hw_ep *ep; | ||
| 371 | void __iomem *regs; | ||
| 372 | int is_in; | ||
| 373 | u16 csr; | ||
| 374 | |||
| 375 | if (epnum == 0 | ||
| 376 | || epnum >= MUSB_C_NUM_EPS | ||
| 377 | || ctrlrequest->wValue | ||
| 378 | != USB_ENDPOINT_HALT) | ||
| 379 | break; | ||
| 380 | |||
| 381 | ep = musb->endpoints + epnum; | ||
| 382 | regs = ep->regs; | ||
| 383 | is_in = ctrlrequest->wIndex & USB_DIR_IN; | ||
| 384 | if (is_in) | ||
| 385 | musb_ep = &ep->ep_in; | ||
| 386 | else | ||
| 387 | musb_ep = &ep->ep_out; | ||
| 388 | if (!musb_ep->desc) | ||
| 389 | break; | ||
| 390 | |||
| 391 | musb_ep_select(mbase, epnum); | ||
| 392 | if (is_in) { | ||
| 393 | csr = musb_readw(regs, | ||
| 394 | MUSB_TXCSR); | ||
| 395 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) | ||
| 396 | csr |= MUSB_TXCSR_FLUSHFIFO; | ||
| 397 | csr |= MUSB_TXCSR_P_SENDSTALL | ||
| 398 | | MUSB_TXCSR_CLRDATATOG | ||
| 399 | | MUSB_TXCSR_P_WZC_BITS; | ||
| 400 | musb_writew(regs, MUSB_TXCSR, | ||
| 401 | csr); | ||
| 402 | } else { | ||
| 403 | csr = musb_readw(regs, | ||
| 404 | MUSB_RXCSR); | ||
| 405 | csr |= MUSB_RXCSR_P_SENDSTALL | ||
| 406 | | MUSB_RXCSR_FLUSHFIFO | ||
| 407 | | MUSB_RXCSR_CLRDATATOG | ||
| 408 | | MUSB_RXCSR_P_WZC_BITS; | ||
| 409 | musb_writew(regs, MUSB_RXCSR, | ||
| 410 | csr); | ||
| 411 | } | ||
| 412 | |||
| 413 | /* select ep0 again */ | ||
| 414 | musb_ep_select(mbase, 0); | ||
| 415 | handled = 1; | ||
| 416 | } break; | ||
| 417 | |||
| 418 | default: | ||
| 419 | /* class, vendor, etc ... delegate */ | ||
| 420 | handled = 0; | ||
| 421 | break; | ||
| 422 | } | ||
| 423 | break; | ||
| 424 | default: | ||
| 425 | /* delegate SET_CONFIGURATION, etc */ | ||
| 426 | handled = 0; | ||
| 427 | } | ||
| 428 | } else | ||
| 429 | handled = 0; | ||
| 430 | return handled; | ||
| 431 | } | ||
| 432 | |||
| 433 | /* we have an ep0out data packet | ||
| 434 | * Context: caller holds controller lock | ||
| 435 | */ | ||
| 436 | static void ep0_rxstate(struct musb *musb) | ||
| 437 | { | ||
| 438 | void __iomem *regs = musb->control_ep->regs; | ||
| 439 | struct usb_request *req; | ||
| 440 | u16 tmp; | ||
| 441 | |||
| 442 | req = next_ep0_request(musb); | ||
| 443 | |||
| 444 | /* read the packet and ack it; or stall, blaming a gadget driver bug: | ||
| 445 | * it should have provided the rx buffer before setup() returned. | ||
| 446 | */ | ||
| 447 | if (req) { | ||
| 448 | void *buf = req->buf + req->actual; | ||
| 449 | unsigned len = req->length - req->actual; | ||
| 450 | |||
| 451 | /* read the buffer */ | ||
| 452 | tmp = musb_readb(regs, MUSB_COUNT0); | ||
| 453 | if (tmp > len) { | ||
| 454 | req->status = -EOVERFLOW; | ||
| 455 | tmp = len; | ||
| 456 | } | ||
| 457 | musb_read_fifo(&musb->endpoints[0], tmp, buf); | ||
| 458 | req->actual += tmp; | ||
| 459 | /* test the count before tmp is reused for the csr0 ack */ | ||
| 460 | if (tmp < 64 || req->actual == req->length) | ||
| 461 | musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; | ||
| 462 | else | ||
| 463 | req = NULL; | ||
| 464 | tmp = MUSB_CSR0_P_SVDRXPKTRDY | (req ? MUSB_CSR0_P_DATAEND : 0); | ||
| 465 | } else | ||
| 466 | tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL; | ||
| 467 | |||
| 468 | |||
| 469 | /* Completion handler may choose to stall, e.g. because the | ||
| 470 | * message just received holds invalid data. | ||
| 471 | */ | ||
| 472 | if (req) { | ||
| 473 | musb->ackpend = tmp; | ||
| 474 | musb_g_ep0_giveback(musb, req); | ||
| 475 | if (!musb->ackpend) | ||
| 476 | return; | ||
| 477 | musb->ackpend = 0; | ||
| 478 | } | ||
| 479 | musb_writew(regs, MUSB_CSR0, tmp); | ||
| 480 | } | ||
| 481 | |||
| 482 | /* | ||
| 483 | * transmitting to the host (IN), this code might be called from IRQ | ||
| 484 | * and from kernel thread. | ||
| 485 | * | ||
| 486 | * Context: caller holds controller lock | ||
| 487 | */ | ||
| 488 | static void ep0_txstate(struct musb *musb) | ||
| 489 | { | ||
| 490 | void __iomem *regs = musb->control_ep->regs; | ||
| 491 | struct usb_request *request = next_ep0_request(musb); | ||
| 492 | u16 csr = MUSB_CSR0_TXPKTRDY; | ||
| 493 | u8 *fifo_src; | ||
| 494 | u8 fifo_count; | ||
| 495 | |||
| 496 | if (!request) { | ||
| 497 | /* WARN_ON(1); */ | ||
| 498 | DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0)); | ||
| 499 | return; | ||
| 500 | } | ||
| 501 | |||
| 502 | /* load the data */ | ||
| 503 | fifo_src = (u8 *) request->buf + request->actual; | ||
| 504 | fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE, | ||
| 505 | request->length - request->actual); | ||
| 506 | musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src); | ||
| 507 | request->actual += fifo_count; | ||
| 508 | |||
| 509 | /* update the flags */ | ||
| 510 | if (fifo_count < MUSB_MAX_END0_PACKET | ||
| 511 | || request->actual == request->length) { | ||
| 512 | musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT; | ||
| 513 | csr |= MUSB_CSR0_P_DATAEND; | ||
| 514 | } else | ||
| 515 | request = NULL; | ||
| 516 | |||
| 517 | /* report completions as soon as the fifo's loaded; there's no | ||
| 518 | * win in waiting till this last packet gets acked. (other than | ||
| 519 | * very precise fault reporting, needed by USB TMC; possible with | ||
| 520 | * this hardware, but not usable from portable gadget drivers.) | ||
| 521 | */ | ||
| 522 | if (request) { | ||
| 523 | musb->ackpend = csr; | ||
| 524 | musb_g_ep0_giveback(musb, request); | ||
| 525 | if (!musb->ackpend) | ||
| 526 | return; | ||
| 527 | musb->ackpend = 0; | ||
| 528 | } | ||
| 529 | |||
| 530 | /* send it out, triggering a "txpktrdy cleared" irq */ | ||
| 531 | musb_writew(regs, MUSB_CSR0, csr); | ||
| 532 | } | ||
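As a worked example of the termination rule above (illustrative numbers, assuming the usual 64-byte ep0 FIFO): a 150-byte IN response drains in three FIFO loads of 64, 64 and 22 bytes; only the last, short load satisfies fifo_count < MUSB_MAX_END0_PACKET, so DATAEND goes out with it and the state machine advances to MUSB_EP0_STAGE_STATUSOUT. A response that is an exact multiple of 64 bytes terminates on the request->actual == request->length test instead.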
| 533 | |||
| 534 | /* | ||
| 535 | * Read a SETUP packet (struct usb_ctrlrequest) from the hardware. | ||
| 536 | * Fields are left in USB byte-order. | ||
| 537 | * | ||
| 538 | * Context: caller holds controller lock. | ||
| 539 | */ | ||
| 540 | static void | ||
| 541 | musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req) | ||
| 542 | { | ||
| 543 | struct usb_request *r; | ||
| 544 | void __iomem *regs = musb->control_ep->regs; | ||
| 545 | |||
| 546 | musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req); | ||
| 547 | |||
| 548 | /* NOTE: earlier 2.6 versions changed setup packets to host | ||
| 549 | * order, but now USB packets always stay in USB byte order. | ||
| 550 | */ | ||
| 551 | DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n", | ||
| 552 | req->bRequestType, | ||
| 553 | req->bRequest, | ||
| 554 | le16_to_cpu(req->wValue), | ||
| 555 | le16_to_cpu(req->wIndex), | ||
| 556 | le16_to_cpu(req->wLength)); | ||
| 557 | |||
| 558 | /* clean up any leftover transfers */ | ||
| 559 | r = next_ep0_request(musb); | ||
| 560 | if (r) | ||
| 561 | musb_g_ep0_giveback(musb, r); | ||
| 562 | |||
| 563 | /* For zero-data requests we want to delay the STATUS stage to | ||
| 564 | * avoid SETUPEND errors. If we read data (OUT), delay accepting | ||
| 565 | * packets until there's a buffer to store them in. | ||
| 566 | * | ||
| 567 | * If we write data, the controller acts happier if we enable | ||
| 568 | * the TX FIFO right away, and give the controller a moment | ||
| 569 | * to switch modes... | ||
| 570 | */ | ||
| 571 | musb->set_address = false; | ||
| 572 | musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY; | ||
| 573 | if (req->wLength == 0) { | ||
| 574 | if (req->bRequestType & USB_DIR_IN) | ||
| 575 | musb->ackpend |= MUSB_CSR0_TXPKTRDY; | ||
| 576 | musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT; | ||
| 577 | } else if (req->bRequestType & USB_DIR_IN) { | ||
| 578 | musb->ep0_state = MUSB_EP0_STAGE_TX; | ||
| 579 | musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY); | ||
| 580 | while ((musb_readw(regs, MUSB_CSR0) | ||
| 581 | & MUSB_CSR0_RXPKTRDY) != 0) | ||
| 582 | cpu_relax(); | ||
| 583 | musb->ackpend = 0; | ||
| 584 | } else | ||
| 585 | musb->ep0_state = MUSB_EP0_STAGE_RX; | ||
| 586 | } | ||
| 587 | |||
| 588 | static int | ||
| 589 | forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) | ||
| 590 | __releases(musb->lock) | ||
| 591 | __acquires(musb->lock) | ||
| 592 | { | ||
| 593 | int retval; | ||
| 594 | if (!musb->gadget_driver) | ||
| 595 | return -EOPNOTSUPP; | ||
| 596 | spin_unlock(&musb->lock); | ||
| 597 | retval = musb->gadget_driver->setup(&musb->g, ctrlrequest); | ||
| 598 | spin_lock(&musb->lock); | ||
| 599 | return retval; | ||
| 600 | } | ||
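For context, forward_to_driver() lands in the bound gadget driver's setup() callback, which runs with musb->lock dropped and usually answers by queuing a request on ep0; that queue call then flows through musb_g_ep0_queue() (later in this file) and the ep0_txstate()/ep0_rxstate() paths above. The sketch below shows that pattern only in outline: my_gadget_setup(), my_req and my_buf are hypothetical names, the gadget headers are assumed, and a real driver's descriptor handling and error paths are omitted.

	/* Illustrative sketch only (not part of this driver); my_req and
	 * my_buf would be set up by the gadget driver at bind() time.
	 */
	static struct usb_request *my_req;	/* hypothetical, preallocated */
	static u8 my_buf[256];			/* hypothetical reply buffer */

	static int my_gadget_setup(struct usb_gadget *gadget,
			const struct usb_ctrlrequest *ctrl)
	{
		u16 w_length = le16_to_cpu(ctrl->wLength);	/* wire order */

		if (ctrl->bRequest != USB_REQ_GET_DESCRIPTOR)
			return -EOPNOTSUPP;	/* negative return stalls ep0 */

		/* fill my_buf with the reply (omitted), then queue it on ep0 */
		my_req->buf = my_buf;
		my_req->length = min_t(u16, w_length, sizeof(my_buf));
		return usb_ep_queue(gadget->ep0, my_req, GFP_ATOMIC);
	}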
| 601 | |||
| 602 | /* | ||
| 603 | * Handle peripheral ep0 interrupt | ||
| 604 | * | ||
| 605 | * Context: irq handler; we won't re-enter the driver that way. | ||
| 606 | */ | ||
| 607 | irqreturn_t musb_g_ep0_irq(struct musb *musb) | ||
| 608 | { | ||
| 609 | u16 csr; | ||
| 610 | u16 len; | ||
| 611 | void __iomem *mbase = musb->mregs; | ||
| 612 | void __iomem *regs = musb->endpoints[0].regs; | ||
| 613 | irqreturn_t retval = IRQ_NONE; | ||
| 614 | |||
| 615 | musb_ep_select(mbase, 0); /* select ep0 */ | ||
| 616 | csr = musb_readw(regs, MUSB_CSR0); | ||
| 617 | len = musb_readb(regs, MUSB_COUNT0); | ||
| 618 | |||
| 619 | DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n", | ||
| 620 | csr, len, | ||
| 621 | musb_readb(mbase, MUSB_FADDR), | ||
| 622 | decode_ep0stage(musb->ep0_state)); | ||
| 623 | |||
| 624 | /* we sent a stall; acknowledge it now */ | ||
| 625 | if (csr & MUSB_CSR0_P_SENTSTALL) { | ||
| 626 | musb_writew(regs, MUSB_CSR0, | ||
| 627 | csr & ~MUSB_CSR0_P_SENTSTALL); | ||
| 628 | retval = IRQ_HANDLED; | ||
| 629 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
| 630 | csr = musb_readw(regs, MUSB_CSR0); | ||
| 631 | } | ||
| 632 | |||
| 633 | /* request ended "early" */ | ||
| 634 | if (csr & MUSB_CSR0_P_SETUPEND) { | ||
| 635 | musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND); | ||
| 636 | retval = IRQ_HANDLED; | ||
| 637 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
| 638 | csr = musb_readw(regs, MUSB_CSR0); | ||
| 639 | /* NOTE: request may need completion */ | ||
| 640 | } | ||
| 641 | |||
| 642 | /* docs from Mentor only describe tx, rx, and idle/setup states. | ||
| 643 | * we need to handle nuances around status stages, and also the | ||
| 644 | * case where status and setup stages come back-to-back ... | ||
| 645 | */ | ||
| 646 | switch (musb->ep0_state) { | ||
| 647 | |||
| 648 | case MUSB_EP0_STAGE_TX: | ||
| 649 | /* irq on clearing txpktrdy */ | ||
| 650 | if ((csr & MUSB_CSR0_TXPKTRDY) == 0) { | ||
| 651 | ep0_txstate(musb); | ||
| 652 | retval = IRQ_HANDLED; | ||
| 653 | } | ||
| 654 | break; | ||
| 655 | |||
| 656 | case MUSB_EP0_STAGE_RX: | ||
| 657 | /* irq on set rxpktrdy */ | ||
| 658 | if (csr & MUSB_CSR0_RXPKTRDY) { | ||
| 659 | ep0_rxstate(musb); | ||
| 660 | retval = IRQ_HANDLED; | ||
| 661 | } | ||
| 662 | break; | ||
| 663 | |||
| 664 | case MUSB_EP0_STAGE_STATUSIN: | ||
| 665 | /* end of sequence #2 (OUT/RX state) or #3 (no data) */ | ||
| 666 | |||
| 667 | /* update the address (if needed) only at the end of | ||
| 668 | * the status phase, per the usb spec, which also | ||
| 669 | * guarantees we get 10 msec to receive this irq; until | ||
| 670 | * this is done we won't see the next packet. | ||
| 671 | */ | ||
| 672 | if (musb->set_address) { | ||
| 673 | musb->set_address = false; | ||
| 674 | musb_writeb(mbase, MUSB_FADDR, musb->address); | ||
| 675 | } | ||
| 676 | |||
| 677 | /* enter test mode if needed (exit by reset) */ | ||
| 678 | else if (musb->test_mode) { | ||
| 679 | DBG(1, "entering TESTMODE\n"); | ||
| 680 | |||
| 681 | if (MUSB_TEST_PACKET == musb->test_mode_nr) | ||
| 682 | musb_load_testpacket(musb); | ||
| 683 | |||
| 684 | musb_writeb(mbase, MUSB_TESTMODE, | ||
| 685 | musb->test_mode_nr); | ||
| 686 | } | ||
| 687 | /* FALLTHROUGH */ | ||
| 688 | |||
| 689 | case MUSB_EP0_STAGE_STATUSOUT: | ||
| 690 | /* end of sequence #1: write to host (TX state) */ | ||
| 691 | { | ||
| 692 | struct usb_request *req; | ||
| 693 | |||
| 694 | req = next_ep0_request(musb); | ||
| 695 | if (req) | ||
| 696 | musb_g_ep0_giveback(musb, req); | ||
| 697 | } | ||
| 698 | retval = IRQ_HANDLED; | ||
| 699 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
| 700 | /* FALLTHROUGH */ | ||
| 701 | |||
| 702 | case MUSB_EP0_STAGE_SETUP: | ||
| 703 | if (csr & MUSB_CSR0_RXPKTRDY) { | ||
| 704 | struct usb_ctrlrequest setup; | ||
| 705 | int handled = 0; | ||
| 706 | |||
| 707 | if (len != 8) { | ||
| 708 | ERR("SETUP packet len %d != 8 ?\n", len); | ||
| 709 | break; | ||
| 710 | } | ||
| 711 | musb_read_setup(musb, &setup); | ||
| 712 | retval = IRQ_HANDLED; | ||
| 713 | |||
| 714 | /* sometimes the RESET won't be reported */ | ||
| 715 | if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) { | ||
| 716 | u8 power; | ||
| 717 | |||
| 718 | printk(KERN_NOTICE "%s: peripheral reset " | ||
| 719 | "irq lost!\n", | ||
| 720 | musb_driver_name); | ||
| 721 | power = musb_readb(mbase, MUSB_POWER); | ||
| 722 | musb->g.speed = (power & MUSB_POWER_HSMODE) | ||
| 723 | ? USB_SPEED_HIGH : USB_SPEED_FULL; | ||
| 724 | |||
| 725 | } | ||
| 726 | |||
| 727 | switch (musb->ep0_state) { | ||
| 728 | |||
| 729 | /* sequence #3 (no data stage), includes requests | ||
| 730 | * we can't forward (notably SET_ADDRESS and the | ||
| 731 | * device/endpoint feature set/clear operations) | ||
| 732 | * plus SET_CONFIGURATION and others we must forward | ||
| 733 | */ | ||
| 734 | case MUSB_EP0_STAGE_ACKWAIT: | ||
| 735 | handled = service_zero_data_request( | ||
| 736 | musb, &setup); | ||
| 737 | |||
| 738 | /* status stage might be immediate */ | ||
| 739 | if (handled > 0) { | ||
| 740 | musb->ackpend |= MUSB_CSR0_P_DATAEND; | ||
| 741 | musb->ep0_state = | ||
| 742 | MUSB_EP0_STAGE_STATUSIN; | ||
| 743 | } | ||
| 744 | break; | ||
| 745 | |||
| 746 | /* sequence #1 (IN to host), includes GET_STATUS | ||
| 747 | * requests that we can't forward, GET_DESCRIPTOR | ||
| 748 | * and others that we must forward | ||
| 749 | */ | ||
| 750 | case MUSB_EP0_STAGE_TX: | ||
| 751 | handled = service_in_request(musb, &setup); | ||
| 752 | if (handled > 0) { | ||
| 753 | musb->ackpend = MUSB_CSR0_TXPKTRDY | ||
| 754 | | MUSB_CSR0_P_DATAEND; | ||
| 755 | musb->ep0_state = | ||
| 756 | MUSB_EP0_STAGE_STATUSOUT; | ||
| 757 | } | ||
| 758 | break; | ||
| 759 | |||
| 760 | /* sequence #2 (OUT from host), always forward */ | ||
| 761 | default: /* MUSB_EP0_STAGE_RX */ | ||
| 762 | break; | ||
| 763 | } | ||
| 764 | |||
| 765 | DBG(3, "handled %d, csr %04x, ep0stage %s\n", | ||
| 766 | handled, csr, | ||
| 767 | decode_ep0stage(musb->ep0_state)); | ||
| 768 | |||
| 769 | /* unless we need to delegate this to the gadget | ||
| 770 | * driver, we know how to wrap this up: csr0 has | ||
| 771 | * not yet been written. | ||
| 772 | */ | ||
| 773 | if (handled < 0) | ||
| 774 | goto stall; | ||
| 775 | else if (handled > 0) | ||
| 776 | goto finish; | ||
| 777 | |||
| 778 | handled = forward_to_driver(musb, &setup); | ||
| 779 | if (handled < 0) { | ||
| 780 | musb_ep_select(mbase, 0); | ||
| 781 | stall: | ||
| 782 | DBG(3, "stall (%d)\n", handled); | ||
| 783 | musb->ackpend |= MUSB_CSR0_P_SENDSTALL; | ||
| 784 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
| 785 | finish: | ||
| 786 | musb_writew(regs, MUSB_CSR0, | ||
| 787 | musb->ackpend); | ||
| 788 | musb->ackpend = 0; | ||
| 789 | } | ||
| 790 | } | ||
| 791 | break; | ||
| 792 | |||
| 793 | case MUSB_EP0_STAGE_ACKWAIT: | ||
| 794 | /* This should not happen, but it does with tusb6010 running | ||
| 795 | * g_file_storage at high speed. Do nothing. | ||
| 796 | */ | ||
| 797 | retval = IRQ_HANDLED; | ||
| 798 | break; | ||
| 799 | |||
| 800 | default: | ||
| 801 | /* "can't happen" */ | ||
| 802 | WARN_ON(1); | ||
| 803 | musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL); | ||
| 804 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
| 805 | break; | ||
| 806 | } | ||
| 807 | |||
| 808 | return retval; | ||
| 809 | } | ||
| 810 | |||
| 811 | |||
| 812 | static int | ||
| 813 | musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) | ||
| 814 | { | ||
| 815 | /* always enabled */ | ||
| 816 | return -EINVAL; | ||
| 817 | } | ||
| 818 | |||
| 819 | static int musb_g_ep0_disable(struct usb_ep *e) | ||
| 820 | { | ||
| 821 | /* always enabled */ | ||
| 822 | return -EINVAL; | ||
| 823 | } | ||
| 824 | |||
| 825 | static int | ||
| 826 | musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags) | ||
| 827 | { | ||
| 828 | struct musb_ep *ep; | ||
| 829 | struct musb_request *req; | ||
| 830 | struct musb *musb; | ||
| 831 | int status; | ||
| 832 | unsigned long lockflags; | ||
| 833 | void __iomem *regs; | ||
| 834 | |||
| 835 | if (!e || !r) | ||
| 836 | return -EINVAL; | ||
| 837 | |||
| 838 | ep = to_musb_ep(e); | ||
| 839 | musb = ep->musb; | ||
| 840 | regs = musb->control_ep->regs; | ||
| 841 | |||
| 842 | req = to_musb_request(r); | ||
| 843 | req->musb = musb; | ||
| 844 | req->request.actual = 0; | ||
| 845 | req->request.status = -EINPROGRESS; | ||
| 846 | req->tx = ep->is_in; | ||
| 847 | |||
| 848 | spin_lock_irqsave(&musb->lock, lockflags); | ||
| 849 | |||
| 850 | if (!list_empty(&ep->req_list)) { | ||
| 851 | status = -EBUSY; | ||
| 852 | goto cleanup; | ||
| 853 | } | ||
| 854 | |||
| 855 | switch (musb->ep0_state) { | ||
| 856 | case MUSB_EP0_STAGE_RX: /* control-OUT data */ | ||
| 857 | case MUSB_EP0_STAGE_TX: /* control-IN data */ | ||
| 858 | case MUSB_EP0_STAGE_ACKWAIT: /* zero-length data */ | ||
| 859 | status = 0; | ||
| 860 | break; | ||
| 861 | default: | ||
| 862 | DBG(1, "ep0 request queued in state %d\n", | ||
| 863 | musb->ep0_state); | ||
| 864 | status = -EINVAL; | ||
| 865 | goto cleanup; | ||
| 866 | } | ||
| 867 | |||
| 868 | /* add request to the list */ | ||
| 869 | list_add_tail(&(req->request.list), &(ep->req_list)); | ||
| 870 | |||
| 871 | DBG(3, "queue to %s (%s), length=%d\n", | ||
| 872 | ep->name, ep->is_in ? "IN/TX" : "OUT/RX", | ||
| 873 | req->request.length); | ||
| 874 | |||
| 875 | musb_ep_select(musb->mregs, 0); | ||
| 876 | |||
| 877 | /* sequence #1, IN ... start writing the data */ | ||
| 878 | if (musb->ep0_state == MUSB_EP0_STAGE_TX) | ||
| 879 | ep0_txstate(musb); | ||
| 880 | |||
| 881 | /* sequence #3, no-data ... issue IN status */ | ||
| 882 | else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) { | ||
| 883 | if (req->request.length) | ||
| 884 | status = -EINVAL; | ||
| 885 | else { | ||
| 886 | musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; | ||
| 887 | musb_writew(regs, MUSB_CSR0, | ||
| 888 | musb->ackpend | MUSB_CSR0_P_DATAEND); | ||
| 889 | musb->ackpend = 0; | ||
| 890 | musb_g_ep0_giveback(ep->musb, r); | ||
| 891 | } | ||
| 892 | |||
| 893 | /* else for sequence #2 (OUT), caller provides a buffer | ||
| 894 | * before the next packet arrives. deferred responses | ||
| 895 | * (after SETUP is acked) are racy. | ||
| 896 | */ | ||
| 897 | } else if (musb->ackpend) { | ||
| 898 | musb_writew(regs, MUSB_CSR0, musb->ackpend); | ||
| 899 | musb->ackpend = 0; | ||
| 900 | } | ||
| 901 | |||
| 902 | cleanup: | ||
| 903 | spin_unlock_irqrestore(&musb->lock, lockflags); | ||
| 904 | return status; | ||
| 905 | } | ||
| 906 | |||
| 907 | static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req) | ||
| 908 | { | ||
| 909 | /* we just won't support this */ | ||
| 910 | return -EINVAL; | ||
| 911 | } | ||
| 912 | |||
| 913 | static int musb_g_ep0_halt(struct usb_ep *e, int value) | ||
| 914 | { | ||
| 915 | struct musb_ep *ep; | ||
| 916 | struct musb *musb; | ||
| 917 | void __iomem *base, *regs; | ||
| 918 | unsigned long flags; | ||
| 919 | int status; | ||
| 920 | u16 csr; | ||
| 921 | |||
| 922 | if (!e || !value) | ||
| 923 | return -EINVAL; | ||
| 924 | |||
| 925 | ep = to_musb_ep(e); | ||
| 926 | musb = ep->musb; | ||
| 927 | base = musb->mregs; | ||
| 928 | regs = musb->control_ep->regs; | ||
| 929 | status = 0; | ||
| 930 | |||
| 931 | spin_lock_irqsave(&musb->lock, flags); | ||
| 932 | |||
| 933 | if (!list_empty(&ep->req_list)) { | ||
| 934 | status = -EBUSY; | ||
| 935 | goto cleanup; | ||
| 936 | } | ||
| 937 | |||
| 938 | musb_ep_select(base, 0); | ||
| 939 | csr = musb->ackpend; | ||
| 940 | |||
| 941 | switch (musb->ep0_state) { | ||
| 942 | |||
| 943 | /* Stalls are usually issued after parsing SETUP packet, either | ||
| 944 | * directly in irq context from setup() or else later. | ||
| 945 | */ | ||
| 946 | case MUSB_EP0_STAGE_TX: /* control-IN data */ | ||
| 947 | case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */ | ||
| 948 | case MUSB_EP0_STAGE_RX: /* control-OUT data */ | ||
| 949 | csr = musb_readw(regs, MUSB_CSR0); | ||
| 950 | /* FALLTHROUGH */ | ||
| 951 | |||
| 952 | /* It's also OK to issue stalls during callbacks when a non-empty | ||
| 953 | * DATA stage buffer has been read (or even written). | ||
| 954 | */ | ||
| 955 | case MUSB_EP0_STAGE_STATUSIN: /* control-OUT status */ | ||
| 956 | case MUSB_EP0_STAGE_STATUSOUT: /* control-IN status */ | ||
| 957 | |||
| 958 | csr |= MUSB_CSR0_P_SENDSTALL; | ||
| 959 | musb_writew(regs, MUSB_CSR0, csr); | ||
| 960 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
| 961 | musb->ackpend = 0; | ||
| 962 | break; | ||
| 963 | default: | ||
| 964 | DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state); | ||
| 965 | status = -EINVAL; | ||
| 966 | } | ||
| 967 | |||
| 968 | cleanup: | ||
| 969 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 970 | return status; | ||
| 971 | } | ||
| 972 | |||
| 973 | const struct usb_ep_ops musb_g_ep0_ops = { | ||
| 974 | .enable = musb_g_ep0_enable, | ||
| 975 | .disable = musb_g_ep0_disable, | ||
| 976 | .alloc_request = musb_alloc_request, | ||
| 977 | .free_request = musb_free_request, | ||
| 978 | .queue = musb_g_ep0_queue, | ||
| 979 | .dequeue = musb_g_ep0_dequeue, | ||
| 980 | .set_halt = musb_g_ep0_halt, | ||
| 981 | }; | ||
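Gadget drivers never call this ops table directly; they reach it through the usb_ep wrappers in <linux/usb/gadget.h>. As a small hedged example (the function name is hypothetical), protocol-stalling ep0 from a gadget driver goes through usb_ep_set_halt() and lands in musb_g_ep0_halt() above:

	/* Illustrative only: gadget-side view of the ops table above. */
	static void my_reject_ep0(struct usb_gadget *gadget)
	{
		/* dispatches to musb_g_ep0_halt(ep, 1) via gadget->ep0->ops */
		if (usb_ep_set_halt(gadget->ep0))
			pr_debug("ep0 halt refused (request pending or bad state)\n");
	}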
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c new file mode 100644 index 000000000000..8b4be012669a --- /dev/null +++ b/drivers/usb/musb/musb_host.c | |||
| @@ -0,0 +1,2170 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG driver host support | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * version 2 as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 32 | * | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/module.h> | ||
| 36 | #include <linux/kernel.h> | ||
| 37 | #include <linux/delay.h> | ||
| 38 | #include <linux/sched.h> | ||
| 39 | #include <linux/slab.h> | ||
| 40 | #include <linux/errno.h> | ||
| 41 | #include <linux/init.h> | ||
| 42 | #include <linux/list.h> | ||
| 43 | |||
| 44 | #include "musb_core.h" | ||
| 45 | #include "musb_host.h" | ||
| 46 | |||
| 47 | |||
| 48 | /* MUSB HOST status 22-mar-2006 | ||
| 49 | * | ||
| 50 | * - There's still lots of partial code duplication for fault paths, so | ||
| 51 | * they aren't handled as consistently as they need to be. | ||
| 52 | * | ||
| 53 | * - PIO mostly behaved when last tested. | ||
| 54 | * + including ep0, with all usbtest cases 9, 10 | ||
| 55 | * + usbtest 14 (ep0out) doesn't seem to run at all | ||
| 56 | * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest | ||
| 57 | * configurations, but otherwise double buffering passes basic tests. | ||
| 58 | * + for 2.6.N, for N > ~10, needs API changes for hcd framework. | ||
| 59 | * | ||
| 60 | * - DMA (CPPI) ... partially behaves, not currently recommended | ||
| 61 | * + about 1/15 the speed of typical EHCI implementations (PCI) | ||
| 62 | * + RX, all too often reqpkt seems to misbehave after tx | ||
| 63 | * + TX, no known issues (other than evident silicon issue) | ||
| 64 | * | ||
| 65 | * - DMA (Mentor/OMAP) ...has at least toggle update problems | ||
| 66 | * | ||
| 67 | * - Still no traffic scheduling code to make NAKing for bulk or control | ||
| 68 | * transfers unable to starve other requests; or to make efficient use | ||
| 69 | * of hardware with periodic transfers. (Note that network drivers | ||
| 70 | * commonly post bulk reads that stay pending for a long time; these | ||
| 71 | * would make very visible trouble.) | ||
| 72 | * | ||
| 73 | * - Not tested with HNP, but some SRP paths seem to behave. | ||
| 74 | * | ||
| 75 | * NOTE 24-August-2006: | ||
| 76 | * | ||
| 77 | * - Bulk traffic finally uses both sides of hardware ep1, freeing up an | ||
| 78 | * extra endpoint for periodic use enabling hub + keybd + mouse. That | ||
| 79 | * mostly works, except that with "usbnet" it's easy to trigger cases | ||
| 80 | * with "ping" where RX loses. (a) ping to davinci, even "ping -f", | ||
| 81 | * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses | ||
| 82 | * although ARP RX wins. (That test was done with a full speed link.) | ||
| 83 | */ | ||
| 84 | |||
| 85 | |||
| 86 | /* | ||
| 87 | * NOTE on endpoint usage: | ||
| 88 | * | ||
| 89 | * CONTROL transfers all go through ep0. BULK ones go through dedicated IN | ||
| 90 | * and OUT endpoints ... hardware is dedicated for those "async" queue(s). | ||
| 91 | * | ||
| 92 | * (Yes, bulk _could_ use more of the endpoints than that, and would even | ||
| 93 | * benefit from it ... one remote device may easily be NAKing while others | ||
| 94 | * need to perform transfers in that same direction. The same thing could | ||
| 95 | * be done in software though, assuming dma cooperates.) | ||
| 96 | * | ||
| 97 | * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints. | ||
| 98 | * So far that scheduling is both dumb and optimistic: the endpoint will be | ||
| 99 | * "claimed" until its software queue is no longer refilled. No multiplexing | ||
| 100 | * of transfers between endpoints, or anything clever. | ||
| 101 | */ | ||
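A minimal sketch of that "claim until the queue drains" policy, assuming the struct musb fields used elsewhere in the driver (nr_endpoints, plus the per-endpoint in_qh/out_qh pointers seen in this file); it deliberately ignores FIFO sizing and the control/bulk rings that the driver's real scheduling handles:

	/* Illustrative only: pick an unclaimed hardware endpoint for a new
	 * periodic qh.  ep0 is control-only and ep1 is assumed taken by
	 * bulk, per the note above.
	 */
	static struct musb_hw_ep *claim_periodic_ep(struct musb *musb, int is_in)
	{
		int i;

		for (i = 2; i < musb->nr_endpoints; i++) {
			struct musb_hw_ep *hw_ep = musb->endpoints + i;

			if (is_in ? !hw_ep->in_qh : !hw_ep->out_qh)
				return hw_ep;	/* free in this direction */
		}
		return NULL;	/* caller would have to defer or fail the URB */
	}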
| 102 | |||
| 103 | |||
| 104 | static void musb_ep_program(struct musb *musb, u8 epnum, | ||
| 105 | struct urb *urb, unsigned int nOut, | ||
| 106 | u8 *buf, u32 len); | ||
| 107 | |||
| 108 | /* | ||
| 109 | * Clear TX fifo. Needed to avoid BABBLE errors. | ||
| 110 | */ | ||
| 111 | static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) | ||
| 112 | { | ||
| 113 | void __iomem *epio = ep->regs; | ||
| 114 | u16 csr; | ||
| 115 | int retries = 1000; | ||
| 116 | |||
| 117 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 118 | while (csr & MUSB_TXCSR_FIFONOTEMPTY) { | ||
| 119 | DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr); | ||
| 120 | csr |= MUSB_TXCSR_FLUSHFIFO; | ||
| 121 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 122 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 123 | if (retries-- < 1) { | ||
| 124 | ERR("Could not flush host TX fifo: csr: %04x\n", csr); | ||
| 125 | return; | ||
| 126 | } | ||
| 127 | mdelay(1); | ||
| 128 | } | ||
| 129 | } | ||
| 130 | |||
| 131 | /* | ||
| 132 | * Start transmit. Caller is responsible for locking shared resources. | ||
| 133 | * musb must be locked. | ||
| 134 | */ | ||
| 135 | static inline void musb_h_tx_start(struct musb_hw_ep *ep) | ||
| 136 | { | ||
| 137 | u16 txcsr; | ||
| 138 | |||
| 139 | /* NOTE: no locks here; caller should lock and select EP */ | ||
| 140 | if (ep->epnum) { | ||
| 141 | txcsr = musb_readw(ep->regs, MUSB_TXCSR); | ||
| 142 | txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS; | ||
| 143 | musb_writew(ep->regs, MUSB_TXCSR, txcsr); | ||
| 144 | } else { | ||
| 145 | txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY; | ||
| 146 | musb_writew(ep->regs, MUSB_CSR0, txcsr); | ||
| 147 | } | ||
| 148 | |||
| 149 | } | ||
| 150 | |||
| 151 | static inline void cppi_host_txdma_start(struct musb_hw_ep *ep) | ||
| 152 | { | ||
| 153 | u16 txcsr; | ||
| 154 | |||
| 155 | /* NOTE: no locks here; caller should lock and select EP */ | ||
| 156 | txcsr = musb_readw(ep->regs, MUSB_TXCSR); | ||
| 157 | txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS; | ||
| 158 | musb_writew(ep->regs, MUSB_TXCSR, txcsr); | ||
| 159 | } | ||
| 160 | |||
| 161 | /* | ||
| 162 | * Start the URB at the front of an endpoint's queue | ||
| 163 | * end must be claimed from the caller. | ||
| 164 | * | ||
| 165 | * Context: controller locked, irqs blocked | ||
| 166 | */ | ||
| 167 | static void | ||
| 168 | musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) | ||
| 169 | { | ||
| 170 | u16 frame; | ||
| 171 | u32 len; | ||
| 172 | void *buf; | ||
| 173 | void __iomem *mbase = musb->mregs; | ||
| 174 | struct urb *urb = next_urb(qh); | ||
| 175 | struct musb_hw_ep *hw_ep = qh->hw_ep; | ||
| 176 | unsigned pipe = urb->pipe; | ||
| 177 | u8 address = usb_pipedevice(pipe); | ||
| 178 | int epnum = hw_ep->epnum; | ||
| 179 | |||
| 180 | /* initialize software qh state */ | ||
| 181 | qh->offset = 0; | ||
| 182 | qh->segsize = 0; | ||
| 183 | |||
| 184 | /* gather right source of data */ | ||
| 185 | switch (qh->type) { | ||
| 186 | case USB_ENDPOINT_XFER_CONTROL: | ||
| 187 | /* control transfers always start with SETUP */ | ||
| 188 | is_in = 0; | ||
| 189 | hw_ep->out_qh = qh; | ||
| 190 | musb->ep0_stage = MUSB_EP0_START; | ||
| 191 | buf = urb->setup_packet; | ||
| 192 | len = 8; | ||
| 193 | break; | ||
| 194 | case USB_ENDPOINT_XFER_ISOC: | ||
| 195 | qh->iso_idx = 0; | ||
| 196 | qh->frame = 0; | ||
| 197 | buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset; | ||
| 198 | len = urb->iso_frame_desc[0].length; | ||
| 199 | break; | ||
| 200 | default: /* bulk, interrupt */ | ||
| 201 | buf = urb->transfer_buffer; | ||
| 202 | len = urb->transfer_buffer_length; | ||
| 203 | } | ||
| 204 | |||
| 205 | DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", | ||
| 206 | qh, urb, address, qh->epnum, | ||
| 207 | is_in ? "in" : "out", | ||
| 208 | ({char *s; switch (qh->type) { | ||
| 209 | case USB_ENDPOINT_XFER_CONTROL: s = ""; break; | ||
| 210 | case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break; | ||
| 211 | case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break; | ||
| 212 | default: s = "-intr"; break; | ||
| 213 | }; s; }), | ||
| 214 | epnum, buf, len); | ||
| 215 | |||
| 216 | /* Configure endpoint */ | ||
| 217 | if (is_in || hw_ep->is_shared_fifo) | ||
| 218 | hw_ep->in_qh = qh; | ||
| 219 | else | ||
| 220 | hw_ep->out_qh = qh; | ||
| 221 | musb_ep_program(musb, epnum, urb, !is_in, buf, len); | ||
| 222 | |||
| 223 | /* transmit may have more work: start it when it is time */ | ||
| 224 | if (is_in) | ||
| 225 | return; | ||
| 226 | |||
| 227 | /* determine if the time is right for a periodic transfer */ | ||
| 228 | switch (qh->type) { | ||
| 229 | case USB_ENDPOINT_XFER_ISOC: | ||
| 230 | case USB_ENDPOINT_XFER_INT: | ||
| 231 | DBG(3, "check whether there's still time for periodic Tx\n"); | ||
| 232 | qh->iso_idx = 0; | ||
| 233 | frame = musb_readw(mbase, MUSB_FRAME); | ||
| 234 | /* FIXME this doesn't implement that scheduling policy ... | ||
| 235 | * or handle framecounter wrapping | ||
| 236 | */ | ||
| 237 | if ((urb->transfer_flags & URB_ISO_ASAP) | ||
| 238 | || (frame >= urb->start_frame)) { | ||
| 239 | /* REVISIT the SOF irq handler shouldn't duplicate | ||
| 240 | * this code; and we don't init urb->start_frame... | ||
| 241 | */ | ||
| 242 | qh->frame = 0; | ||
| 243 | goto start; | ||
| 244 | } else { | ||
| 245 | qh->frame = urb->start_frame; | ||
| 246 | /* enable SOF interrupt so we can count down */ | ||
| 247 | DBG(1, "SOF for %d\n", epnum); | ||
| 248 | #if 1 /* ifndef CONFIG_ARCH_DAVINCI */ | ||
| 249 | musb_writeb(mbase, MUSB_INTRUSBE, 0xff); | ||
| 250 | #endif | ||
| 251 | } | ||
| 252 | break; | ||
| 253 | default: | ||
| 254 | start: | ||
| 255 | DBG(4, "Start TX%d %s\n", epnum, | ||
| 256 | hw_ep->tx_channel ? "dma" : "pio"); | ||
| 257 | |||
| 258 | if (!hw_ep->tx_channel) | ||
| 259 | musb_h_tx_start(hw_ep); | ||
| 260 | else if (is_cppi_enabled() || tusb_dma_omap()) | ||
| 261 | cppi_host_txdma_start(hw_ep); | ||
| 262 | } | ||
| 263 | } | ||
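The FIXME in the periodic branch above notes that the start_frame test does not handle frame-counter wrapping. A conventional wrap-safe comparison (a sketch, assuming MUSB_FRAME carries the usual 11-bit USB frame number) looks like this:

	/* Illustrative only: wrap-safe "has start_frame been reached?" check,
	 * comparing 11-bit frame numbers within a half-range (1024) window.
	 */
	static inline bool frame_reached(u16 current_frame, u16 start_frame)
	{
		return ((current_frame - start_frame) & 0x7ff) < 0x400;
	}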
| 264 | |||
| 265 | /* caller owns controller lock, irqs are blocked */ | ||
| 266 | static void | ||
| 267 | __musb_giveback(struct musb *musb, struct urb *urb, int status) | ||
| 268 | __releases(musb->lock) | ||
| 269 | __acquires(musb->lock) | ||
| 270 | { | ||
| 271 | DBG(({ int level; switch (urb->status) { | ||
| 272 | case 0: | ||
| 273 | level = 4; | ||
| 274 | break; | ||
| 275 | /* common/boring faults */ | ||
| 276 | case -EREMOTEIO: | ||
| 277 | case -ESHUTDOWN: | ||
| 278 | case -ECONNRESET: | ||
| 279 | case -EPIPE: | ||
| 280 | level = 3; | ||
| 281 | break; | ||
| 282 | default: | ||
| 283 | level = 2; | ||
| 284 | break; | ||
| 285 | }; level; }), | ||
| 286 | "complete %p (%d), dev%d ep%d%s, %d/%d\n", | ||
| 287 | urb, urb->status, | ||
| 288 | usb_pipedevice(urb->pipe), | ||
| 289 | usb_pipeendpoint(urb->pipe), | ||
| 290 | usb_pipein(urb->pipe) ? "in" : "out", | ||
| 291 | urb->actual_length, urb->transfer_buffer_length | ||
| 292 | ); | ||
| 293 | |||
| 294 | spin_unlock(&musb->lock); | ||
| 295 | usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status); | ||
| 296 | spin_lock(&musb->lock); | ||
| 297 | } | ||
| 298 | |||
| 299 | /* for bulk/interrupt endpoints only */ | ||
| 300 | static inline void | ||
| 301 | musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb) | ||
| 302 | { | ||
| 303 | struct usb_device *udev = urb->dev; | ||
| 304 | u16 csr; | ||
| 305 | void __iomem *epio = ep->regs; | ||
| 306 | struct musb_qh *qh; | ||
| 307 | |||
| 308 | /* FIXME: the current Mentor DMA code seems to have | ||
| 309 | * problems getting toggle correct. | ||
| 310 | */ | ||
| 311 | |||
| 312 | if (is_in || ep->is_shared_fifo) | ||
| 313 | qh = ep->in_qh; | ||
| 314 | else | ||
| 315 | qh = ep->out_qh; | ||
| 316 | |||
| 317 | if (!is_in) { | ||
| 318 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 319 | usb_settoggle(udev, qh->epnum, 1, | ||
| 320 | (csr & MUSB_TXCSR_H_DATATOGGLE) | ||
| 321 | ? 1 : 0); | ||
| 322 | } else { | ||
| 323 | csr = musb_readw(epio, MUSB_RXCSR); | ||
| 324 | usb_settoggle(udev, qh->epnum, 0, | ||
| 325 | (csr & MUSB_RXCSR_H_DATATOGGLE) | ||
| 326 | ? 1 : 0); | ||
| 327 | } | ||
| 328 | } | ||
| 329 | |||
| 330 | /* caller owns controller lock, irqs are blocked */ | ||
| 331 | static struct musb_qh * | ||
| 332 | musb_giveback(struct musb_qh *qh, struct urb *urb, int status) | ||
| 333 | { | ||
| 334 | int is_in; | ||
| 335 | struct musb_hw_ep *ep = qh->hw_ep; | ||
| 336 | struct musb *musb = ep->musb; | ||
| 337 | int ready = qh->is_ready; | ||
| 338 | |||
| 339 | if (ep->is_shared_fifo) | ||
| 340 | is_in = 1; | ||
| 341 | else | ||
| 342 | is_in = usb_pipein(urb->pipe); | ||
| 343 | |||
| 344 | /* save toggle eagerly, for paranoia */ | ||
| 345 | switch (qh->type) { | ||
| 346 | case USB_ENDPOINT_XFER_BULK: | ||
| 347 | case USB_ENDPOINT_XFER_INT: | ||
| 348 | musb_save_toggle(ep, is_in, urb); | ||
| 349 | break; | ||
| 350 | case USB_ENDPOINT_XFER_ISOC: | ||
| 351 | if (status == 0 && urb->error_count) | ||
| 352 | status = -EXDEV; | ||
| 353 | break; | ||
| 354 | } | ||
| 355 | |||
| 356 | usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb); | ||
| 357 | |||
| 358 | qh->is_ready = 0; | ||
| 359 | __musb_giveback(musb, urb, status); | ||
| 360 | qh->is_ready = ready; | ||
| 361 | |||
| 362 | /* reclaim resources (and bandwidth) ASAP; deschedule it, and | ||
| 363 | * invalidate qh as soon as list_empty(&hep->urb_list) | ||
| 364 | */ | ||
| 365 | if (list_empty(&qh->hep->urb_list)) { | ||
| 366 | struct list_head *head; | ||
| 367 | |||
| 368 | if (is_in) | ||
| 369 | ep->rx_reinit = 1; | ||
| 370 | else | ||
| 371 | ep->tx_reinit = 1; | ||
| 372 | |||
| 373 | /* clobber old pointers to this qh */ | ||
| 374 | if (is_in || ep->is_shared_fifo) | ||
| 375 | ep->in_qh = NULL; | ||
| 376 | else | ||
| 377 | ep->out_qh = NULL; | ||
| 378 | qh->hep->hcpriv = NULL; | ||
| 379 | |||
| 380 | switch (qh->type) { | ||
| 381 | |||
| 382 | case USB_ENDPOINT_XFER_ISOC: | ||
| 383 | case USB_ENDPOINT_XFER_INT: | ||
| 384 | /* this is where periodic bandwidth should be | ||
| 385 | * de-allocated if it's tracked and allocated; | ||
| 386 | * and where we'd update the schedule tree... | ||
| 387 | */ | ||
| 388 | musb->periodic[ep->epnum] = NULL; | ||
| 389 | kfree(qh); | ||
| 390 | qh = NULL; | ||
| 391 | break; | ||
| 392 | |||
| 393 | case USB_ENDPOINT_XFER_CONTROL: | ||
| 394 | case USB_ENDPOINT_XFER_BULK: | ||
| 395 | /* fifo policy for these lists, except that NAKing | ||
| 396 | * should rotate a qh to the end (for fairness). | ||
| 397 | */ | ||
| 398 | head = qh->ring.prev; | ||
| 399 | list_del(&qh->ring); | ||
| 400 | kfree(qh); | ||
| 401 | qh = first_qh(head); | ||
| 402 | break; | ||
| 403 | } | ||
| 404 | } | ||
| 405 | return qh; | ||
| 406 | } | ||
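The "NAKing should rotate a qh to the end (for fairness)" idea mentioned in the fifo-policy comment above is not implemented at this point; if it were, the rotation itself is a one-liner on the qh ring (a sketch, with the ring head supplied by the caller):

	/* Illustrative only: rotate a NAKing qh to the tail of its ring so
	 * other control/bulk qhs sharing that hardware endpoint get a turn.
	 */
	static void rotate_qh_on_nak(struct list_head *ring_head, struct musb_qh *qh)
	{
		list_move_tail(&qh->ring, ring_head);
	}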
| 407 | |||
| 408 | /* | ||
| 409 | * Advance this hardware endpoint's queue, completing the specified urb and | ||
| 410 | * advancing to either the next urb queued to that qh, or else invalidating | ||
| 411 | * that qh and advancing to the next qh scheduled after the current one. | ||
| 412 | * | ||
| 413 | * Context: caller owns controller lock, irqs are blocked | ||
| 414 | */ | ||
| 415 | static void | ||
| 416 | musb_advance_schedule(struct musb *musb, struct urb *urb, | ||
| 417 | struct musb_hw_ep *hw_ep, int is_in) | ||
| 418 | { | ||
| 419 | struct musb_qh *qh; | ||
| 420 | |||
| 421 | if (is_in || hw_ep->is_shared_fifo) | ||
| 422 | qh = hw_ep->in_qh; | ||
| 423 | else | ||
| 424 | qh = hw_ep->out_qh; | ||
| 425 | |||
| 426 | if (urb->status == -EINPROGRESS) | ||
| 427 | qh = musb_giveback(qh, urb, 0); | ||
| 428 | else | ||
| 429 | qh = musb_giveback(qh, urb, urb->status); | ||
| 430 | |||
| 431 | if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) { | ||
| 432 | DBG(4, "... next ep%d %cX urb %p\n", | ||
| 433 | hw_ep->epnum, is_in ? 'R' : 'T', | ||
| 434 | next_urb(qh)); | ||
| 435 | musb_start_urb(musb, is_in, qh); | ||
| 436 | } | ||
| 437 | } | ||
| 438 | |||
| 439 | static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr) | ||
| 440 | { | ||
| 441 | /* we don't want fifo to fill itself again; | ||
| 442 | * ignore dma (various models), | ||
| 443 | * leave toggle alone (may not have been saved yet) | ||
| 444 | */ | ||
| 445 | csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY; | ||
| 446 | csr &= ~(MUSB_RXCSR_H_REQPKT | ||
| 447 | | MUSB_RXCSR_H_AUTOREQ | ||
| 448 | | MUSB_RXCSR_AUTOCLEAR); | ||
| 449 | |||
| 450 | /* write 2x to allow double buffering */ | ||
| 451 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | ||
| 452 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | ||
| 453 | |||
| 454 | /* flush writebuffer */ | ||
| 455 | return musb_readw(hw_ep->regs, MUSB_RXCSR); | ||
| 456 | } | ||
| 457 | |||
| 458 | /* | ||
| 459 | * PIO RX for a packet (or part of it). | ||
| 460 | */ | ||
| 461 | static bool | ||
| 462 | musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err) | ||
| 463 | { | ||
| 464 | u16 rx_count; | ||
| 465 | u8 *buf; | ||
| 466 | u16 csr; | ||
| 467 | bool done = false; | ||
| 468 | u32 length; | ||
| 469 | int do_flush = 0; | ||
| 470 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
| 471 | void __iomem *epio = hw_ep->regs; | ||
| 472 | struct musb_qh *qh = hw_ep->in_qh; | ||
| 473 | int pipe = urb->pipe; | ||
| 474 | void *buffer = urb->transfer_buffer; | ||
| 475 | |||
| 476 | /* musb_ep_select(mbase, epnum); */ | ||
| 477 | rx_count = musb_readw(epio, MUSB_RXCOUNT); | ||
| 478 | DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count, | ||
| 479 | urb->transfer_buffer, qh->offset, | ||
| 480 | urb->transfer_buffer_length); | ||
| 481 | |||
| 482 | /* unload FIFO */ | ||
| 483 | if (usb_pipeisoc(pipe)) { | ||
| 484 | int status = 0; | ||
| 485 | struct usb_iso_packet_descriptor *d; | ||
| 486 | |||
| 487 | if (iso_err) { | ||
| 488 | status = -EILSEQ; | ||
| 489 | urb->error_count++; | ||
| 490 | } | ||
| 491 | |||
| 492 | d = urb->iso_frame_desc + qh->iso_idx; | ||
| 493 | buf = buffer + d->offset; | ||
| 494 | length = d->length; | ||
| 495 | if (rx_count > length) { | ||
| 496 | if (status == 0) { | ||
| 497 | status = -EOVERFLOW; | ||
| 498 | urb->error_count++; | ||
| 499 | } | ||
| 500 | DBG(2, "** OVERFLOW %d into %d\n", rx_count, length); | ||
| 501 | do_flush = 1; | ||
| 502 | } else | ||
| 503 | length = rx_count; | ||
| 504 | urb->actual_length += length; | ||
| 505 | d->actual_length = length; | ||
| 506 | |||
| 507 | d->status = status; | ||
| 508 | |||
| 509 | /* see if we are done */ | ||
| 510 | done = (++qh->iso_idx >= urb->number_of_packets); | ||
| 511 | } else { | ||
| 512 | /* non-isoch */ | ||
| 513 | buf = buffer + qh->offset; | ||
| 514 | length = urb->transfer_buffer_length - qh->offset; | ||
| 515 | if (rx_count > length) { | ||
| 516 | if (urb->status == -EINPROGRESS) | ||
| 517 | urb->status = -EOVERFLOW; | ||
| 518 | DBG(2, "** OVERFLOW %d into %d\n", rx_count, length); | ||
| 519 | do_flush = 1; | ||
| 520 | } else | ||
| 521 | length = rx_count; | ||
| 522 | urb->actual_length += length; | ||
| 523 | qh->offset += length; | ||
| 524 | |||
| 525 | /* see if we are done */ | ||
| 526 | done = (urb->actual_length == urb->transfer_buffer_length) | ||
| 527 | || (rx_count < qh->maxpacket) | ||
| 528 | || (urb->status != -EINPROGRESS); | ||
| 529 | if (done | ||
| 530 | && (urb->status == -EINPROGRESS) | ||
| 531 | && (urb->transfer_flags & URB_SHORT_NOT_OK) | ||
| 532 | && (urb->actual_length | ||
| 533 | < urb->transfer_buffer_length)) | ||
| 534 | urb->status = -EREMOTEIO; | ||
| 535 | } | ||
| 536 | |||
| 537 | musb_read_fifo(hw_ep, length, buf); | ||
| 538 | |||
| 539 | csr = musb_readw(epio, MUSB_RXCSR); | ||
| 540 | csr |= MUSB_RXCSR_H_WZC_BITS; | ||
| 541 | if (unlikely(do_flush)) | ||
| 542 | musb_h_flush_rxfifo(hw_ep, csr); | ||
| 543 | else { | ||
| 544 | /* REVISIT this assumes AUTOCLEAR is never set */ | ||
| 545 | csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT); | ||
| 546 | if (!done) | ||
| 547 | csr |= MUSB_RXCSR_H_REQPKT; | ||
| 548 | musb_writew(epio, MUSB_RXCSR, csr); | ||
| 549 | } | ||
| 550 | |||
| 551 | return done; | ||
| 552 | } | ||
| 553 | |||
| 554 | /* we don't always need to reinit a given side of an endpoint... | ||
| 555 | * when we do, use the tx/rx reinit routines and then construct a new CSR | ||
| 556 | * to address data toggle, NYET, and DMA or PIO. | ||
| 557 | * | ||
| 558 | * it's possible that driver bugs (especially for DMA) or aborting a | ||
| 559 | * transfer might have left the endpoint busier than it should be. | ||
| 560 | * the busy/not-empty tests are basically paranoia. | ||
| 561 | */ | ||
| 562 | static void | ||
| 563 | musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) | ||
| 564 | { | ||
| 565 | u16 csr; | ||
| 566 | |||
| 567 | /* NOTE: we know the "rx" fifo reinit never triggers for ep0. | ||
| 568 | * That always uses tx_reinit since ep0 repurposes TX register | ||
| 569 | * offsets; the initial SETUP packet is also a kind of OUT. | ||
| 570 | */ | ||
| 571 | |||
| 572 | /* if programmed for Tx, put it in RX mode */ | ||
| 573 | if (ep->is_shared_fifo) { | ||
| 574 | csr = musb_readw(ep->regs, MUSB_TXCSR); | ||
| 575 | if (csr & MUSB_TXCSR_MODE) { | ||
| 576 | musb_h_tx_flush_fifo(ep); | ||
| 577 | musb_writew(ep->regs, MUSB_TXCSR, | ||
| 578 | MUSB_TXCSR_FRCDATATOG); | ||
| 579 | } | ||
| 580 | /* clear mode (and everything else) to enable Rx */ | ||
| 581 | musb_writew(ep->regs, MUSB_TXCSR, 0); | ||
| 582 | |||
| 583 | /* scrub all previous state, clearing toggle */ | ||
| 584 | } else { | ||
| 585 | csr = musb_readw(ep->regs, MUSB_RXCSR); | ||
| 586 | if (csr & MUSB_RXCSR_RXPKTRDY) | ||
| 587 | WARNING("rx%d, packet/%d ready?\n", ep->epnum, | ||
| 588 | musb_readw(ep->regs, MUSB_RXCOUNT)); | ||
| 589 | |||
| 590 | musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); | ||
| 591 | } | ||
| 592 | |||
| 593 | /* target addr and (for multipoint) hub addr/port */ | ||
| 594 | if (musb->is_multipoint) { | ||
| 595 | musb_writeb(ep->target_regs, MUSB_RXFUNCADDR, | ||
| 596 | qh->addr_reg); | ||
| 597 | musb_writeb(ep->target_regs, MUSB_RXHUBADDR, | ||
| 598 | qh->h_addr_reg); | ||
| 599 | musb_writeb(ep->target_regs, MUSB_RXHUBPORT, | ||
| 600 | qh->h_port_reg); | ||
| 601 | } else | ||
| 602 | musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg); | ||
| 603 | |||
| 604 | /* protocol/endpoint, interval/NAKlimit, i/o size */ | ||
| 605 | musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); | ||
| 606 | musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); | ||
| 607 | /* NOTE: bulk combining rewrites high bits of maxpacket */ | ||
| 608 | musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket); | ||
| 609 | |||
| 610 | ep->rx_reinit = 0; | ||
| 611 | } | ||
| 612 | |||
| 613 | |||
| 614 | /* | ||
| 615 | * Program an HDRC endpoint as per the given URB | ||
| 616 | * Context: irqs blocked, controller lock held | ||
| 617 | */ | ||
| 618 | static void musb_ep_program(struct musb *musb, u8 epnum, | ||
| 619 | struct urb *urb, unsigned int is_out, | ||
| 620 | u8 *buf, u32 len) | ||
| 621 | { | ||
| 622 | struct dma_controller *dma_controller; | ||
| 623 | struct dma_channel *dma_channel; | ||
| 624 | u8 dma_ok; | ||
| 625 | void __iomem *mbase = musb->mregs; | ||
| 626 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
| 627 | void __iomem *epio = hw_ep->regs; | ||
| 628 | struct musb_qh *qh; | ||
| 629 | u16 packet_sz; | ||
| 630 | |||
| 631 | if (!is_out || hw_ep->is_shared_fifo) | ||
| 632 | qh = hw_ep->in_qh; | ||
| 633 | else | ||
| 634 | qh = hw_ep->out_qh; | ||
| 635 | |||
| 636 | packet_sz = qh->maxpacket; | ||
| 637 | |||
| 638 | DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s " | ||
| 639 | "h_addr%02x h_port%02x bytes %d\n", | ||
| 640 | is_out ? "-->" : "<--", | ||
| 641 | epnum, urb, urb->dev->speed, | ||
| 642 | qh->addr_reg, qh->epnum, is_out ? "out" : "in", | ||
| 643 | qh->h_addr_reg, qh->h_port_reg, | ||
| 644 | len); | ||
| 645 | |||
| 646 | musb_ep_select(mbase, epnum); | ||
| 647 | |||
| 648 | /* candidate for DMA? */ | ||
| 649 | dma_controller = musb->dma_controller; | ||
| 650 | if (is_dma_capable() && epnum && dma_controller) { | ||
| 651 | dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel; | ||
| 652 | if (!dma_channel) { | ||
| 653 | dma_channel = dma_controller->channel_alloc( | ||
| 654 | dma_controller, hw_ep, is_out); | ||
| 655 | if (is_out) | ||
| 656 | hw_ep->tx_channel = dma_channel; | ||
| 657 | else | ||
| 658 | hw_ep->rx_channel = dma_channel; | ||
| 659 | } | ||
| 660 | } else | ||
| 661 | dma_channel = NULL; | ||
| 662 | |||
| 663 | /* make sure we clear DMAEnab, autoSet bits from previous run */ | ||
| 664 | |||
| 665 | /* OUT/transmit/EP0 or IN/receive? */ | ||
| 666 | if (is_out) { | ||
| 667 | u16 csr; | ||
| 668 | u16 int_txe; | ||
| 669 | u16 load_count; | ||
| 670 | |||
| 671 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 672 | |||
| 673 | /* disable interrupt in case we flush */ | ||
| 674 | int_txe = musb_readw(mbase, MUSB_INTRTXE); | ||
| 675 | musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); | ||
| 676 | |||
| 677 | /* general endpoint setup */ | ||
| 678 | if (epnum) { | ||
| 679 | /* ASSERT: TXCSR_DMAENAB was already cleared */ | ||
| 680 | |||
| 681 | /* flush all old state, set default */ | ||
| 682 | musb_h_tx_flush_fifo(hw_ep); | ||
| 683 | csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT | ||
| 684 | | MUSB_TXCSR_DMAMODE | ||
| 685 | | MUSB_TXCSR_FRCDATATOG | ||
| 686 | | MUSB_TXCSR_H_RXSTALL | ||
| 687 | | MUSB_TXCSR_H_ERROR | ||
| 688 | | MUSB_TXCSR_TXPKTRDY | ||
| 689 | ); | ||
| 690 | csr |= MUSB_TXCSR_MODE; | ||
| 691 | |||
| 692 | if (usb_gettoggle(urb->dev, | ||
| 693 | qh->epnum, 1)) | ||
| 694 | csr |= MUSB_TXCSR_H_WR_DATATOGGLE | ||
| 695 | | MUSB_TXCSR_H_DATATOGGLE; | ||
| 696 | else | ||
| 697 | csr |= MUSB_TXCSR_CLRDATATOG; | ||
| 698 | |||
| 699 | /* twice in case of double packet buffering */ | ||
| 700 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 701 | /* REVISIT may need to clear FLUSHFIFO ... */ | ||
| 702 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 703 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 704 | } else { | ||
| 705 | /* endpoint 0: just flush */ | ||
| 706 | musb_writew(epio, MUSB_CSR0, | ||
| 707 | csr | MUSB_CSR0_FLUSHFIFO); | ||
| 708 | musb_writew(epio, MUSB_CSR0, | ||
| 709 | csr | MUSB_CSR0_FLUSHFIFO); | ||
| 710 | } | ||
| 711 | |||
| 712 | /* target addr and (for multipoint) hub addr/port */ | ||
| 713 | if (musb->is_multipoint) { | ||
| 714 | musb_writeb(mbase, | ||
| 715 | MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR), | ||
| 716 | qh->addr_reg); | ||
| 717 | musb_writeb(mbase, | ||
| 718 | MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR), | ||
| 719 | qh->h_addr_reg); | ||
| 720 | musb_writeb(mbase, | ||
| 721 | MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT), | ||
| 722 | qh->h_port_reg); | ||
| 723 | /* FIXME if !epnum, do the same for RX ... */ | ||
| 724 | } else | ||
| 725 | musb_writeb(mbase, MUSB_FADDR, qh->addr_reg); | ||
| 726 | |||
| 727 | /* protocol/endpoint/interval/NAKlimit */ | ||
| 728 | if (epnum) { | ||
| 729 | musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); | ||
| 730 | if (can_bulk_split(musb, qh->type)) | ||
| 731 | musb_writew(epio, MUSB_TXMAXP, | ||
| 732 | packet_sz | ||
| 733 | | ((hw_ep->max_packet_sz_tx / | ||
| 734 | packet_sz) - 1) << 11); | ||
| 735 | else | ||
| 736 | musb_writew(epio, MUSB_TXMAXP, | ||
| 737 | packet_sz); | ||
| 738 | musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); | ||
| 739 | } else { | ||
| 740 | musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); | ||
| 741 | if (musb->is_multipoint) | ||
| 742 | musb_writeb(epio, MUSB_TYPE0, | ||
| 743 | qh->type_reg); | ||
| 744 | } | ||
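		/* Worked example (illustrative numbers): with can_bulk_split(),
		 * a 512-byte TX FIFO carrying 64-byte bulk packets programs
		 * TXMAXP as 64 | ((512/64 - 1) << 11) = 0x3840; the low 11
		 * bits hold the USB maxpacket size, and the upper bits let
		 * the core split one FIFO load into up to 8 bus packets.
		 */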
| 745 | |||
| 746 | if (can_bulk_split(musb, qh->type)) | ||
| 747 | load_count = min((u32) hw_ep->max_packet_sz_tx, | ||
| 748 | len); | ||
| 749 | else | ||
| 750 | load_count = min((u32) packet_sz, len); | ||
| 751 | |||
| 752 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
| 753 | if (dma_channel) { | ||
| 754 | |||
| 755 | /* clear previous state */ | ||
| 756 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 757 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
| 758 | | MUSB_TXCSR_DMAMODE | ||
| 759 | | MUSB_TXCSR_DMAENAB); | ||
| 760 | csr |= MUSB_TXCSR_MODE; | ||
| 761 | musb_writew(epio, MUSB_TXCSR, | ||
| 762 | csr | MUSB_TXCSR_MODE); | ||
| 763 | |||
| 764 | qh->segsize = min(len, dma_channel->max_len); | ||
| 765 | |||
| 766 | if (qh->segsize <= packet_sz) | ||
| 767 | dma_channel->desired_mode = 0; | ||
| 768 | else | ||
| 769 | dma_channel->desired_mode = 1; | ||
| 770 | |||
| 771 | |||
| 772 | if (dma_channel->desired_mode == 0) { | ||
| 773 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
| 774 | | MUSB_TXCSR_DMAMODE); | ||
| 775 | csr |= (MUSB_TXCSR_DMAENAB); | ||
| 776 | /* against programming guide */ | ||
| 777 | } else | ||
| 778 | csr |= (MUSB_TXCSR_AUTOSET | ||
| 779 | | MUSB_TXCSR_DMAENAB | ||
| 780 | | MUSB_TXCSR_DMAMODE); | ||
| 781 | |||
| 782 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 783 | |||
| 784 | dma_ok = dma_controller->channel_program( | ||
| 785 | dma_channel, packet_sz, | ||
| 786 | dma_channel->desired_mode, | ||
| 787 | urb->transfer_dma, | ||
| 788 | qh->segsize); | ||
| 789 | if (dma_ok) { | ||
| 790 | load_count = 0; | ||
| 791 | } else { | ||
| 792 | dma_controller->channel_release(dma_channel); | ||
| 793 | if (is_out) | ||
| 794 | hw_ep->tx_channel = NULL; | ||
| 795 | else | ||
| 796 | hw_ep->rx_channel = NULL; | ||
| 797 | dma_channel = NULL; | ||
| 798 | } | ||
| 799 | } | ||
| 800 | #endif | ||
| 801 | |||
| 802 | /* candidate for DMA */ | ||
| 803 | if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { | ||
| 804 | |||
| 805 | /* program endpoint CSRs first, then setup DMA. | ||
| 806 | * assume CPPI setup succeeds. | ||
| 807 | * defer enabling dma. | ||
| 808 | */ | ||
| 809 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 810 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
| 811 | | MUSB_TXCSR_DMAMODE | ||
| 812 | | MUSB_TXCSR_DMAENAB); | ||
| 813 | csr |= MUSB_TXCSR_MODE; | ||
| 814 | musb_writew(epio, MUSB_TXCSR, | ||
| 815 | csr | MUSB_TXCSR_MODE); | ||
| 816 | |||
| 817 | dma_channel->actual_len = 0L; | ||
| 818 | qh->segsize = len; | ||
| 819 | |||
| 820 | /* TX uses "rndis" mode automatically, but needs help | ||
| 821 | * to identify the zero-length-final-packet case. | ||
| 822 | */ | ||
| 823 | dma_ok = dma_controller->channel_program( | ||
| 824 | dma_channel, packet_sz, | ||
| 825 | (urb->transfer_flags | ||
| 826 | & URB_ZERO_PACKET) | ||
| 827 | == URB_ZERO_PACKET, | ||
| 828 | urb->transfer_dma, | ||
| 829 | qh->segsize); | ||
| 830 | if (dma_ok) { | ||
| 831 | load_count = 0; | ||
| 832 | } else { | ||
| 833 | dma_controller->channel_release(dma_channel); | ||
| 834 | hw_ep->tx_channel = NULL; | ||
| 835 | dma_channel = NULL; | ||
| 836 | |||
| 837 | /* REVISIT there's an error path here that | ||
| 838 | * needs handling: can't do dma, but | ||
| 839 | * there's no pio buffer address... | ||
| 840 | */ | ||
| 841 | } | ||
| 842 | } | ||
| 843 | |||
| 844 | if (load_count) { | ||
| 845 | /* ASSERT: TXCSR_DMAENAB was already cleared */ | ||
| 846 | |||
| 847 | /* PIO to load FIFO */ | ||
| 848 | qh->segsize = load_count; | ||
| 849 | musb_write_fifo(hw_ep, load_count, buf); | ||
| 850 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 851 | csr &= ~(MUSB_TXCSR_DMAENAB | ||
| 852 | | MUSB_TXCSR_DMAMODE | ||
| 853 | | MUSB_TXCSR_AUTOSET); | ||
| 854 | /* write CSR */ | ||
| 855 | csr |= MUSB_TXCSR_MODE; | ||
| 856 | |||
| 857 | if (epnum) | ||
| 858 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 859 | } | ||
| 860 | |||
| 861 | /* re-enable interrupt */ | ||
| 862 | musb_writew(mbase, MUSB_INTRTXE, int_txe); | ||
| 863 | |||
| 864 | /* IN/receive */ | ||
| 865 | } else { | ||
| 866 | u16 csr; | ||
| 867 | |||
| 868 | if (hw_ep->rx_reinit) { | ||
| 869 | musb_rx_reinit(musb, qh, hw_ep); | ||
| 870 | |||
| 871 | /* init new state: toggle and NYET, maybe DMA later */ | ||
| 872 | if (usb_gettoggle(urb->dev, qh->epnum, 0)) | ||
| 873 | csr = MUSB_RXCSR_H_WR_DATATOGGLE | ||
| 874 | | MUSB_RXCSR_H_DATATOGGLE; | ||
| 875 | else | ||
| 876 | csr = 0; | ||
| 877 | if (qh->type == USB_ENDPOINT_XFER_INT) | ||
| 878 | csr |= MUSB_RXCSR_DISNYET; | ||
| 879 | |||
| 880 | } else { | ||
| 881 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); | ||
| 882 | |||
| 883 | if (csr & (MUSB_RXCSR_RXPKTRDY | ||
| 884 | | MUSB_RXCSR_DMAENAB | ||
| 885 | | MUSB_RXCSR_H_REQPKT)) | ||
| 886 | ERR("broken !rx_reinit, ep%d csr %04x\n", | ||
| 887 | hw_ep->epnum, csr); | ||
| 888 | |||
| 889 | /* scrub any stale state, leaving toggle alone */ | ||
| 890 | csr &= MUSB_RXCSR_DISNYET; | ||
| 891 | } | ||
| 892 | |||
| 893 | /* kick things off */ | ||
| 894 | |||
| 895 | if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { | ||
| 896 | /* candidate for DMA */ | ||
| 897 | if (dma_channel) { | ||
| 898 | dma_channel->actual_len = 0L; | ||
| 899 | qh->segsize = len; | ||
| 900 | |||
| 901 | /* AUTOREQ is in a DMA register */ | ||
| 902 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | ||
| 903 | csr = musb_readw(hw_ep->regs, | ||
| 904 | MUSB_RXCSR); | ||
| 905 | |||
| 906 | /* unless caller treats short rx transfers as | ||
| 907 | * errors, we dare not queue multiple transfers. | ||
| 908 | */ | ||
| 909 | dma_ok = dma_controller->channel_program( | ||
| 910 | dma_channel, packet_sz, | ||
| 911 | !(urb->transfer_flags | ||
| 912 | & URB_SHORT_NOT_OK), | ||
| 913 | urb->transfer_dma, | ||
| 914 | qh->segsize); | ||
| 915 | if (!dma_ok) { | ||
| 916 | dma_controller->channel_release( | ||
| 917 | dma_channel); | ||
| 918 | hw_ep->rx_channel = NULL; | ||
| 919 | dma_channel = NULL; | ||
| 920 | } else | ||
| 921 | csr |= MUSB_RXCSR_DMAENAB; | ||
| 922 | } | ||
| 923 | } | ||
| 924 | |||
| 925 | csr |= MUSB_RXCSR_H_REQPKT; | ||
| 926 | DBG(7, "RXCSR%d := %04x\n", epnum, csr); | ||
| 927 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | ||
| 928 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); | ||
| 929 | } | ||
| 930 | } | ||
| 931 | |||
| 932 | |||
| 933 | /* | ||
| 934 | * Service the default endpoint (ep0) as host. | ||
| 935 | * Return true until it's time to start the status stage. | ||
| 936 | */ | ||
| 937 | static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) | ||
| 938 | { | ||
| 939 | bool more = false; | ||
| 940 | u8 *fifo_dest = NULL; | ||
| 941 | u16 fifo_count = 0; | ||
| 942 | struct musb_hw_ep *hw_ep = musb->control_ep; | ||
| 943 | struct musb_qh *qh = hw_ep->in_qh; | ||
| 944 | struct usb_ctrlrequest *request; | ||
| 945 | |||
| 946 | switch (musb->ep0_stage) { | ||
| 947 | case MUSB_EP0_IN: | ||
| 948 | fifo_dest = urb->transfer_buffer + urb->actual_length; | ||
| 949 | fifo_count = min(len, ((u16) (urb->transfer_buffer_length | ||
| 950 | - urb->actual_length))); | ||
| 951 | if (fifo_count < len) | ||
| 952 | urb->status = -EOVERFLOW; | ||
| 953 | |||
| 954 | musb_read_fifo(hw_ep, fifo_count, fifo_dest); | ||
| 955 | |||
| 956 | urb->actual_length += fifo_count; | ||
| 957 | if (len < qh->maxpacket) { | ||
| 958 | /* always terminate on short read; it's | ||
| 959 | * rarely reported as an error. | ||
| 960 | */ | ||
| 961 | } else if (urb->actual_length < | ||
| 962 | urb->transfer_buffer_length) | ||
| 963 | more = true; | ||
| 964 | break; | ||
| 965 | case MUSB_EP0_START: | ||
| 966 | request = (struct usb_ctrlrequest *) urb->setup_packet; | ||
| 967 | |||
| 968 | if (!request->wLength) { | ||
| 969 | DBG(4, "start no-DATA\n"); | ||
| 970 | break; | ||
| 971 | } else if (request->bRequestType & USB_DIR_IN) { | ||
| 972 | DBG(4, "start IN-DATA\n"); | ||
| 973 | musb->ep0_stage = MUSB_EP0_IN; | ||
| 974 | more = true; | ||
| 975 | break; | ||
| 976 | } else { | ||
| 977 | DBG(4, "start OUT-DATA\n"); | ||
| 978 | musb->ep0_stage = MUSB_EP0_OUT; | ||
| 979 | more = true; | ||
| 980 | } | ||
| 981 | /* FALLTHROUGH */ | ||
| 982 | case MUSB_EP0_OUT: | ||
| 983 | fifo_count = min(qh->maxpacket, ((u16) | ||
| 984 | (urb->transfer_buffer_length | ||
| 985 | - urb->actual_length))); | ||
| 986 | |||
| 987 | if (fifo_count) { | ||
| 988 | fifo_dest = (u8 *) (urb->transfer_buffer | ||
| 989 | + urb->actual_length); | ||
| 990 | DBG(3, "Sending %d bytes to %p\n", | ||
| 991 | fifo_count, fifo_dest); | ||
| 992 | musb_write_fifo(hw_ep, fifo_count, fifo_dest); | ||
| 993 | |||
| 994 | urb->actual_length += fifo_count; | ||
| 995 | more = true; | ||
| 996 | } | ||
| 997 | break; | ||
| 998 | default: | ||
| 999 | ERR("bogus ep0 stage %d\n", musb->ep0_stage); | ||
| 1000 | break; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | return more; | ||
| 1004 | } | ||
| 1005 | |||
| 1006 | /* | ||
| 1007 | * Handle default endpoint interrupt as host. Only called in IRQ time | ||
| 1008 | * from the LinuxIsr() interrupt service routine. | ||
| 1009 | * | ||
| 1010 | * called with controller irqlocked | ||
| 1011 | */ | ||
| 1012 | irqreturn_t musb_h_ep0_irq(struct musb *musb) | ||
| 1013 | { | ||
| 1014 | struct urb *urb; | ||
| 1015 | u16 csr, len; | ||
| 1016 | int status = 0; | ||
| 1017 | void __iomem *mbase = musb->mregs; | ||
| 1018 | struct musb_hw_ep *hw_ep = musb->control_ep; | ||
| 1019 | void __iomem *epio = hw_ep->regs; | ||
| 1020 | struct musb_qh *qh = hw_ep->in_qh; | ||
| 1021 | bool complete = false; | ||
| 1022 | irqreturn_t retval = IRQ_NONE; | ||
| 1023 | |||
| 1024 | /* ep0 only has one queue, "in" */ | ||
| 1025 | urb = next_urb(qh); | ||
| 1026 | |||
| 1027 | musb_ep_select(mbase, 0); | ||
| 1028 | csr = musb_readw(epio, MUSB_CSR0); | ||
| 1029 | len = (csr & MUSB_CSR0_RXPKTRDY) | ||
| 1030 | ? musb_readb(epio, MUSB_COUNT0) | ||
| 1031 | : 0; | ||
| 1032 | |||
| 1033 | DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n", | ||
| 1034 | csr, qh, len, urb, musb->ep0_stage); | ||
| 1035 | |||
| 1036 | /* if we just did status stage, we are done */ | ||
| 1037 | if (MUSB_EP0_STATUS == musb->ep0_stage) { | ||
| 1038 | retval = IRQ_HANDLED; | ||
| 1039 | complete = true; | ||
| 1040 | } | ||
| 1041 | |||
| 1042 | /* prepare status */ | ||
| 1043 | if (csr & MUSB_CSR0_H_RXSTALL) { | ||
| 1044 | DBG(6, "STALLING ENDPOINT\n"); | ||
| 1045 | status = -EPIPE; | ||
| 1046 | |||
| 1047 | } else if (csr & MUSB_CSR0_H_ERROR) { | ||
| 1048 | DBG(2, "no response, csr0 %04x\n", csr); | ||
| 1049 | status = -EPROTO; | ||
| 1050 | |||
| 1051 | } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) { | ||
| 1052 | DBG(2, "control NAK timeout\n"); | ||
| 1053 | |||
| 1054 | /* NOTE: this code path would be a good place to PAUSE a | ||
| 1055 | * control transfer, if another one is queued, so that | ||
| 1056 | * ep0 is more likely to stay busy. | ||
| 1057 | * | ||
| 1058 | * if (qh->ring.next != &musb->control), then | ||
| 1059 | * we have a candidate... NAKing is *NOT* an error | ||
| 1060 | */ | ||
| 1061 | musb_writew(epio, MUSB_CSR0, 0); | ||
| 1062 | retval = IRQ_HANDLED; | ||
| 1063 | } | ||
| 1064 | |||
| 1065 | if (status) { | ||
| 1066 | DBG(6, "aborting\n"); | ||
| 1067 | retval = IRQ_HANDLED; | ||
| 1068 | if (urb) | ||
| 1069 | urb->status = status; | ||
| 1070 | complete = true; | ||
| 1071 | |||
| 1072 | /* use the proper sequence to abort the transfer */ | ||
| 1073 | if (csr & MUSB_CSR0_H_REQPKT) { | ||
| 1074 | csr &= ~MUSB_CSR0_H_REQPKT; | ||
| 1075 | musb_writew(epio, MUSB_CSR0, csr); | ||
| 1076 | csr &= ~MUSB_CSR0_H_NAKTIMEOUT; | ||
| 1077 | musb_writew(epio, MUSB_CSR0, csr); | ||
| 1078 | } else { | ||
| 1079 | csr |= MUSB_CSR0_FLUSHFIFO; | ||
| 1080 | musb_writew(epio, MUSB_CSR0, csr); | ||
| 1081 | musb_writew(epio, MUSB_CSR0, csr); | ||
| 1082 | csr &= ~MUSB_CSR0_H_NAKTIMEOUT; | ||
| 1083 | musb_writew(epio, MUSB_CSR0, csr); | ||
| 1084 | } | ||
| 1085 | |||
| 1086 | musb_writeb(epio, MUSB_NAKLIMIT0, 0); | ||
| 1087 | |||
| 1088 | /* clear it */ | ||
| 1089 | musb_writew(epio, MUSB_CSR0, 0); | ||
| 1090 | } | ||
| 1091 | |||
| 1092 | if (unlikely(!urb)) { | ||
| 1093 | /* stop the endpoint since we have no place for its data; this | ||
| 1094 | * SHOULD NEVER HAPPEN! */ | ||
| 1095 | ERR("no URB for end 0\n"); | ||
| 1096 | |||
| 1097 | musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO); | ||
| 1098 | musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO); | ||
| 1099 | musb_writew(epio, MUSB_CSR0, 0); | ||
| 1100 | |||
| 1101 | goto done; | ||
| 1102 | } | ||
| 1103 | |||
| 1104 | if (!complete) { | ||
| 1105 | /* call common logic and prepare response */ | ||
| 1106 | if (musb_h_ep0_continue(musb, len, urb)) { | ||
| 1107 | /* more packets required */ | ||
| 1108 | csr = (MUSB_EP0_IN == musb->ep0_stage) | ||
| 1109 | ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY; | ||
| 1110 | } else { | ||
| 1111 | /* data transfer complete; perform status phase */ | ||
| 1112 | if (usb_pipeout(urb->pipe) | ||
| 1113 | || !urb->transfer_buffer_length) | ||
| 1114 | csr = MUSB_CSR0_H_STATUSPKT | ||
| 1115 | | MUSB_CSR0_H_REQPKT; | ||
| 1116 | else | ||
| 1117 | csr = MUSB_CSR0_H_STATUSPKT | ||
| 1118 | | MUSB_CSR0_TXPKTRDY; | ||
| 1119 | |||
| 1120 | /* flag status stage */ | ||
| 1121 | musb->ep0_stage = MUSB_EP0_STATUS; | ||
| 1122 | |||
| 1123 | DBG(5, "ep0 STATUS, csr %04x\n", csr); | ||
| 1124 | |||
| 1125 | } | ||
| 1126 | musb_writew(epio, MUSB_CSR0, csr); | ||
| 1127 | retval = IRQ_HANDLED; | ||
| 1128 | } else | ||
| 1129 | musb->ep0_stage = MUSB_EP0_IDLE; | ||
| 1130 | |||
| 1131 | /* call completion handler if done */ | ||
| 1132 | if (complete) | ||
| 1133 | musb_advance_schedule(musb, urb, hw_ep, 1); | ||
| 1134 | done: | ||
| 1135 | return retval; | ||
| 1136 | } | ||
| 1137 | |||
| 1138 | |||
| 1139 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
| 1140 | |||
| 1141 | /* Host side TX (OUT) using Mentor DMA works as follows: | ||
| 1142 | submit_urb -> | ||
| 1143 | - if queue was empty, Program Endpoint | ||
| 1144 | - ... which starts DMA to fifo in mode 1 or 0 | ||
| 1145 | |||
| 1146 | DMA Isr (transfer complete) -> TxAvail() | ||
| 1147 | - Stop DMA (~DmaEnab) (<--- Alert ... currently happens | ||
| 1148 | only in musb_cleanup_urb) | ||
| 1149 | - TxPktRdy has to be set in mode 0 or for | ||
| 1150 | short packets in mode 1. | ||
| 1151 | */ | ||
| 1152 | |||
| 1153 | #endif | ||
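As a minimal illustration of the rule in the note above (all names here are placeholders, not driver identifiers; "short packet" is taken to mean the transfer does not end on a full-packet boundary):

	#include <stdbool.h>
	#include <stddef.h>

	/* Does the DMA completion handler have to set TXPKTRDY itself? */
	bool tx_needs_manual_txpktrdy(int dma_mode, size_t len, size_t maxpacket)
	{
		if (dma_mode == 0)
			return true;			/* mode 0: always */
		return (len % maxpacket) != 0;		/* mode 1: only for a short tail */
	}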
| 1154 | |||
| 1155 | /* Service a Tx-Available or dma completion irq for the endpoint */ | ||
| 1156 | void musb_host_tx(struct musb *musb, u8 epnum) | ||
| 1157 | { | ||
| 1158 | int pipe; | ||
| 1159 | bool done = false; | ||
| 1160 | u16 tx_csr; | ||
| 1161 | size_t wLength = 0; | ||
| 1162 | u8 *buf = NULL; | ||
| 1163 | struct urb *urb; | ||
| 1164 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
| 1165 | void __iomem *epio = hw_ep->regs; | ||
| 1166 | struct musb_qh *qh = hw_ep->out_qh; | ||
| 1167 | u32 status = 0; | ||
| 1168 | void __iomem *mbase = musb->mregs; | ||
| 1169 | struct dma_channel *dma; | ||
| 1170 | |||
| 1171 | urb = next_urb(qh); | ||
| 1172 | |||
| 1173 | musb_ep_select(mbase, epnum); | ||
| 1174 | tx_csr = musb_readw(epio, MUSB_TXCSR); | ||
| 1175 | |||
| 1176 | /* with CPPI, DMA sometimes triggers "extra" irqs */ | ||
| 1177 | if (!urb) { | ||
| 1178 | DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); | ||
| 1179 | goto finish; | ||
| 1180 | } | ||
| 1181 | |||
| 1182 | pipe = urb->pipe; | ||
| 1183 | dma = is_dma_capable() ? hw_ep->tx_channel : NULL; | ||
| 1184 | DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr, | ||
| 1185 | dma ? ", dma" : ""); | ||
| 1186 | |||
| 1187 | /* check for errors */ | ||
| 1188 | if (tx_csr & MUSB_TXCSR_H_RXSTALL) { | ||
| 1189 | /* dma was disabled, fifo flushed */ | ||
| 1190 | DBG(3, "TX end %d stall\n", epnum); | ||
| 1191 | |||
| 1192 | /* stall; record URB status */ | ||
| 1193 | status = -EPIPE; | ||
| 1194 | |||
| 1195 | } else if (tx_csr & MUSB_TXCSR_H_ERROR) { | ||
| 1196 | /* (NON-ISO) dma was disabled, fifo flushed */ | ||
| 1197 | DBG(3, "TX 3strikes on ep=%d\n", epnum); | ||
| 1198 | |||
| 1199 | status = -ETIMEDOUT; | ||
| 1200 | |||
| 1201 | } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { | ||
| 1202 | DBG(6, "TX end=%d device not responding\n", epnum); | ||
| 1203 | |||
| 1204 | /* NOTE: this code path would be a good place to PAUSE a | ||
| 1205 | * transfer, if there's some other (nonperiodic) tx urb | ||
| 1206 | * that could use this fifo. (dma complicates it...) | ||
| 1207 | * | ||
| 1208 | * if (bulk && qh->ring.next != &musb->out_bulk), then | ||
| 1209 | * we have a candidate... NAKing is *NOT* an error | ||
| 1210 | */ | ||
| 1211 | musb_ep_select(mbase, epnum); | ||
| 1212 | musb_writew(epio, MUSB_TXCSR, | ||
| 1213 | MUSB_TXCSR_H_WZC_BITS | ||
| 1214 | | MUSB_TXCSR_TXPKTRDY); | ||
| 1215 | goto finish; | ||
| 1216 | } | ||
| 1217 | |||
| 1218 | if (status) { | ||
| 1219 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
| 1220 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
| 1221 | (void) musb->dma_controller->channel_abort(dma); | ||
| 1222 | } | ||
| 1223 | |||
| 1224 | /* do the proper sequence to abort the transfer in the | ||
| 1225 | * usb core; the dma engine should already be stopped. | ||
| 1226 | */ | ||
| 1227 | musb_h_tx_flush_fifo(hw_ep); | ||
| 1228 | tx_csr &= ~(MUSB_TXCSR_AUTOSET | ||
| 1229 | | MUSB_TXCSR_DMAENAB | ||
| 1230 | | MUSB_TXCSR_H_ERROR | ||
| 1231 | | MUSB_TXCSR_H_RXSTALL | ||
| 1232 | | MUSB_TXCSR_H_NAKTIMEOUT | ||
| 1233 | ); | ||
| 1234 | |||
| 1235 | musb_ep_select(mbase, epnum); | ||
| 1236 | musb_writew(epio, MUSB_TXCSR, tx_csr); | ||
| 1237 | /* REVISIT may need to clear FLUSHFIFO ... */ | ||
| 1238 | musb_writew(epio, MUSB_TXCSR, tx_csr); | ||
| 1239 | musb_writeb(epio, MUSB_TXINTERVAL, 0); | ||
| 1240 | |||
| 1241 | done = true; | ||
| 1242 | } | ||
| 1243 | |||
| 1244 | /* second cppi case */ | ||
| 1245 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
| 1246 | DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); | ||
| 1247 | goto finish; | ||
| 1248 | |||
| 1249 | } | ||
| 1250 | |||
| 1251 | /* REVISIT this looks wrong... */ | ||
| 1252 | if (!status || dma || usb_pipeisoc(pipe)) { | ||
| 1253 | if (dma) | ||
| 1254 | wLength = dma->actual_len; | ||
| 1255 | else | ||
| 1256 | wLength = qh->segsize; | ||
| 1257 | qh->offset += wLength; | ||
| 1258 | |||
| 1259 | if (usb_pipeisoc(pipe)) { | ||
| 1260 | struct usb_iso_packet_descriptor *d; | ||
| 1261 | |||
| 1262 | d = urb->iso_frame_desc + qh->iso_idx; | ||
| 1263 | d->actual_length = qh->segsize; | ||
| 1264 | if (++qh->iso_idx >= urb->number_of_packets) { | ||
| 1265 | done = true; | ||
| 1266 | } else { | ||
| 1267 | d++; | ||
| 1268 | buf = urb->transfer_buffer + d->offset; | ||
| 1269 | wLength = d->length; | ||
| 1270 | } | ||
| 1271 | } else if (dma) { | ||
| 1272 | done = true; | ||
| 1273 | } else { | ||
| 1274 | /* see if we need to send more data, or ZLP */ | ||
| 1275 | if (qh->segsize < qh->maxpacket) | ||
| 1276 | done = true; | ||
| 1277 | else if (qh->offset == urb->transfer_buffer_length | ||
| 1278 | && !(urb->transfer_flags | ||
| 1279 | & URB_ZERO_PACKET)) | ||
| 1280 | done = true; | ||
| 1281 | if (!done) { | ||
| 1282 | buf = urb->transfer_buffer | ||
| 1283 | + qh->offset; | ||
| 1284 | wLength = urb->transfer_buffer_length | ||
| 1285 | - qh->offset; | ||
| 1286 | } | ||
| 1287 | } | ||
| 1288 | } | ||
| 1289 | |||
| 1290 | /* urb->status != -EINPROGRESS means request has been faulted, | ||
| 1291 | * so we must abort this transfer after cleanup | ||
| 1292 | */ | ||
| 1293 | if (urb->status != -EINPROGRESS) { | ||
| 1294 | done = true; | ||
| 1295 | if (status == 0) | ||
| 1296 | status = urb->status; | ||
| 1297 | } | ||
| 1298 | |||
| 1299 | if (done) { | ||
| 1300 | /* set status */ | ||
| 1301 | urb->status = status; | ||
| 1302 | urb->actual_length = qh->offset; | ||
| 1303 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); | ||
| 1304 | |||
| 1305 | } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) { | ||
| 1306 | /* WARN_ON(!buf); */ | ||
| 1307 | |||
| 1308 | /* REVISIT: some docs say that when hw_ep->tx_double_buffered, | ||
| 1309 | * (and presumably, fifo is not half-full) we should write TWO | ||
| 1310 | * packets before updating TXCSR ... other docs disagree ... | ||
| 1311 | */ | ||
| 1312 | /* PIO: start next packet in this URB */ | ||
| 1313 | wLength = min(qh->maxpacket, (u16) wLength); | ||
| 1314 | musb_write_fifo(hw_ep, wLength, buf); | ||
| 1315 | qh->segsize = wLength; | ||
| 1316 | |||
| 1317 | musb_ep_select(mbase, epnum); | ||
| 1318 | musb_writew(epio, MUSB_TXCSR, | ||
| 1319 | MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); | ||
| 1320 | } else | ||
| 1321 | DBG(1, "not complete, but dma enabled?\n"); | ||
| 1322 | |||
| 1323 | finish: | ||
| 1324 | return; | ||
| 1325 | } | ||
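The "see if we need to send more data, or ZLP" test near the end of the PIO path above can be summarized as a small standalone sketch; the parameter names are illustrative only, not driver identifiers:

	#include <stdbool.h>
	#include <stddef.h>

	/* true when an OUT transfer is finished after the packet just sent */
	bool out_transfer_done(size_t last_packet_len, size_t maxpacket,
			       size_t bytes_sent, size_t buffer_len,
			       bool want_zero_packet)
	{
		if (last_packet_len < maxpacket)
			return true;	/* a short packet always terminates */
		if (bytes_sent == buffer_len && !want_zero_packet)
			return true;	/* buffer done, no trailing ZLP requested */
		return false;
	}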
| 1326 | |||
| 1327 | |||
| 1328 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
| 1329 | |||
| 1330 | /* Host side RX (IN) using Mentor DMA works as follows: | ||
| 1331 | submit_urb -> | ||
| 1332 | - if queue was empty, ProgramEndpoint | ||
| 1333 | - first IN token is sent out (by setting ReqPkt) | ||
| 1334 | LinuxIsr -> RxReady() | ||
| 1335 | /\ => first packet is received | ||
| 1336 | | - Set in mode 0 (DmaEnab, ~ReqPkt) | ||
| 1337 | | -> DMA Isr (transfer complete) -> RxReady() | ||
| 1338 | | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab) | ||
| 1339 | | - if urb not complete, send next IN token (ReqPkt) | ||
| 1340 | | | else complete urb. | ||
| 1341 | | | | ||
| 1342 | --------------------------- | ||
| 1343 | * | ||
| 1344 | * Nuances of mode 1: | ||
| 1345 | * For short packets, no ack (+RxPktRdy) is sent automatically | ||
| 1346 | * (even if AutoClear is ON) | ||
| 1347 | * For full packets, the ack (~RxPktRdy) and the next IN token (+ReqPkt) are | ||
| 1348 | * sent automatically => major problem, as collecting the next packet becomes | ||
| 1349 | * difficult. Hence mode 1 is not used. | ||
| 1350 | * | ||
| 1351 | * REVISIT | ||
| 1352 | * All we care about at this driver level is that | ||
| 1353 | * (a) all URBs terminate with REQPKT cleared and fifo(s) empty; | ||
| 1354 | * (b) termination conditions are: short RX, or buffer full; | ||
| 1355 | * (c) fault modes include | ||
| 1356 | * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO. | ||
| 1357 | * (and that endpoint's dma queue stops immediately) | ||
| 1358 | * - overflow (full, PLUS more bytes in the terminal packet) | ||
| 1359 | * | ||
| 1360 | * So for example, usb-storage sets URB_SHORT_NOT_OK, and would | ||
| 1361 | * thus be a great candidate for using mode 1 ... for all but the | ||
| 1362 | * last packet of one URB's transfer. | ||
| 1363 | */ | ||
| 1364 | |||
| 1365 | #endif | ||
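The note above explains why RX DMA mode 1 is rarely usable; where the code below does attempt it (the #ifdef USE_MODE1 block in musb_host_rx), the eligibility test reduces to the following sketch, with illustrative names and the remaining buffer length passed in by the caller:

	#include <stdbool.h>
	#include <stddef.h>

	/* 1 = multi-packet DMA ended by the hardware, 0 = one packet per programming */
	int pick_rx_dma_mode(bool short_not_ok, size_t remaining, size_t maxpacket)
	{
		if (short_not_ok && remaining > maxpacket)
			return 1;
		return 0;
	}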
| 1366 | |||
| 1367 | /* | ||
| 1368 | * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, | ||
| 1369 | * and high-bandwidth IN transfer cases. | ||
| 1370 | */ | ||
| 1371 | void musb_host_rx(struct musb *musb, u8 epnum) | ||
| 1372 | { | ||
| 1373 | struct urb *urb; | ||
| 1374 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
| 1375 | void __iomem *epio = hw_ep->regs; | ||
| 1376 | struct musb_qh *qh = hw_ep->in_qh; | ||
| 1377 | size_t xfer_len; | ||
| 1378 | void __iomem *mbase = musb->mregs; | ||
| 1379 | int pipe; | ||
| 1380 | u16 rx_csr, val; | ||
| 1381 | bool iso_err = false; | ||
| 1382 | bool done = false; | ||
| 1383 | u32 status; | ||
| 1384 | struct dma_channel *dma; | ||
| 1385 | |||
| 1386 | musb_ep_select(mbase, epnum); | ||
| 1387 | |||
| 1388 | urb = next_urb(qh); | ||
| 1389 | dma = is_dma_capable() ? hw_ep->rx_channel : NULL; | ||
| 1390 | status = 0; | ||
| 1391 | xfer_len = 0; | ||
| 1392 | |||
| 1393 | rx_csr = musb_readw(epio, MUSB_RXCSR); | ||
| 1394 | val = rx_csr; | ||
| 1395 | |||
| 1396 | if (unlikely(!urb)) { | ||
| 1397 | /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least | ||
| 1398 | * usbtest #11 (unlinks) triggers it regularly, sometimes | ||
| 1399 | * with fifo full. (Only with DMA??) | ||
| 1400 | */ | ||
| 1401 | DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val, | ||
| 1402 | musb_readw(epio, MUSB_RXCOUNT)); | ||
| 1403 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); | ||
| 1404 | return; | ||
| 1405 | } | ||
| 1406 | |||
| 1407 | pipe = urb->pipe; | ||
| 1408 | |||
| 1409 | DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n", | ||
| 1410 | epnum, rx_csr, urb->actual_length, | ||
| 1411 | dma ? dma->actual_len : 0); | ||
| 1412 | |||
| 1413 | /* check for errors, concurrent stall & unlink is not really | ||
| 1414 | * handled yet! */ | ||
| 1415 | if (rx_csr & MUSB_RXCSR_H_RXSTALL) { | ||
| 1416 | DBG(3, "RX end %d STALL\n", epnum); | ||
| 1417 | |||
| 1418 | /* stall; record URB status */ | ||
| 1419 | status = -EPIPE; | ||
| 1420 | |||
| 1421 | } else if (rx_csr & MUSB_RXCSR_H_ERROR) { | ||
| 1422 | DBG(3, "end %d RX proto error\n", epnum); | ||
| 1423 | |||
| 1424 | status = -EPROTO; | ||
| 1425 | musb_writeb(epio, MUSB_RXINTERVAL, 0); | ||
| 1426 | |||
| 1427 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { | ||
| 1428 | |||
| 1429 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { | ||
| 1430 | /* NOTE this code path would be a good place to PAUSE a | ||
| 1431 | * transfer, if there's some other (nonperiodic) rx urb | ||
| 1432 | * that could use this fifo. (dma complicates it...) | ||
| 1433 | * | ||
| 1434 | * if (bulk && qh->ring.next != &musb->in_bulk), then | ||
| 1435 | * we have a candidate... NAKing is *NOT* an error | ||
| 1436 | */ | ||
| 1437 | DBG(6, "RX end %d NAK timeout\n", epnum); | ||
| 1438 | musb_ep_select(mbase, epnum); | ||
| 1439 | musb_writew(epio, MUSB_RXCSR, | ||
| 1440 | MUSB_RXCSR_H_WZC_BITS | ||
| 1441 | | MUSB_RXCSR_H_REQPKT); | ||
| 1442 | |||
| 1443 | goto finish; | ||
| 1444 | } else { | ||
| 1445 | DBG(4, "RX end %d ISO data error\n", epnum); | ||
| 1446 | /* packet error reported later */ | ||
| 1447 | iso_err = true; | ||
| 1448 | } | ||
| 1449 | } | ||
| 1450 | |||
| 1451 | /* faults abort the transfer */ | ||
| 1452 | if (status) { | ||
| 1453 | /* clean up dma and collect transfer count */ | ||
| 1454 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
| 1455 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
| 1456 | (void) musb->dma_controller->channel_abort(dma); | ||
| 1457 | xfer_len = dma->actual_len; | ||
| 1458 | } | ||
| 1459 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); | ||
| 1460 | musb_writeb(epio, MUSB_RXINTERVAL, 0); | ||
| 1461 | done = true; | ||
| 1462 | goto finish; | ||
| 1463 | } | ||
| 1464 | |||
| 1465 | if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { | ||
| 1466 | /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ | ||
| 1467 | ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr); | ||
| 1468 | goto finish; | ||
| 1469 | } | ||
| 1470 | |||
| 1471 | /* thorough shutdown for now ... given more precise fault handling | ||
| 1472 | * and better queueing support, we might keep a DMA pipeline going | ||
| 1473 | * while processing this irq for earlier completions. | ||
| 1474 | */ | ||
| 1475 | |||
| 1476 | /* FIXME this is _way_ too much in-line logic for Mentor DMA */ | ||
| 1477 | |||
| 1478 | #ifndef CONFIG_USB_INVENTRA_DMA | ||
| 1479 | if (rx_csr & MUSB_RXCSR_H_REQPKT) { | ||
| 1480 | /* REVISIT this happened for a while on some short reads... | ||
| 1481 | * the cleanup still needs investigation... looks bad... | ||
| 1482 | * and also duplicates dma cleanup code above ... plus, | ||
| 1483 | * shouldn't this be the "half full" double buffer case? | ||
| 1484 | */ | ||
| 1485 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
| 1486 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
| 1487 | (void) musb->dma_controller->channel_abort(dma); | ||
| 1488 | xfer_len = dma->actual_len; | ||
| 1489 | done = true; | ||
| 1490 | } | ||
| 1491 | |||
| 1492 | DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, | ||
| 1493 | xfer_len, dma ? ", dma" : ""); | ||
| 1494 | rx_csr &= ~MUSB_RXCSR_H_REQPKT; | ||
| 1495 | |||
| 1496 | musb_ep_select(mbase, epnum); | ||
| 1497 | musb_writew(epio, MUSB_RXCSR, | ||
| 1498 | MUSB_RXCSR_H_WZC_BITS | rx_csr); | ||
| 1499 | } | ||
| 1500 | #endif | ||
| 1501 | if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) { | ||
| 1502 | xfer_len = dma->actual_len; | ||
| 1503 | |||
| 1504 | val &= ~(MUSB_RXCSR_DMAENAB | ||
| 1505 | | MUSB_RXCSR_H_AUTOREQ | ||
| 1506 | | MUSB_RXCSR_AUTOCLEAR | ||
| 1507 | | MUSB_RXCSR_RXPKTRDY); | ||
| 1508 | musb_writew(hw_ep->regs, MUSB_RXCSR, val); | ||
| 1509 | |||
| 1510 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
| 1511 | /* done if urb buffer is full or a short packet is received */ | ||
| 1512 | done = (urb->actual_length + xfer_len >= | ||
| 1513 | urb->transfer_buffer_length | ||
| 1514 | || dma->actual_len < qh->maxpacket); | ||
| 1515 | |||
| 1516 | /* send IN token for next packet, without AUTOREQ */ | ||
| 1517 | if (!done) { | ||
| 1518 | val |= MUSB_RXCSR_H_REQPKT; | ||
| 1519 | musb_writew(epio, MUSB_RXCSR, | ||
| 1520 | MUSB_RXCSR_H_WZC_BITS | val); | ||
| 1521 | } | ||
| 1522 | |||
| 1523 | DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum, | ||
| 1524 | done ? "off" : "reset", | ||
| 1525 | musb_readw(epio, MUSB_RXCSR), | ||
| 1526 | musb_readw(epio, MUSB_RXCOUNT)); | ||
| 1527 | #else | ||
| 1528 | done = true; | ||
| 1529 | #endif | ||
| 1530 | } else if (urb->status == -EINPROGRESS) { | ||
| 1531 | /* if no errors, be sure a packet is ready for unloading */ | ||
| 1532 | if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { | ||
| 1533 | status = -EPROTO; | ||
| 1534 | ERR("Rx interrupt with no errors or packet!\n"); | ||
| 1535 | |||
| 1536 | /* FIXME this is another "SHOULD NEVER HAPPEN" */ | ||
| 1537 | |||
| 1538 | /* SCRUB (RX) */ | ||
| 1539 | /* do the proper sequence to abort the transfer */ | ||
| 1540 | musb_ep_select(mbase, epnum); | ||
| 1541 | val &= ~MUSB_RXCSR_H_REQPKT; | ||
| 1542 | musb_writew(epio, MUSB_RXCSR, val); | ||
| 1543 | goto finish; | ||
| 1544 | } | ||
| 1545 | |||
| 1546 | /* we are expecting IN packets */ | ||
| 1547 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
| 1548 | if (dma) { | ||
| 1549 | struct dma_controller *c; | ||
| 1550 | u16 rx_count; | ||
| 1551 | int ret; | ||
| 1552 | |||
| 1553 | rx_count = musb_readw(epio, MUSB_RXCOUNT); | ||
| 1554 | |||
| 1555 | DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n", | ||
| 1556 | epnum, rx_count, | ||
| 1557 | urb->transfer_dma | ||
| 1558 | + urb->actual_length, | ||
| 1559 | qh->offset, | ||
| 1560 | urb->transfer_buffer_length); | ||
| 1561 | |||
| 1562 | c = musb->dma_controller; | ||
| 1563 | |||
| 1564 | dma->desired_mode = 0; | ||
| 1565 | #ifdef USE_MODE1 | ||
| 1566 | /* because of the issue below, mode 1 will | ||
| 1567 | * only rarely behave with correct semantics. | ||
| 1568 | */ | ||
| 1569 | if ((urb->transfer_flags & | ||
| 1570 | URB_SHORT_NOT_OK) | ||
| 1571 | && (urb->transfer_buffer_length - | ||
| 1572 | urb->actual_length) | ||
| 1573 | > qh->maxpacket) | ||
| 1574 | dma->desired_mode = 1; | ||
| 1575 | #endif | ||
| 1576 | |||
| 1577 | /* Disadvantage of using mode 1: | ||
| 1578 | * It's basically usable only for mass storage class; essentially all | ||
| 1579 | * other protocols also terminate transfers on short packets. | ||
| 1580 | * | ||
| 1581 | * Details: | ||
| 1582 | * An extra IN token is sent at the end of the transfer (due to AUTOREQ) | ||
| 1583 | * If you try to use mode 1 for (transfer_buffer_length - 512), and try | ||
| 1584 | * to use the extra IN token to grab the last packet using mode 0, then | ||
| 1585 | * the problem is that you cannot be sure when the device will send the | ||
| 1586 | * last packet and set RxPktRdy. Sometimes the packet is received too | ||
| 1587 | * soon, so that it gets lost when RxCSR is reset at the end of the | ||
| 1588 | * mode 1 transfer; sometimes it arrives just a little late, so that if | ||
| 1589 | * you try to configure for mode 0 soon after the mode 1 transfer | ||
| 1590 | * completes, you will find rxcount 0. You might think of waiting for | ||
| 1591 | * an interrupt when the packet is received; well, you won't get one! | ||
| 1592 | */ | ||
| 1593 | |||
| 1594 | val = musb_readw(epio, MUSB_RXCSR); | ||
| 1595 | val &= ~MUSB_RXCSR_H_REQPKT; | ||
| 1596 | |||
| 1597 | if (dma->desired_mode == 0) | ||
| 1598 | val &= ~MUSB_RXCSR_H_AUTOREQ; | ||
| 1599 | else | ||
| 1600 | val |= MUSB_RXCSR_H_AUTOREQ; | ||
| 1601 | val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB; | ||
| 1602 | |||
| 1603 | musb_writew(epio, MUSB_RXCSR, | ||
| 1604 | MUSB_RXCSR_H_WZC_BITS | val); | ||
| 1605 | |||
| 1606 | /* REVISIT if when actual_length != 0, | ||
| 1607 | * transfer_buffer_length needs to be | ||
| 1608 | * adjusted first... | ||
| 1609 | */ | ||
| 1610 | ret = c->channel_program( | ||
| 1611 | dma, qh->maxpacket, | ||
| 1612 | dma->desired_mode, | ||
| 1613 | urb->transfer_dma | ||
| 1614 | + urb->actual_length, | ||
| 1615 | (dma->desired_mode == 0) | ||
| 1616 | ? rx_count | ||
| 1617 | : urb->transfer_buffer_length); | ||
| 1618 | |||
| 1619 | if (!ret) { | ||
| 1620 | c->channel_release(dma); | ||
| 1621 | hw_ep->rx_channel = NULL; | ||
| 1622 | dma = NULL; | ||
| 1623 | /* REVISIT reset CSR */ | ||
| 1624 | } | ||
| 1625 | } | ||
| 1626 | #endif /* Mentor DMA */ | ||
| 1627 | |||
| 1628 | if (!dma) { | ||
| 1629 | done = musb_host_packet_rx(musb, urb, | ||
| 1630 | epnum, iso_err); | ||
| 1631 | DBG(6, "read %spacket\n", done ? "last " : ""); | ||
| 1632 | } | ||
| 1633 | } | ||
| 1634 | |||
| 1635 | if (dma && usb_pipeisoc(pipe)) { | ||
| 1636 | struct usb_iso_packet_descriptor *d; | ||
| 1637 | int iso_stat = status; | ||
| 1638 | |||
| 1639 | d = urb->iso_frame_desc + qh->iso_idx; | ||
| 1640 | d->actual_length += xfer_len; | ||
| 1641 | if (iso_err) { | ||
| 1642 | iso_stat = -EILSEQ; | ||
| 1643 | urb->error_count++; | ||
| 1644 | } | ||
| 1645 | d->status = iso_stat; | ||
| 1646 | } | ||
| 1647 | |||
| 1648 | finish: | ||
| 1649 | urb->actual_length += xfer_len; | ||
| 1650 | qh->offset += xfer_len; | ||
| 1651 | if (done) { | ||
| 1652 | if (urb->status == -EINPROGRESS) | ||
| 1653 | urb->status = status; | ||
| 1654 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); | ||
| 1655 | } | ||
| 1656 | } | ||
| 1657 | |||
| 1658 | /* schedule nodes correspond to peripheral endpoints, like an OHCI QH. | ||
| 1659 | * the software schedule associates multiple such nodes with a given | ||
| 1660 | * host side hardware endpoint + direction; scheduling may activate | ||
| 1661 | * that hardware endpoint. | ||
| 1662 | */ | ||
| 1663 | static int musb_schedule( | ||
| 1664 | struct musb *musb, | ||
| 1665 | struct musb_qh *qh, | ||
| 1666 | int is_in) | ||
| 1667 | { | ||
| 1668 | int idle; | ||
| 1669 | int best_diff; | ||
| 1670 | int best_end, epnum; | ||
| 1671 | struct musb_hw_ep *hw_ep = NULL; | ||
| 1672 | struct list_head *head = NULL; | ||
| 1673 | |||
| 1674 | /* use fixed hardware for control and bulk */ | ||
| 1675 | switch (qh->type) { | ||
| 1676 | case USB_ENDPOINT_XFER_CONTROL: | ||
| 1677 | head = &musb->control; | ||
| 1678 | hw_ep = musb->control_ep; | ||
| 1679 | break; | ||
| 1680 | case USB_ENDPOINT_XFER_BULK: | ||
| 1681 | hw_ep = musb->bulk_ep; | ||
| 1682 | if (is_in) | ||
| 1683 | head = &musb->in_bulk; | ||
| 1684 | else | ||
| 1685 | head = &musb->out_bulk; | ||
| 1686 | break; | ||
| 1687 | } | ||
| 1688 | if (head) { | ||
| 1689 | idle = list_empty(head); | ||
| 1690 | list_add_tail(&qh->ring, head); | ||
| 1691 | goto success; | ||
| 1692 | } | ||
| 1693 | |||
| 1694 | /* else, periodic transfers get muxed to other endpoints */ | ||
| 1695 | |||
| 1696 | /* FIXME this doesn't consider direction, so it can only | ||
| 1697 | * work for one half of the endpoint hardware, and assumes | ||
| 1698 | * the previous cases handled all non-shared endpoints... | ||
| 1699 | */ | ||
| 1700 | |||
| 1701 | /* we know this qh hasn't been scheduled, so all we need to do | ||
| 1702 | * is choose which hardware endpoint to put it on ... | ||
| 1703 | * | ||
| 1704 | * REVISIT what we really want here is a regular schedule tree | ||
| 1705 | * like e.g. OHCI uses, but for now musb->periodic is just an | ||
| 1706 | * array of the _single_ logical endpoint associated with a | ||
| 1707 | * given physical one (identity mapping logical->physical). | ||
| 1708 | * | ||
| 1709 | * that simplistic approach makes TT scheduling a lot simpler; | ||
| 1710 | * there is none, and thus none of its complexity... | ||
| 1711 | */ | ||
| 1712 | best_diff = 4096; | ||
| 1713 | best_end = -1; | ||
| 1714 | |||
| 1715 | for (epnum = 1; epnum < musb->nr_endpoints; epnum++) { | ||
| 1716 | int diff; | ||
| 1717 | |||
| 1718 | if (musb->periodic[epnum]) | ||
| 1719 | continue; | ||
| 1720 | hw_ep = &musb->endpoints[epnum]; | ||
| 1721 | if (hw_ep == musb->bulk_ep) | ||
| 1722 | continue; | ||
| 1723 | |||
| 1724 | if (is_in) | ||
| 1725 | diff = hw_ep->max_packet_sz_rx - qh->maxpacket; | ||
| 1726 | else | ||
| 1727 | diff = hw_ep->max_packet_sz_tx - qh->maxpacket; | ||
| 1728 | |||
| 1729 | if (diff > 0 && best_diff > diff) { | ||
| 1730 | best_diff = diff; | ||
| 1731 | best_end = epnum; | ||
| 1732 | } | ||
| 1733 | } | ||
| 1734 | if (best_end < 0) | ||
| 1735 | return -ENOSPC; | ||
| 1736 | |||
| 1737 | idle = 1; | ||
| 1738 | hw_ep = musb->endpoints + best_end; | ||
| 1739 | musb->periodic[best_end] = qh; | ||
| 1740 | DBG(4, "qh %p periodic slot %d\n", qh, best_end); | ||
| 1741 | success: | ||
| 1742 | qh->hw_ep = hw_ep; | ||
| 1743 | qh->hep->hcpriv = qh; | ||
| 1744 | if (idle) | ||
| 1745 | musb_start_urb(musb, is_in, qh); | ||
| 1746 | return 0; | ||
| 1747 | } | ||
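Restating the best-fit search above on a plain array of FIFO sizes makes the policy easier to see; this sketch leaves out the bulk endpoint and already-claimed periodic slots that the real loop also skips, and its names are illustrative, not driver identifiers. Note the strict "diff > 0" test, which passes over an exact-size FIFO just as the loop above does:

	/* returns the index of the smallest FIFO that still fits maxpacket,
	 * or -1 when nothing fits (the -ENOSPC case)
	 */
	int best_fit_endpoint(const int *fifo_size, int nr_eps, int maxpacket)
	{
		int best_diff = 4096, best_end = -1, i;

		for (i = 1; i < nr_eps; i++) {
			int diff = fifo_size[i] - maxpacket;

			if (diff > 0 && diff < best_diff) {
				best_diff = diff;
				best_end = i;
			}
		}
		return best_end;
	}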
| 1748 | |||
| 1749 | static int musb_urb_enqueue( | ||
| 1750 | struct usb_hcd *hcd, | ||
| 1751 | struct urb *urb, | ||
| 1752 | gfp_t mem_flags) | ||
| 1753 | { | ||
| 1754 | unsigned long flags; | ||
| 1755 | struct musb *musb = hcd_to_musb(hcd); | ||
| 1756 | struct usb_host_endpoint *hep = urb->ep; | ||
| 1757 | struct musb_qh *qh = hep->hcpriv; | ||
| 1758 | struct usb_endpoint_descriptor *epd = &hep->desc; | ||
| 1759 | int ret; | ||
| 1760 | unsigned type_reg; | ||
| 1761 | unsigned interval; | ||
| 1762 | |||
| 1763 | /* host role must be active */ | ||
| 1764 | if (!is_host_active(musb) || !musb->is_active) | ||
| 1765 | return -ENODEV; | ||
| 1766 | |||
| 1767 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1768 | ret = usb_hcd_link_urb_to_ep(hcd, urb); | ||
| 1769 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1770 | if (ret) | ||
| 1771 | return ret; | ||
| 1772 | |||
| 1773 | /* DMA mapping was already done, if needed, and this urb is on | ||
| 1774 | * hep->urb_list ... so there's little to do unless hep wasn't | ||
| 1775 | * yet scheduled onto a live qh. | ||
| 1776 | * | ||
| 1777 | * REVISIT best to keep hep->hcpriv valid until the endpoint gets | ||
| 1778 | * disabled, testing for empty qh->ring and avoiding qh setup costs | ||
| 1779 | * except for the first urb queued after a config change. | ||
| 1780 | */ | ||
| 1781 | if (qh) { | ||
| 1782 | urb->hcpriv = qh; | ||
| 1783 | return 0; | ||
| 1784 | } | ||
| 1785 | |||
| 1786 | /* Allocate and initialize qh, minimizing the work done each time | ||
| 1787 | * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. | ||
| 1788 | * | ||
| 1789 | * REVISIT consider a dedicated qh kmem_cache, so it's harder | ||
| 1790 | * for bugs in other kernel code to break this driver... | ||
| 1791 | */ | ||
| 1792 | qh = kzalloc(sizeof *qh, mem_flags); | ||
| 1793 | if (!qh) { | ||
| 1794 | usb_hcd_unlink_urb_from_ep(hcd, urb); | ||
| 1795 | return -ENOMEM; | ||
| 1796 | } | ||
| 1797 | |||
| 1798 | qh->hep = hep; | ||
| 1799 | qh->dev = urb->dev; | ||
| 1800 | INIT_LIST_HEAD(&qh->ring); | ||
| 1801 | qh->is_ready = 1; | ||
| 1802 | |||
| 1803 | qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize); | ||
| 1804 | |||
| 1805 | /* no high bandwidth support yet */ | ||
| 1806 | if (qh->maxpacket & ~0x7ff) { | ||
| 1807 | ret = -EMSGSIZE; | ||
| 1808 | goto done; | ||
| 1809 | } | ||
| 1810 | |||
| 1811 | qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; | ||
| 1812 | qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; | ||
| 1813 | |||
| 1814 | /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ | ||
| 1815 | qh->addr_reg = (u8) usb_pipedevice(urb->pipe); | ||
| 1816 | |||
| 1817 | /* precompute rxtype/txtype/type0 register */ | ||
| 1818 | type_reg = (qh->type << 4) | qh->epnum; | ||
| 1819 | switch (urb->dev->speed) { | ||
| 1820 | case USB_SPEED_LOW: | ||
| 1821 | type_reg |= 0xc0; | ||
| 1822 | break; | ||
| 1823 | case USB_SPEED_FULL: | ||
| 1824 | type_reg |= 0x80; | ||
| 1825 | break; | ||
| 1826 | default: | ||
| 1827 | type_reg |= 0x40; | ||
| 1828 | } | ||
| 1829 | qh->type_reg = type_reg; | ||
| 1830 | |||
| 1831 | /* precompute rxinterval/txinterval register */ | ||
| 1832 | interval = min((u8)16, epd->bInterval); /* log encoding */ | ||
| 1833 | switch (qh->type) { | ||
| 1834 | case USB_ENDPOINT_XFER_INT: | ||
| 1835 | /* fullspeed uses linear encoding */ | ||
| 1836 | if (USB_SPEED_FULL == urb->dev->speed) { | ||
| 1837 | interval = epd->bInterval; | ||
| 1838 | if (!interval) | ||
| 1839 | interval = 1; | ||
| 1840 | } | ||
| 1841 | /* FALLTHROUGH */ | ||
| 1842 | case USB_ENDPOINT_XFER_ISOC: | ||
| 1843 | /* iso always uses log encoding */ | ||
| 1844 | break; | ||
| 1845 | default: | ||
| 1846 | /* REVISIT we actually want to use NAK limits, hinting to the | ||
| 1847 | * transfer scheduling logic to try some other qh, e.g. try | ||
| 1848 | * for 2 msec first: | ||
| 1849 | * | ||
| 1850 | * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; | ||
| 1851 | * | ||
| 1852 | * The downside of disabling this is that transfer scheduling | ||
| 1853 | * gets VERY unfair for nonperiodic transfers; a misbehaving | ||
| 1854 | * peripheral could make that hurt. Or for reads, one that's | ||
| 1855 | * perfectly normal: network and other drivers keep reads | ||
| 1856 | * posted at all times, having one pending for a week should | ||
| 1857 | * be perfectly safe. | ||
| 1858 | * | ||
| 1859 | * The upside of disabling it is avoiding the transfer scheduling | ||
| 1860 | * code, putting this issue aside for a while. | ||
| 1861 | */ | ||
| 1862 | interval = 0; | ||
| 1863 | } | ||
| 1864 | qh->intv_reg = interval; | ||
| 1865 | |||
| 1866 | /* precompute addressing for external hub/tt ports */ | ||
| 1867 | if (musb->is_multipoint) { | ||
| 1868 | struct usb_device *parent = urb->dev->parent; | ||
| 1869 | |||
| 1870 | if (parent != hcd->self.root_hub) { | ||
| 1871 | qh->h_addr_reg = (u8) parent->devnum; | ||
| 1872 | |||
| 1873 | /* set up tt info if needed */ | ||
| 1874 | if (urb->dev->tt) { | ||
| 1875 | qh->h_port_reg = (u8) urb->dev->ttport; | ||
| 1876 | qh->h_addr_reg |= 0x80; | ||
| 1877 | } | ||
| 1878 | } | ||
| 1879 | } | ||
| 1880 | |||
| 1881 | /* invariant: hep->hcpriv is null OR the qh that's already scheduled. | ||
| 1882 | * until we get real dma queues (with an entry for each urb/buffer), | ||
| 1883 | * we only have work to do in the former case. | ||
| 1884 | */ | ||
| 1885 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1886 | if (hep->hcpriv) { | ||
| 1887 | /* some concurrent activity submitted another urb to hep... | ||
| 1888 | * odd, rare, error prone, but legal. | ||
| 1889 | */ | ||
| 1890 | kfree(qh); | ||
| 1891 | ret = 0; | ||
| 1892 | } else | ||
| 1893 | ret = musb_schedule(musb, qh, | ||
| 1894 | epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); | ||
| 1895 | |||
| 1896 | if (ret == 0) { | ||
| 1897 | urb->hcpriv = qh; | ||
| 1898 | /* FIXME set urb->start_frame for iso/intr, it's tested in | ||
| 1899 | * musb_start_urb(), but otherwise only konicawc cares ... | ||
| 1900 | */ | ||
| 1901 | } | ||
| 1902 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1903 | |||
| 1904 | done: | ||
| 1905 | if (ret != 0) { | ||
| 1906 | usb_hcd_unlink_urb_from_ep(hcd, urb); | ||
| 1907 | kfree(qh); | ||
| 1908 | } | ||
| 1909 | return ret; | ||
| 1910 | } | ||
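The interval precompute above mixes two encodings: a logarithmic value capped at 16 for high-speed interrupt and for isochronous endpoints, a linear value (minimum 1) for full-speed interrupt, and 0 (NAK limiting disabled) for bulk and control. A minimal sketch, using stand-in constants instead of USB_ENDPOINT_XFER_* and a plain full_speed flag in place of the speed check:

	enum qh_type { QH_CTRL, QH_ISOC, QH_BULK, QH_INT };

	unsigned precompute_interval(enum qh_type type, int full_speed,
				     unsigned bInterval)
	{
		/* log encoding is capped at 16, as in the code above */
		unsigned log_val = bInterval < 16 ? bInterval : 16;

		switch (type) {
		case QH_INT:
			if (full_speed)			/* linear encoding, >= 1 */
				return bInterval ? bInterval : 1;
			return log_val;			/* high speed: log encoding */
		case QH_ISOC:
			return log_val;			/* iso always uses log encoding */
		default:
			return 0;			/* bulk/control: disabled for now */
		}
	}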
| 1911 | |||
| 1912 | |||
| 1913 | /* | ||
| 1914 | * abort a transfer that's at the head of a hardware queue. | ||
| 1915 | * called with controller locked, irqs blocked | ||
| 1916 | * the hardware queue then advances to the next transfer, unless prevented | ||
| 1917 | */ | ||
| 1918 | static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in) | ||
| 1919 | { | ||
| 1920 | struct musb_hw_ep *ep = qh->hw_ep; | ||
| 1921 | void __iomem *epio = ep->regs; | ||
| 1922 | unsigned hw_end = ep->epnum; | ||
| 1923 | void __iomem *regs = ep->musb->mregs; | ||
| 1924 | u16 csr; | ||
| 1925 | int status = 0; | ||
| 1926 | |||
| 1927 | musb_ep_select(regs, hw_end); | ||
| 1928 | |||
| 1929 | if (is_dma_capable()) { | ||
| 1930 | struct dma_channel *dma; | ||
| 1931 | |||
| 1932 | dma = is_in ? ep->rx_channel : ep->tx_channel; | ||
| 1933 | if (dma) { | ||
| 1934 | status = ep->musb->dma_controller->channel_abort(dma); | ||
| 1935 | DBG(status ? 1 : 3, | ||
| 1936 | "abort %cX%d DMA for urb %p --> %d\n", | ||
| 1937 | is_in ? 'R' : 'T', ep->epnum, | ||
| 1938 | urb, status); | ||
| 1939 | urb->actual_length += dma->actual_len; | ||
| 1940 | } | ||
| 1941 | } | ||
| 1942 | |||
| 1943 | /* turn off DMA requests, discard state, stop polling ... */ | ||
| 1944 | if (is_in) { | ||
| 1945 | /* giveback saves bulk toggle */ | ||
| 1946 | csr = musb_h_flush_rxfifo(ep, 0); | ||
| 1947 | |||
| 1948 | /* REVISIT we still get an irq; should likely clear the | ||
| 1949 | * endpoint's irq status here to avoid bogus irqs. | ||
| 1950 | * clearing that status is platform-specific... | ||
| 1951 | */ | ||
| 1952 | } else { | ||
| 1953 | musb_h_tx_flush_fifo(ep); | ||
| 1954 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 1955 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
| 1956 | | MUSB_TXCSR_DMAENAB | ||
| 1957 | | MUSB_TXCSR_H_RXSTALL | ||
| 1958 | | MUSB_TXCSR_H_NAKTIMEOUT | ||
| 1959 | | MUSB_TXCSR_H_ERROR | ||
| 1960 | | MUSB_TXCSR_TXPKTRDY); | ||
| 1961 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 1962 | /* REVISIT may need to clear FLUSHFIFO ... */ | ||
| 1963 | musb_writew(epio, MUSB_TXCSR, csr); | ||
| 1964 | /* flush cpu writebuffer */ | ||
| 1965 | csr = musb_readw(epio, MUSB_TXCSR); | ||
| 1966 | } | ||
| 1967 | if (status == 0) | ||
| 1968 | musb_advance_schedule(ep->musb, urb, ep, is_in); | ||
| 1969 | return status; | ||
| 1970 | } | ||
| 1971 | |||
| 1972 | static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | ||
| 1973 | { | ||
| 1974 | struct musb *musb = hcd_to_musb(hcd); | ||
| 1975 | struct musb_qh *qh; | ||
| 1976 | struct list_head *sched; | ||
| 1977 | unsigned long flags; | ||
| 1978 | int ret; | ||
| 1979 | |||
| 1980 | DBG(4, "urb=%p, dev%d ep%d%s\n", urb, | ||
| 1981 | usb_pipedevice(urb->pipe), | ||
| 1982 | usb_pipeendpoint(urb->pipe), | ||
| 1983 | usb_pipein(urb->pipe) ? "in" : "out"); | ||
| 1984 | |||
| 1985 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1986 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); | ||
| 1987 | if (ret) | ||
| 1988 | goto done; | ||
| 1989 | |||
| 1990 | qh = urb->hcpriv; | ||
| 1991 | if (!qh) | ||
| 1992 | goto done; | ||
| 1993 | |||
| 1994 | /* Any URB not actively programmed into endpoint hardware can be | ||
| 1995 | * immediately given back. Such an URB must be at the head of its | ||
| 1996 | * endpoint queue, unless someday we get real DMA queues. And even | ||
| 1997 | * then, it might not be known to the hardware... | ||
| 1998 | * | ||
| 1999 | * Otherwise abort current transfer, pending dma, etc.; urb->status | ||
| 2000 | * has already been updated. This is a synchronous abort; it'd be | ||
| 2001 | * OK to hold off until after some IRQ, though. | ||
| 2002 | */ | ||
| 2003 | if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list) | ||
| 2004 | ret = -EINPROGRESS; | ||
| 2005 | else { | ||
| 2006 | switch (qh->type) { | ||
| 2007 | case USB_ENDPOINT_XFER_CONTROL: | ||
| 2008 | sched = &musb->control; | ||
| 2009 | break; | ||
| 2010 | case USB_ENDPOINT_XFER_BULK: | ||
| 2011 | if (usb_pipein(urb->pipe)) | ||
| 2012 | sched = &musb->in_bulk; | ||
| 2013 | else | ||
| 2014 | sched = &musb->out_bulk; | ||
| 2015 | break; | ||
| 2016 | default: | ||
| 2017 | /* REVISIT when we get a schedule tree, periodic | ||
| 2018 | * transfers won't always be at the head of a | ||
| 2019 | * singleton queue... | ||
| 2020 | */ | ||
| 2021 | sched = NULL; | ||
| 2022 | break; | ||
| 2023 | } | ||
| 2024 | } | ||
| 2025 | |||
| 2026 | /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ | ||
| 2027 | if (ret < 0 || (sched && qh != first_qh(sched))) { | ||
| 2028 | int ready = qh->is_ready; | ||
| 2029 | |||
| 2030 | ret = 0; | ||
| 2031 | qh->is_ready = 0; | ||
| 2032 | __musb_giveback(musb, urb, 0); | ||
| 2033 | qh->is_ready = ready; | ||
| 2034 | } else | ||
| 2035 | ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); | ||
| 2036 | done: | ||
| 2037 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 2038 | return ret; | ||
| 2039 | } | ||
| 2040 | |||
| 2041 | /* disable an endpoint */ | ||
| 2042 | static void | ||
| 2043 | musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) | ||
| 2044 | { | ||
| 2045 | u8 epnum = hep->desc.bEndpointAddress; | ||
| 2046 | unsigned long flags; | ||
| 2047 | struct musb *musb = hcd_to_musb(hcd); | ||
| 2048 | u8 is_in = epnum & USB_DIR_IN; | ||
| 2049 | struct musb_qh *qh = hep->hcpriv; | ||
| 2050 | struct urb *urb, *tmp; | ||
| 2051 | struct list_head *sched; | ||
| 2052 | |||
| 2053 | if (!qh) | ||
| 2054 | return; | ||
| 2055 | |||
| 2056 | spin_lock_irqsave(&musb->lock, flags); | ||
| 2057 | |||
| 2058 | switch (qh->type) { | ||
| 2059 | case USB_ENDPOINT_XFER_CONTROL: | ||
| 2060 | sched = &musb->control; | ||
| 2061 | break; | ||
| 2062 | case USB_ENDPOINT_XFER_BULK: | ||
| 2063 | if (is_in) | ||
| 2064 | sched = &musb->in_bulk; | ||
| 2065 | else | ||
| 2066 | sched = &musb->out_bulk; | ||
| 2067 | break; | ||
| 2068 | default: | ||
| 2069 | /* REVISIT when we get a schedule tree, periodic transfers | ||
| 2070 | * won't always be at the head of a singleton queue... | ||
| 2071 | */ | ||
| 2072 | sched = NULL; | ||
| 2073 | break; | ||
| 2074 | } | ||
| 2075 | |||
| 2076 | /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ | ||
| 2077 | |||
| 2078 | /* kick first urb off the hardware, if needed */ | ||
| 2079 | qh->is_ready = 0; | ||
| 2080 | if (!sched || qh == first_qh(sched)) { | ||
| 2081 | urb = next_urb(qh); | ||
| 2082 | |||
| 2083 | /* make software (then hardware) stop ASAP */ | ||
| 2084 | if (!urb->unlinked) | ||
| 2085 | urb->status = -ESHUTDOWN; | ||
| 2086 | |||
| 2087 | /* cleanup */ | ||
| 2088 | musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); | ||
| 2089 | } else | ||
| 2090 | urb = NULL; | ||
| 2091 | |||
| 2092 | /* then just nuke all the others */ | ||
| 2093 | list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list) | ||
| 2094 | musb_giveback(qh, urb, -ESHUTDOWN); | ||
| 2095 | |||
| 2096 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 2097 | } | ||
| 2098 | |||
| 2099 | static int musb_h_get_frame_number(struct usb_hcd *hcd) | ||
| 2100 | { | ||
| 2101 | struct musb *musb = hcd_to_musb(hcd); | ||
| 2102 | |||
| 2103 | return musb_readw(musb->mregs, MUSB_FRAME); | ||
| 2104 | } | ||
| 2105 | |||
| 2106 | static int musb_h_start(struct usb_hcd *hcd) | ||
| 2107 | { | ||
| 2108 | struct musb *musb = hcd_to_musb(hcd); | ||
| 2109 | |||
| 2110 | /* NOTE: musb_start() is called when the hub driver turns | ||
| 2111 | * on port power, or when (OTG) peripheral starts. | ||
| 2112 | */ | ||
| 2113 | hcd->state = HC_STATE_RUNNING; | ||
| 2114 | musb->port1_status = 0; | ||
| 2115 | return 0; | ||
| 2116 | } | ||
| 2117 | |||
| 2118 | static void musb_h_stop(struct usb_hcd *hcd) | ||
| 2119 | { | ||
| 2120 | musb_stop(hcd_to_musb(hcd)); | ||
| 2121 | hcd->state = HC_STATE_HALT; | ||
| 2122 | } | ||
| 2123 | |||
| 2124 | static int musb_bus_suspend(struct usb_hcd *hcd) | ||
| 2125 | { | ||
| 2126 | struct musb *musb = hcd_to_musb(hcd); | ||
| 2127 | |||
| 2128 | if (musb->xceiv.state == OTG_STATE_A_SUSPEND) | ||
| 2129 | return 0; | ||
| 2130 | |||
| 2131 | if (is_host_active(musb) && musb->is_active) { | ||
| 2132 | WARNING("trying to suspend as %s is_active=%i\n", | ||
| 2133 | otg_state_string(musb), musb->is_active); | ||
| 2134 | return -EBUSY; | ||
| 2135 | } else | ||
| 2136 | return 0; | ||
| 2137 | } | ||
| 2138 | |||
| 2139 | static int musb_bus_resume(struct usb_hcd *hcd) | ||
| 2140 | { | ||
| 2141 | /* resuming child port does the work */ | ||
| 2142 | return 0; | ||
| 2143 | } | ||
| 2144 | |||
| 2145 | const struct hc_driver musb_hc_driver = { | ||
| 2146 | .description = "musb-hcd", | ||
| 2147 | .product_desc = "MUSB HDRC host driver", | ||
| 2148 | .hcd_priv_size = sizeof(struct musb), | ||
| 2149 | .flags = HCD_USB2 | HCD_MEMORY, | ||
| 2150 | |||
| 2151 | /* not using irq handler or reset hooks from usbcore, since | ||
| 2152 | * those must be shared with peripheral code for OTG configs | ||
| 2153 | */ | ||
| 2154 | |||
| 2155 | .start = musb_h_start, | ||
| 2156 | .stop = musb_h_stop, | ||
| 2157 | |||
| 2158 | .get_frame_number = musb_h_get_frame_number, | ||
| 2159 | |||
| 2160 | .urb_enqueue = musb_urb_enqueue, | ||
| 2161 | .urb_dequeue = musb_urb_dequeue, | ||
| 2162 | .endpoint_disable = musb_h_disable, | ||
| 2163 | |||
| 2164 | .hub_status_data = musb_hub_status_data, | ||
| 2165 | .hub_control = musb_hub_control, | ||
| 2166 | .bus_suspend = musb_bus_suspend, | ||
| 2167 | .bus_resume = musb_bus_resume, | ||
| 2168 | /* .start_port_reset = NULL, */ | ||
| 2169 | /* .hub_irq_enable = NULL, */ | ||
| 2170 | }; | ||
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h new file mode 100644 index 000000000000..77bcdb9d5b32 --- /dev/null +++ b/drivers/usb/musb/musb_host.h | |||
| @@ -0,0 +1,110 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG driver host defines | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * version 2 as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 32 | * | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef _MUSB_HOST_H | ||
| 36 | #define _MUSB_HOST_H | ||
| 37 | |||
| 38 | static inline struct usb_hcd *musb_to_hcd(struct musb *musb) | ||
| 39 | { | ||
| 40 | return container_of((void *) musb, struct usb_hcd, hcd_priv); | ||
| 41 | } | ||
| 42 | |||
| 43 | static inline struct musb *hcd_to_musb(struct usb_hcd *hcd) | ||
| 44 | { | ||
| 45 | return (struct musb *) (hcd->hcd_priv); | ||
| 46 | } | ||
| 47 | |||
| 48 | /* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */ | ||
| 49 | struct musb_qh { | ||
| 50 | struct usb_host_endpoint *hep; /* usbcore info */ | ||
| 51 | struct usb_device *dev; | ||
| 52 | struct musb_hw_ep *hw_ep; /* current binding */ | ||
| 53 | |||
| 54 | struct list_head ring; /* of musb_qh */ | ||
| 55 | /* struct musb_qh *next; */ /* for periodic tree */ | ||
| 56 | |||
| 57 | unsigned offset; /* in urb->transfer_buffer */ | ||
| 58 | unsigned segsize; /* current xfer fragment */ | ||
| 59 | |||
| 60 | u8 type_reg; /* {rx,tx} type register */ | ||
| 61 | u8 intv_reg; /* {rx,tx} interval register */ | ||
| 62 | u8 addr_reg; /* device address register */ | ||
| 63 | u8 h_addr_reg; /* hub address register */ | ||
| 64 | u8 h_port_reg; /* hub port register */ | ||
| 65 | |||
| 66 | u8 is_ready; /* safe to modify hw_ep */ | ||
| 67 | u8 type; /* XFERTYPE_* */ | ||
| 68 | u8 epnum; | ||
| 69 | u16 maxpacket; | ||
| 70 | u16 frame; /* for periodic schedule */ | ||
| 71 | unsigned iso_idx; /* in urb->iso_frame_desc[] */ | ||
| 72 | }; | ||
| 73 | |||
| 74 | /* map from control or bulk queue head to the first qh on that ring */ | ||
| 75 | static inline struct musb_qh *first_qh(struct list_head *q) | ||
| 76 | { | ||
| 77 | if (list_empty(q)) | ||
| 78 | return NULL; | ||
| 79 | return list_entry(q->next, struct musb_qh, ring); | ||
| 80 | } | ||
| 81 | |||
| 82 | |||
| 83 | extern void musb_root_disconnect(struct musb *musb); | ||
| 84 | |||
| 85 | struct usb_hcd; | ||
| 86 | |||
| 87 | extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf); | ||
| 88 | extern int musb_hub_control(struct usb_hcd *hcd, | ||
| 89 | u16 typeReq, u16 wValue, u16 wIndex, | ||
| 90 | char *buf, u16 wLength); | ||
| 91 | |||
| 92 | extern const struct hc_driver musb_hc_driver; | ||
| 93 | |||
| 94 | static inline struct urb *next_urb(struct musb_qh *qh) | ||
| 95 | { | ||
| 96 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 97 | struct list_head *queue; | ||
| 98 | |||
| 99 | if (!qh) | ||
| 100 | return NULL; | ||
| 101 | queue = &qh->hep->urb_list; | ||
| 102 | if (list_empty(queue)) | ||
| 103 | return NULL; | ||
| 104 | return list_entry(queue->next, struct urb, urb_list); | ||
| 105 | #else | ||
| 106 | return NULL; | ||
| 107 | #endif | ||
| 108 | } | ||
| 109 | |||
| 110 | #endif /* _MUSB_HOST_H */ | ||
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h new file mode 100644 index 000000000000..6bbedae83af8 --- /dev/null +++ b/drivers/usb/musb/musb_io.h | |||
| @@ -0,0 +1,115 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG driver register I/O | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * version 2 as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 32 | * | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef __MUSB_LINUX_PLATFORM_ARCH_H__ | ||
| 36 | #define __MUSB_LINUX_PLATFORM_ARCH_H__ | ||
| 37 | |||
| 38 | #include <linux/io.h> | ||
| 39 | |||
| 40 | #ifndef CONFIG_ARM | ||
| 41 | static inline void readsl(const void __iomem *addr, void *buf, int len) | ||
| 42 | { insl((unsigned long)addr, buf, len); } | ||
| 43 | static inline void readsw(const void __iomem *addr, void *buf, int len) | ||
| 44 | { insw((unsigned long)addr, buf, len); } | ||
| 45 | static inline void readsb(const void __iomem *addr, void *buf, int len) | ||
| 46 | { insb((unsigned long)addr, buf, len); } | ||
| 47 | |||
| 48 | static inline void writesl(const void __iomem *addr, const void *buf, int len) | ||
| 49 | { outsl((unsigned long)addr, buf, len); } | ||
| 50 | static inline void writesw(const void __iomem *addr, const void *buf, int len) | ||
| 51 | { outsw((unsigned long)addr, buf, len); } | ||
| 52 | static inline void writesb(const void __iomem *addr, const void *buf, int len) | ||
| 53 | { outsb((unsigned long)addr, buf, len); } | ||
| 54 | |||
| 55 | #endif | ||
| 56 | |||
| 57 | /* NOTE: these offsets are all in bytes */ | ||
| 58 | |||
| 59 | static inline u16 musb_readw(const void __iomem *addr, unsigned offset) | ||
| 60 | { return __raw_readw(addr + offset); } | ||
| 61 | |||
| 62 | static inline u32 musb_readl(const void __iomem *addr, unsigned offset) | ||
| 63 | { return __raw_readl(addr + offset); } | ||
| 64 | |||
| 65 | |||
| 66 | static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data) | ||
| 67 | { __raw_writew(data, addr + offset); } | ||
| 68 | |||
| 69 | static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data) | ||
| 70 | { __raw_writel(data, addr + offset); } | ||
| 71 | |||
| 72 | |||
| 73 | #ifdef CONFIG_USB_TUSB6010 | ||
| 74 | |||
| 75 | /* | ||
| 76 | * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum. | ||
| 77 | */ | ||
| 78 | static inline u8 musb_readb(const void __iomem *addr, unsigned offset) | ||
| 79 | { | ||
| 80 | u16 tmp; | ||
| 81 | u8 val; | ||
| 82 | |||
| 83 | tmp = __raw_readw(addr + (offset & ~1)); | ||
| 84 | if (offset & 1) | ||
| 85 | val = (tmp >> 8); | ||
| 86 | else | ||
| 87 | val = tmp & 0xff; | ||
| 88 | |||
| 89 | return val; | ||
| 90 | } | ||
| 91 | |||
| 92 | static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) | ||
| 93 | { | ||
| 94 | u16 tmp; | ||
| 95 | |||
| 96 | tmp = __raw_readw(addr + (offset & ~1)); | ||
| 97 | if (offset & 1) | ||
| 98 | tmp = (data << 8) | (tmp & 0xff); | ||
| 99 | else | ||
| 100 | tmp = (tmp & 0xff00) | data; | ||
| 101 | |||
| 102 | __raw_writew(tmp, addr + (offset & ~1)); | ||
| 103 | } | ||
| 104 | |||
| 105 | #else | ||
| 106 | |||
| 107 | static inline u8 musb_readb(const void __iomem *addr, unsigned offset) | ||
| 108 | { return __raw_readb(addr + offset); } | ||
| 109 | |||
| 110 | static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) | ||
| 111 | { __raw_writeb(data, addr + offset); } | ||
| 112 | |||
| 113 | #endif /* CONFIG_USB_TUSB6010 */ | ||
| 114 | |||
| 115 | #endif | ||
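As a quick illustration of the byte-offset accessor convention above, here is a minimal, hedged sketch (not part of the patch) of how a caller might combine musb_readb()/musb_writeb() with the MUSB_POWER bits defined in musb_regs.h further below; 'mbase' is assumed to be the already-mapped core register base (musb->mregs).

	/* Sketch only: toggle the soft-connect bit, mirroring the 'C'/'c'
	 * commands implemented in musb_procfs.c below.
	 */
	static void example_soft_connect(void __iomem *mbase, bool connect)
	{
		u8 power = musb_readb(mbase, MUSB_POWER);

		if (connect)
			power |= MUSB_POWER_SOFTCONN;
		else
			power &= ~MUSB_POWER_SOFTCONN;
		musb_writeb(mbase, MUSB_POWER, power);
	}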
diff --git a/drivers/usb/musb/musb_procfs.c b/drivers/usb/musb/musb_procfs.c new file mode 100644 index 000000000000..55e6b78bdccc --- /dev/null +++ b/drivers/usb/musb/musb_procfs.c | |||
| @@ -0,0 +1,830 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG driver debug support | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * version 2 as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 32 | * | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/kernel.h> | ||
| 36 | #include <linux/proc_fs.h> | ||
| 37 | #include <linux/seq_file.h> | ||
| 38 | #include <linux/uaccess.h> /* FIXME remove procfs writes */ | ||
| 39 | #include <asm/arch/hardware.h> | ||
| 40 | |||
| 41 | #include "musb_core.h" | ||
| 42 | |||
| 43 | #include "davinci.h" | ||
| 44 | |||
| 45 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 46 | |||
| 47 | static int dump_qh(struct musb_qh *qh, char *buf, unsigned max) | ||
| 48 | { | ||
| 49 | int count; | ||
| 50 | int tmp; | ||
| 51 | struct usb_host_endpoint *hep = qh->hep; | ||
| 52 | struct urb *urb; | ||
| 53 | |||
| 54 | count = snprintf(buf, max, " qh %p dev%d ep%d%s max%d\n", | ||
| 55 | qh, qh->dev->devnum, qh->epnum, | ||
| 56 | ({ char *s; switch (qh->type) { | ||
| 57 | case USB_ENDPOINT_XFER_BULK: | ||
| 58 | s = "-bulk"; break; | ||
| 59 | case USB_ENDPOINT_XFER_INT: | ||
| 60 | s = "-int"; break; | ||
| 61 | case USB_ENDPOINT_XFER_CONTROL: | ||
| 62 | s = ""; break; | ||
| 63 | default: | ||
| 64 | s = "iso"; break; | ||
| 65 | }; s; }), | ||
| 66 | qh->maxpacket); | ||
| 67 | if (count <= 0) | ||
| 68 | return 0; | ||
| 69 | buf += count; | ||
| 70 | max -= count; | ||
| 71 | |||
| 72 | list_for_each_entry(urb, &hep->urb_list, urb_list) { | ||
| 73 | tmp = snprintf(buf, max, "\t%s urb %p %d/%d\n", | ||
| 74 | usb_pipein(urb->pipe) ? "in" : "out", | ||
| 75 | urb, urb->actual_length, | ||
| 76 | urb->transfer_buffer_length); | ||
| 77 | if (tmp <= 0) | ||
| 78 | break; | ||
| 79 | tmp = min(tmp, (int)max); | ||
| 80 | count += tmp; | ||
| 81 | buf += tmp; | ||
| 82 | max -= tmp; | ||
| 83 | } | ||
| 84 | return count; | ||
| 85 | } | ||
| 86 | |||
| 87 | static int | ||
| 88 | dump_queue(struct list_head *q, char *buf, unsigned max) | ||
| 89 | { | ||
| 90 | int count = 0; | ||
| 91 | struct musb_qh *qh; | ||
| 92 | |||
| 93 | list_for_each_entry(qh, q, ring) { | ||
| 94 | int tmp; | ||
| 95 | |||
| 96 | tmp = dump_qh(qh, buf, max); | ||
| 97 | if (tmp <= 0) | ||
| 98 | break; | ||
| 99 | tmp = min(tmp, (int)max); | ||
| 100 | count += tmp; | ||
| 101 | buf += tmp; | ||
| 102 | max -= tmp; | ||
| 103 | } | ||
| 104 | return count; | ||
| 105 | } | ||
| 106 | |||
| 107 | #endif /* HCD */ | ||
| 108 | |||
| 109 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 110 | static int dump_ep(struct musb_ep *ep, char *buffer, unsigned max) | ||
| 111 | { | ||
| 112 | char *buf = buffer; | ||
| 113 | int code = 0; | ||
| 114 | void __iomem *regs = ep->hw_ep->regs; | ||
| 115 | char *mode = "1buf"; | ||
| 116 | |||
| 117 | if (ep->is_in) { | ||
| 118 | if (ep->hw_ep->tx_double_buffered) | ||
| 119 | mode = "2buf"; | ||
| 120 | } else { | ||
| 121 | if (ep->hw_ep->rx_double_buffered) | ||
| 122 | mode = "2buf"; | ||
| 123 | } | ||
| 124 | |||
| 125 | do { | ||
| 126 | struct usb_request *req; | ||
| 127 | |||
| 128 | code = snprintf(buf, max, | ||
| 129 | "\n%s (hw%d): %s%s, csr %04x maxp %04x\n", | ||
| 130 | ep->name, ep->current_epnum, | ||
| 131 | mode, ep->dma ? " dma" : "", | ||
| 132 | musb_readw(regs, | ||
| 133 | (ep->is_in || !ep->current_epnum) | ||
| 134 | ? MUSB_TXCSR | ||
| 135 | : MUSB_RXCSR), | ||
| 136 | musb_readw(regs, ep->is_in | ||
| 137 | ? MUSB_TXMAXP | ||
| 138 | : MUSB_RXMAXP) | ||
| 139 | ); | ||
| 140 | if (code <= 0) | ||
| 141 | break; | ||
| 142 | code = min(code, (int) max); | ||
| 143 | buf += code; | ||
| 144 | max -= code; | ||
| 145 | |||
| 146 | if (is_cppi_enabled() && ep->current_epnum) { | ||
| 147 | unsigned cppi = ep->current_epnum - 1; | ||
| 148 | void __iomem *base = ep->musb->ctrl_base; | ||
| 149 | unsigned off1 = cppi << 2; | ||
| 150 | void __iomem *ram = base; | ||
| 151 | char tmp[16]; | ||
| 152 | |||
| 153 | if (ep->is_in) { | ||
| 154 | ram += DAVINCI_TXCPPI_STATERAM_OFFSET(cppi); | ||
| 155 | tmp[0] = 0; | ||
| 156 | } else { | ||
| 157 | ram += DAVINCI_RXCPPI_STATERAM_OFFSET(cppi); | ||
| 158 | snprintf(tmp, sizeof tmp, "%d left, ", | ||
| 159 | musb_readl(base, | ||
| 160 | DAVINCI_RXCPPI_BUFCNT0_REG + off1)); | ||
| 161 | } | ||
| 162 | |||
| 163 | code = snprintf(buf, max, "%cX DMA%d: %s" | ||
| 164 | "%08x %08x, %08x %08x; " | ||
| 165 | "%08x %08x %08x .. %08x\n", | ||
| 166 | ep->is_in ? 'T' : 'R', | ||
| 167 | ep->current_epnum - 1, tmp, | ||
| 168 | musb_readl(ram, 0 * 4), | ||
| 169 | musb_readl(ram, 1 * 4), | ||
| 170 | musb_readl(ram, 2 * 4), | ||
| 171 | musb_readl(ram, 3 * 4), | ||
| 172 | musb_readl(ram, 4 * 4), | ||
| 173 | musb_readl(ram, 5 * 4), | ||
| 174 | musb_readl(ram, 6 * 4), | ||
| 175 | musb_readl(ram, 7 * 4)); | ||
| 176 | if (code <= 0) | ||
| 177 | break; | ||
| 178 | code = min(code, (int) max); | ||
| 179 | buf += code; | ||
| 180 | max -= code; | ||
| 181 | } | ||
| 182 | |||
| 183 | if (list_empty(&ep->req_list)) { | ||
| 184 | code = snprintf(buf, max, "\t(queue empty)\n"); | ||
| 185 | if (code <= 0) | ||
| 186 | break; | ||
| 187 | code = min(code, (int) max); | ||
| 188 | buf += code; | ||
| 189 | max -= code; | ||
| 190 | break; | ||
| 191 | } | ||
| 192 | list_for_each_entry(req, &ep->req_list, list) { | ||
| 193 | code = snprintf(buf, max, "\treq %p, %s%s%d/%d\n", | ||
| 194 | req, | ||
| 195 | req->zero ? "zero, " : "", | ||
| 196 | req->short_not_ok ? "!short, " : "", | ||
| 197 | req->actual, req->length); | ||
| 198 | if (code <= 0) | ||
| 199 | break; | ||
| 200 | code = min(code, (int) max); | ||
| 201 | buf += code; | ||
| 202 | max -= code; | ||
| 203 | } | ||
| 204 | } while (0); | ||
| 205 | return buf - buffer; | ||
| 206 | } | ||
| 207 | #endif | ||
| 208 | |||
| 209 | static int | ||
| 210 | dump_end_info(struct musb *musb, u8 epnum, char *aBuffer, unsigned max) | ||
| 211 | { | ||
| 212 | int code = 0; | ||
| 213 | char *buf = aBuffer; | ||
| 214 | struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; | ||
| 215 | |||
| 216 | do { | ||
| 217 | musb_ep_select(musb->mregs, epnum); | ||
| 218 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 219 | if (is_host_active(musb)) { | ||
| 220 | int dump_rx, dump_tx; | ||
| 221 | void __iomem *regs = hw_ep->regs; | ||
| 222 | |||
| 223 | /* TEMPORARY (!) until we have a real periodic | ||
| 224 | * schedule tree ... | ||
| 225 | */ | ||
| 226 | if (!epnum) { | ||
| 227 | /* control is shared, uses RX queue | ||
| 228 | * but (mostly) shadowed tx registers | ||
| 229 | */ | ||
| 230 | dump_tx = !list_empty(&musb->control); | ||
| 231 | dump_rx = 0; | ||
| 232 | } else if (hw_ep == musb->bulk_ep) { | ||
| 233 | dump_tx = !list_empty(&musb->out_bulk); | ||
| 234 | dump_rx = !list_empty(&musb->in_bulk); | ||
| 235 | } else if (musb->periodic[epnum]) { | ||
| 236 | struct usb_host_endpoint *hep; | ||
| 237 | |||
| 238 | hep = musb->periodic[epnum]->hep; | ||
| 239 | dump_rx = hep->desc.bEndpointAddress | ||
| 240 | & USB_ENDPOINT_DIR_MASK; | ||
| 241 | dump_tx = !dump_rx; | ||
| 242 | } else | ||
| 243 | break; | ||
| 244 | /* END TEMPORARY */ | ||
| 245 | |||
| 246 | |||
| 247 | if (dump_rx) { | ||
| 248 | code = snprintf(buf, max, | ||
| 249 | "\nRX%d: %s rxcsr %04x interval %02x " | ||
| 250 | "max %04x type %02x; " | ||
| 251 | "dev %d hub %d port %d" | ||
| 252 | "\n", | ||
| 253 | epnum, | ||
| 254 | hw_ep->rx_double_buffered | ||
| 255 | ? "2buf" : "1buf", | ||
| 256 | musb_readw(regs, MUSB_RXCSR), | ||
| 257 | musb_readb(regs, MUSB_RXINTERVAL), | ||
| 258 | musb_readw(regs, MUSB_RXMAXP), | ||
| 259 | musb_readb(regs, MUSB_RXTYPE), | ||
| 260 | /* FIXME: assumes multipoint */ | ||
| 261 | musb_readb(musb->mregs, | ||
| 262 | MUSB_BUSCTL_OFFSET(epnum, | ||
| 263 | MUSB_RXFUNCADDR)), | ||
| 264 | musb_readb(musb->mregs, | ||
| 265 | MUSB_BUSCTL_OFFSET(epnum, | ||
| 266 | MUSB_RXHUBADDR)), | ||
| 267 | musb_readb(musb->mregs, | ||
| 268 | MUSB_BUSCTL_OFFSET(epnum, | ||
| 269 | MUSB_RXHUBPORT)) | ||
| 270 | ); | ||
| 271 | if (code <= 0) | ||
| 272 | break; | ||
| 273 | code = min(code, (int) max); | ||
| 274 | buf += code; | ||
| 275 | max -= code; | ||
| 276 | |||
| 277 | if (is_cppi_enabled() | ||
| 278 | && epnum | ||
| 279 | && hw_ep->rx_channel) { | ||
| 280 | unsigned cppi = epnum - 1; | ||
| 281 | unsigned off1 = cppi << 2; | ||
| 282 | void __iomem *base; | ||
| 283 | void __iomem *ram; | ||
| 284 | char tmp[16]; | ||
| 285 | |||
| 286 | base = musb->ctrl_base; | ||
| 287 | ram = DAVINCI_RXCPPI_STATERAM_OFFSET( | ||
| 288 | cppi) + base; | ||
| 289 | snprintf(tmp, sizeof tmp, "%d left, ", | ||
| 290 | musb_readl(base, | ||
| 291 | DAVINCI_RXCPPI_BUFCNT0_REG | ||
| 292 | + off1)); | ||
| 293 | |||
| 294 | code = snprintf(buf, max, | ||
| 295 | " rx dma%d: %s" | ||
| 296 | "%08x %08x, %08x %08x; " | ||
| 297 | "%08x %08x %08x .. %08x\n", | ||
| 298 | cppi, tmp, | ||
| 299 | musb_readl(ram, 0 * 4), | ||
| 300 | musb_readl(ram, 1 * 4), | ||
| 301 | musb_readl(ram, 2 * 4), | ||
| 302 | musb_readl(ram, 3 * 4), | ||
| 303 | musb_readl(ram, 4 * 4), | ||
| 304 | musb_readl(ram, 5 * 4), | ||
| 305 | musb_readl(ram, 6 * 4), | ||
| 306 | musb_readl(ram, 7 * 4)); | ||
| 307 | if (code <= 0) | ||
| 308 | break; | ||
| 309 | code = min(code, (int) max); | ||
| 310 | buf += code; | ||
| 311 | max -= code; | ||
| 312 | } | ||
| 313 | |||
| 314 | if (hw_ep == musb->bulk_ep | ||
| 315 | && !list_empty( | ||
| 316 | &musb->in_bulk)) { | ||
| 317 | code = dump_queue(&musb->in_bulk, | ||
| 318 | buf, max); | ||
| 319 | if (code <= 0) | ||
| 320 | break; | ||
| 321 | code = min(code, (int) max); | ||
| 322 | buf += code; | ||
| 323 | max -= code; | ||
| 324 | } else if (musb->periodic[epnum]) { | ||
| 325 | code = dump_qh(musb->periodic[epnum], | ||
| 326 | buf, max); | ||
| 327 | if (code <= 0) | ||
| 328 | break; | ||
| 329 | code = min(code, (int) max); | ||
| 330 | buf += code; | ||
| 331 | max -= code; | ||
| 332 | } | ||
| 333 | } | ||
| 334 | |||
| 335 | if (dump_tx) { | ||
| 336 | code = snprintf(buf, max, | ||
| 337 | "\nTX%d: %s txcsr %04x interval %02x " | ||
| 338 | "max %04x type %02x; " | ||
| 339 | "dev %d hub %d port %d" | ||
| 340 | "\n", | ||
| 341 | epnum, | ||
| 342 | hw_ep->tx_double_buffered | ||
| 343 | ? "2buf" : "1buf", | ||
| 344 | musb_readw(regs, MUSB_TXCSR), | ||
| 345 | musb_readb(regs, MUSB_TXINTERVAL), | ||
| 346 | musb_readw(regs, MUSB_TXMAXP), | ||
| 347 | musb_readb(regs, MUSB_TXTYPE), | ||
| 348 | /* FIXME: assumes multipoint */ | ||
| 349 | musb_readb(musb->mregs, | ||
| 350 | MUSB_BUSCTL_OFFSET(epnum, | ||
| 351 | MUSB_TXFUNCADDR)), | ||
| 352 | musb_readb(musb->mregs, | ||
| 353 | MUSB_BUSCTL_OFFSET(epnum, | ||
| 354 | MUSB_TXHUBADDR)), | ||
| 355 | musb_readb(musb->mregs, | ||
| 356 | MUSB_BUSCTL_OFFSET(epnum, | ||
| 357 | MUSB_TXHUBPORT)) | ||
| 358 | ); | ||
| 359 | if (code <= 0) | ||
| 360 | break; | ||
| 361 | code = min(code, (int) max); | ||
| 362 | buf += code; | ||
| 363 | max -= code; | ||
| 364 | |||
| 365 | if (is_cppi_enabled() | ||
| 366 | && epnum | ||
| 367 | && hw_ep->tx_channel) { | ||
| 368 | unsigned cppi = epnum - 1; | ||
| 369 | void __iomem *base; | ||
| 370 | void __iomem *ram; | ||
| 371 | |||
| 372 | base = musb->ctrl_base; | ||
| 373 | ram = DAVINCI_TXCPPI_STATERAM_OFFSET( | ||
| 374 | cppi) + base; | ||
| 375 | code = snprintf(buf, max, | ||
| 376 | " tx dma%d: " | ||
| 377 | "%08x %08x, %08x %08x; " | ||
| 378 | "%08x %08x %08x .. %08x\n", | ||
| 379 | cppi, | ||
| 380 | musb_readl(ram, 0 * 4), | ||
| 381 | musb_readl(ram, 1 * 4), | ||
| 382 | musb_readl(ram, 2 * 4), | ||
| 383 | musb_readl(ram, 3 * 4), | ||
| 384 | musb_readl(ram, 4 * 4), | ||
| 385 | musb_readl(ram, 5 * 4), | ||
| 386 | musb_readl(ram, 6 * 4), | ||
| 387 | musb_readl(ram, 7 * 4)); | ||
| 388 | if (code <= 0) | ||
| 389 | break; | ||
| 390 | code = min(code, (int) max); | ||
| 391 | buf += code; | ||
| 392 | max -= code; | ||
| 393 | } | ||
| 394 | |||
| 395 | if (hw_ep == musb->control_ep | ||
| 396 | && !list_empty( | ||
| 397 | &musb->control)) { | ||
| 398 | code = dump_queue(&musb->control, | ||
| 399 | buf, max); | ||
| 400 | if (code <= 0) | ||
| 401 | break; | ||
| 402 | code = min(code, (int) max); | ||
| 403 | buf += code; | ||
| 404 | max -= code; | ||
| 405 | } else if (hw_ep == musb->bulk_ep | ||
| 406 | && !list_empty( | ||
| 407 | &musb->out_bulk)) { | ||
| 408 | code = dump_queue(&musb->out_bulk, | ||
| 409 | buf, max); | ||
| 410 | if (code <= 0) | ||
| 411 | break; | ||
| 412 | code = min(code, (int) max); | ||
| 413 | buf += code; | ||
| 414 | max -= code; | ||
| 415 | } else if (musb->periodic[epnum]) { | ||
| 416 | code = dump_qh(musb->periodic[epnum], | ||
| 417 | buf, max); | ||
| 418 | if (code <= 0) | ||
| 419 | break; | ||
| 420 | code = min(code, (int) max); | ||
| 421 | buf += code; | ||
| 422 | max -= code; | ||
| 423 | } | ||
| 424 | } | ||
| 425 | } | ||
| 426 | #endif | ||
| 427 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 428 | if (is_peripheral_active(musb)) { | ||
| 429 | code = 0; | ||
| 430 | |||
| 431 | if (hw_ep->ep_in.desc || !epnum) { | ||
| 432 | code = dump_ep(&hw_ep->ep_in, buf, max); | ||
| 433 | if (code <= 0) | ||
| 434 | break; | ||
| 435 | code = min(code, (int) max); | ||
| 436 | buf += code; | ||
| 437 | max -= code; | ||
| 438 | } | ||
| 439 | if (hw_ep->ep_out.desc) { | ||
| 440 | code = dump_ep(&hw_ep->ep_out, buf, max); | ||
| 441 | if (code <= 0) | ||
| 442 | break; | ||
| 443 | code = min(code, (int) max); | ||
| 444 | buf += code; | ||
| 445 | max -= code; | ||
| 446 | } | ||
| 447 | } | ||
| 448 | #endif | ||
| 449 | } while (0); | ||
| 450 | |||
| 451 | return buf - aBuffer; | ||
| 452 | } | ||
| 453 | |||
| 454 | /* Dump the current status and compile options. | ||
| 455 | * @param musb the device driver instance | ||
| 456 | * @param buffer where to dump the status; it must be big enough to hold the | ||
| 457 | * result, otherwise "BAD THINGS HAPPEN(TM)". | ||
| 458 | */ | ||
| 459 | static int dump_header_stats(struct musb *musb, char *buffer) | ||
| 460 | { | ||
| 461 | int code, count = 0; | ||
| 462 | const void __iomem *mbase = musb->mregs; | ||
| 463 | |||
| 464 | *buffer = 0; | ||
| 465 | count = sprintf(buffer, "Status: %sHDRC, Mode=%s " | ||
| 466 | "(Power=%02x, DevCtl=%02x)\n", | ||
| 467 | (musb->is_multipoint ? "M" : ""), MUSB_MODE(musb), | ||
| 468 | musb_readb(mbase, MUSB_POWER), | ||
| 469 | musb_readb(mbase, MUSB_DEVCTL)); | ||
| 470 | if (count <= 0) | ||
| 471 | return 0; | ||
| 472 | buffer += count; | ||
| 473 | |||
| 474 | code = sprintf(buffer, "OTG state: %s; %sactive\n", | ||
| 475 | otg_state_string(musb), | ||
| 476 | musb->is_active ? "" : "in"); | ||
| 477 | if (code <= 0) | ||
| 478 | goto done; | ||
| 479 | buffer += code; | ||
| 480 | count += code; | ||
| 481 | |||
| 482 | code = sprintf(buffer, | ||
| 483 | "Options: " | ||
| 484 | #ifdef CONFIG_MUSB_PIO_ONLY | ||
| 485 | "pio" | ||
| 486 | #elif defined(CONFIG_USB_TI_CPPI_DMA) | ||
| 487 | "cppi-dma" | ||
| 488 | #elif defined(CONFIG_USB_INVENTRA_DMA) | ||
| 489 | "musb-dma" | ||
| 490 | #elif defined(CONFIG_USB_TUSB_OMAP_DMA) | ||
| 491 | "tusb-omap-dma" | ||
| 492 | #else | ||
| 493 | "?dma?" | ||
| 494 | #endif | ||
| 495 | ", " | ||
| 496 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 497 | "otg (peripheral+host)" | ||
| 498 | #elif defined(CONFIG_USB_GADGET_MUSB_HDRC) | ||
| 499 | "peripheral" | ||
| 500 | #elif defined(CONFIG_USB_MUSB_HDRC_HCD) | ||
| 501 | "host" | ||
| 502 | #endif | ||
| 503 | ", debug=%d [eps=%d]\n", | ||
| 504 | debug, | ||
| 505 | musb->nr_endpoints); | ||
| 506 | if (code <= 0) | ||
| 507 | goto done; | ||
| 508 | count += code; | ||
| 509 | buffer += code; | ||
| 510 | |||
| 511 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 512 | code = sprintf(buffer, "Peripheral address: %02x\n", | ||
| 513 | musb_readb(musb->ctrl_base, MUSB_FADDR)); | ||
| 514 | if (code <= 0) | ||
| 515 | goto done; | ||
| 516 | buffer += code; | ||
| 517 | count += code; | ||
| 518 | #endif | ||
| 519 | |||
| 520 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 521 | code = sprintf(buffer, "Root port status: %08x\n", | ||
| 522 | musb->port1_status); | ||
| 523 | if (code <= 0) | ||
| 524 | goto done; | ||
| 525 | buffer += code; | ||
| 526 | count += code; | ||
| 527 | #endif | ||
| 528 | |||
| 529 | #ifdef CONFIG_ARCH_DAVINCI | ||
| 530 | code = sprintf(buffer, | ||
| 531 | "DaVinci: ctrl=%02x stat=%1x phy=%03x\n" | ||
| 532 | "\trndis=%05x auto=%04x intsrc=%08x intmsk=%08x" | ||
| 533 | "\n", | ||
| 534 | musb_readl(musb->ctrl_base, DAVINCI_USB_CTRL_REG), | ||
| 535 | musb_readl(musb->ctrl_base, DAVINCI_USB_STAT_REG), | ||
| 536 | __raw_readl((void __force __iomem *) | ||
| 537 | IO_ADDRESS(USBPHY_CTL_PADDR)), | ||
| 538 | musb_readl(musb->ctrl_base, DAVINCI_RNDIS_REG), | ||
| 539 | musb_readl(musb->ctrl_base, DAVINCI_AUTOREQ_REG), | ||
| 540 | musb_readl(musb->ctrl_base, | ||
| 541 | DAVINCI_USB_INT_SOURCE_REG), | ||
| 542 | musb_readl(musb->ctrl_base, | ||
| 543 | DAVINCI_USB_INT_MASK_REG)); | ||
| 544 | if (code <= 0) | ||
| 545 | goto done; | ||
| 546 | count += code; | ||
| 547 | buffer += code; | ||
| 548 | #endif /* DAVINCI */ | ||
| 549 | |||
| 550 | #ifdef CONFIG_USB_TUSB6010 | ||
| 551 | code = sprintf(buffer, | ||
| 552 | "TUSB6010: devconf %08x, phy enable %08x drive %08x" | ||
| 553 | "\n\totg %03x timer %08x" | ||
| 554 | "\n\tprcm conf %08x mgmt %08x; int src %08x mask %08x" | ||
| 555 | "\n", | ||
| 556 | musb_readl(musb->ctrl_base, TUSB_DEV_CONF), | ||
| 557 | musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL_ENABLE), | ||
| 558 | musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL), | ||
| 559 | musb_readl(musb->ctrl_base, TUSB_DEV_OTG_STAT), | ||
| 560 | musb_readl(musb->ctrl_base, TUSB_DEV_OTG_TIMER), | ||
| 561 | musb_readl(musb->ctrl_base, TUSB_PRCM_CONF), | ||
| 562 | musb_readl(musb->ctrl_base, TUSB_PRCM_MNGMT), | ||
| 563 | musb_readl(musb->ctrl_base, TUSB_INT_SRC), | ||
| 564 | musb_readl(musb->ctrl_base, TUSB_INT_MASK)); | ||
| 565 | if (code <= 0) | ||
| 566 | goto done; | ||
| 567 | count += code; | ||
| 568 | buffer += code; | ||
| 569 | #endif /* TUSB6010 */ | ||
| 570 | |||
| 571 | if (is_cppi_enabled() && musb->dma_controller) { | ||
| 572 | code = sprintf(buffer, | ||
| 573 | "CPPI: txcr=%d txsrc=%01x txena=%01x; " | ||
| 574 | "rxcr=%d rxsrc=%01x rxena=%01x " | ||
| 575 | "\n", | ||
| 576 | musb_readl(musb->ctrl_base, | ||
| 577 | DAVINCI_TXCPPI_CTRL_REG), | ||
| 578 | musb_readl(musb->ctrl_base, | ||
| 579 | DAVINCI_TXCPPI_RAW_REG), | ||
| 580 | musb_readl(musb->ctrl_base, | ||
| 581 | DAVINCI_TXCPPI_INTENAB_REG), | ||
| 582 | musb_readl(musb->ctrl_base, | ||
| 583 | DAVINCI_RXCPPI_CTRL_REG), | ||
| 584 | musb_readl(musb->ctrl_base, | ||
| 585 | DAVINCI_RXCPPI_RAW_REG), | ||
| 586 | musb_readl(musb->ctrl_base, | ||
| 587 | DAVINCI_RXCPPI_INTENAB_REG)); | ||
| 588 | if (code <= 0) | ||
| 589 | goto done; | ||
| 590 | count += code; | ||
| 591 | buffer += code; | ||
| 592 | } | ||
| 593 | |||
| 594 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 595 | if (is_peripheral_enabled(musb)) { | ||
| 596 | code = sprintf(buffer, "Gadget driver: %s\n", | ||
| 597 | musb->gadget_driver | ||
| 598 | ? musb->gadget_driver->driver.name | ||
| 599 | : "(none)"); | ||
| 600 | if (code <= 0) | ||
| 601 | goto done; | ||
| 602 | count += code; | ||
| 603 | buffer += code; | ||
| 604 | } | ||
| 605 | #endif | ||
| 606 | |||
| 607 | done: | ||
| 608 | return count; | ||
| 609 | } | ||
| 610 | |||
| 611 | /* Write to ProcFS | ||
| 612 | * | ||
| 613 | * C soft-connect | ||
| 614 | * c soft-disconnect | ||
| 615 | * I enable HS | ||
| 616 | * i disable HS | ||
| 617 | * s stop session | ||
| 618 | * F force session (OTG-unfriendly) | ||
| 619 | * E rElinquish bus (OTG) | ||
| 620 | * H request host mode | ||
| 621 | * h cancel host request | ||
| 622 | * T start sending TEST_PACKET | ||
| 623 | * D<num> set/query the debug level | ||
| 624 | */ | ||
| 625 | static int musb_proc_write(struct file *file, const char __user *buffer, | ||
| 626 | unsigned long count, void *data) | ||
| 627 | { | ||
| 628 | char cmd; | ||
| 629 | u8 reg; | ||
| 630 | struct musb *musb = (struct musb *)data; | ||
| 631 | void __iomem *mbase = musb->mregs; | ||
| 632 | |||
| 633 | /* MOD_INC_USE_COUNT; */ | ||
| 634 | |||
| 635 | if (unlikely(copy_from_user(&cmd, buffer, 1))) | ||
| 636 | return -EFAULT; | ||
| 637 | |||
| 638 | switch (cmd) { | ||
| 639 | case 'C': | ||
| 640 | if (mbase) { | ||
| 641 | reg = musb_readb(mbase, MUSB_POWER) | ||
| 642 | | MUSB_POWER_SOFTCONN; | ||
| 643 | musb_writeb(mbase, MUSB_POWER, reg); | ||
| 644 | } | ||
| 645 | break; | ||
| 646 | |||
| 647 | case 'c': | ||
| 648 | if (mbase) { | ||
| 649 | reg = musb_readb(mbase, MUSB_POWER) | ||
| 650 | & ~MUSB_POWER_SOFTCONN; | ||
| 651 | musb_writeb(mbase, MUSB_POWER, reg); | ||
| 652 | } | ||
| 653 | break; | ||
| 654 | |||
| 655 | case 'I': | ||
| 656 | if (mbase) { | ||
| 657 | reg = musb_readb(mbase, MUSB_POWER) | ||
| 658 | | MUSB_POWER_HSENAB; | ||
| 659 | musb_writeb(mbase, MUSB_POWER, reg); | ||
| 660 | } | ||
| 661 | break; | ||
| 662 | |||
| 663 | case 'i': | ||
| 664 | if (mbase) { | ||
| 665 | reg = musb_readb(mbase, MUSB_POWER) | ||
| 666 | & ~MUSB_POWER_HSENAB; | ||
| 667 | musb_writeb(mbase, MUSB_POWER, reg); | ||
| 668 | } | ||
| 669 | break; | ||
| 670 | |||
| 671 | case 'F': | ||
| 672 | reg = musb_readb(mbase, MUSB_DEVCTL); | ||
| 673 | reg |= MUSB_DEVCTL_SESSION; | ||
| 674 | musb_writeb(mbase, MUSB_DEVCTL, reg); | ||
| 675 | break; | ||
| 676 | |||
| 677 | case 'H': | ||
| 678 | if (mbase) { | ||
| 679 | reg = musb_readb(mbase, MUSB_DEVCTL); | ||
| 680 | reg |= MUSB_DEVCTL_HR; | ||
| 681 | musb_writeb(mbase, MUSB_DEVCTL, reg); | ||
| 682 | /* MUSB_HST_MODE( ((struct musb*)data) ); */ | ||
| 683 | /* WARNING("Host Mode\n"); */ | ||
| 684 | } | ||
| 685 | break; | ||
| 686 | |||
| 687 | case 'h': | ||
| 688 | if (mbase) { | ||
| 689 | reg = musb_readb(mbase, MUSB_DEVCTL); | ||
| 690 | reg &= ~MUSB_DEVCTL_HR; | ||
| 691 | musb_writeb(mbase, MUSB_DEVCTL, reg); | ||
| 692 | } | ||
| 693 | break; | ||
| 694 | |||
| 695 | case 'T': | ||
| 696 | if (mbase) { | ||
| 697 | musb_load_testpacket(musb); | ||
| 698 | musb_writeb(mbase, MUSB_TESTMODE, | ||
| 699 | MUSB_TEST_PACKET); | ||
| 700 | } | ||
| 701 | break; | ||
| 702 | |||
| 703 | #if (MUSB_DEBUG > 0) | ||
| 704 | /* set/read debug level */ | ||
| 705 | case 'D':{ | ||
| 706 | if (count > 1) { | ||
| 707 | char digits[8], *p = digits; | ||
| 708 | int i = 0, level = 0, sign = 1; | ||
| 709 | int len = min(count - 1, (unsigned long)8); | ||
| 710 | |||
| 711 | if (copy_from_user(&digits, &buffer[1], len)) | ||
| 712 | return -EFAULT; | ||
| 713 | |||
| 714 | /* optional sign */ | ||
| 715 | if (*p == '-') { | ||
| 716 | len -= 1; | ||
| 717 | sign = -sign; | ||
| 718 | p++; | ||
| 719 | } | ||
| 720 | |||
| 721 | /* read it */ | ||
| 722 | while (i++ < len && *p >= '0' && *p <= '9') { | ||
| 723 | level = level * 10 + (*p - '0'); | ||
| 724 | p++; | ||
| 725 | } | ||
| 726 | |||
| 727 | level *= sign; | ||
| 728 | DBG(1, "debug level %d\n", level); | ||
| 729 | debug = level; | ||
| 730 | } | ||
| 731 | } | ||
| 732 | break; | ||
| 733 | |||
| 734 | |||
| 735 | case '?': | ||
| 736 | INFO("?: you are seeing it\n"); | ||
| 737 | INFO("C/c: soft connect enable/disable\n"); | ||
| 738 | INFO("I/i: hispeed enable/disable\n"); | ||
| 739 | INFO("F: force session start\n"); | ||
| 740 | INFO("H: host mode\n"); | ||
| 741 | INFO("T: start sending TEST_PACKET\n"); | ||
| 742 | INFO("D: set/read dbug level\n"); | ||
| 743 | break; | ||
| 744 | #endif | ||
| 745 | |||
| 746 | default: | ||
| 747 | ERR("Command %c not implemented\n", cmd); | ||
| 748 | break; | ||
| 749 | } | ||
| 750 | |||
| 751 | musb_platform_try_idle(musb, 0); | ||
| 752 | |||
| 753 | return count; | ||
| 754 | } | ||
| 755 | |||
| 756 | static int musb_proc_read(char *page, char **start, | ||
| 757 | off_t off, int count, int *eof, void *data) | ||
| 758 | { | ||
| 759 | char *buffer = page; | ||
| 760 | int code = 0; | ||
| 761 | unsigned long flags; | ||
| 762 | struct musb *musb = data; | ||
| 763 | unsigned epnum; | ||
| 764 | |||
| 765 | count -= off; | ||
| 766 | count -= 1; /* for NUL at end */ | ||
| 767 | if (count <= 0) | ||
| 768 | return -EINVAL; | ||
| 769 | |||
| 770 | spin_lock_irqsave(&musb->lock, flags); | ||
| 771 | |||
| 772 | code = dump_header_stats(musb, buffer); | ||
| 773 | if (code > 0) { | ||
| 774 | buffer += code; | ||
| 775 | count -= code; | ||
| 776 | } | ||
| 777 | |||
| 778 | /* generate the report for the end points */ | ||
| 779 | /* REVISIT ... not unless something's connected! */ | ||
| 780 | for (epnum = 0; count >= 0 && epnum < musb->nr_endpoints; | ||
| 781 | epnum++) { | ||
| 782 | code = dump_end_info(musb, epnum, buffer, count); | ||
| 783 | if (code > 0) { | ||
| 784 | buffer += code; | ||
| 785 | count -= code; | ||
| 786 | } | ||
| 787 | } | ||
| 788 | |||
| 789 | musb_platform_try_idle(musb, 0); | ||
| 790 | |||
| 791 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 792 | *eof = 1; | ||
| 793 | |||
| 794 | return buffer - page; | ||
| 795 | } | ||
| 796 | |||
| 797 | void __devexit musb_debug_delete(char *name, struct musb *musb) | ||
| 798 | { | ||
| 799 | if (musb->proc_entry) | ||
| 800 | remove_proc_entry(name, NULL); | ||
| 801 | } | ||
| 802 | |||
| 803 | struct proc_dir_entry *__init | ||
| 804 | musb_debug_create(char *name, struct musb *data) | ||
| 805 | { | ||
| 806 | struct proc_dir_entry *pde; | ||
| 807 | |||
| 808 | /* FIXME convert everything to seq_file; then later, debugfs */ | ||
| 809 | |||
| 810 | if (!name) | ||
| 811 | return NULL; | ||
| 812 | |||
| 813 | pde = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, NULL); | ||
| 814 | data->proc_entry = pde; | ||
| 815 | if (pde) { | ||
| 816 | pde->data = data; | ||
| 817 | /* pde->owner = THIS_MODULE; */ | ||
| 818 | |||
| 819 | pde->read_proc = musb_proc_read; | ||
| 820 | pde->write_proc = musb_proc_write; | ||
| 821 | |||
| 822 | pde->size = 0; | ||
| 823 | |||
| 824 | pr_debug("Registered /proc/%s\n", name); | ||
| 825 | } else { | ||
| 826 | pr_debug("Cannot create a valid proc file entry"); | ||
| 827 | } | ||
| 828 | |||
| 829 | return pde; | ||
| 830 | } | ||
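The single-character write protocol documented at the top of musb_proc_write() can be exercised from user space once the entry exists. The sketch below is illustrative only: the proc file name is whatever the caller passes to musb_debug_create() (assumed here to be "driver/musb_hdrc"), and reading the same file returns the status dump produced by musb_proc_read().

	/* Hypothetical user-space helper; the proc path is an assumption. */
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/driver/musb_hdrc", O_WRONLY);

		if (fd < 0)
			return 1;
		(void)write(fd, "F", 1);	/* 'F': force a session (OTG-unfriendly) */
		(void)write(fd, "D3", 2);	/* 'D<num>': set the debug level to 3 */
		close(fd);
		return 0;
	}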
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h new file mode 100644 index 000000000000..9c228661aa5a --- /dev/null +++ b/drivers/usb/musb/musb_regs.h | |||
| @@ -0,0 +1,300 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG driver register defines | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * version 2 as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 32 | * | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef __MUSB_REGS_H__ | ||
| 36 | #define __MUSB_REGS_H__ | ||
| 37 | |||
| 38 | #define MUSB_EP0_FIFOSIZE 64 /* This is non-configurable */ | ||
| 39 | |||
| 40 | /* | ||
| 41 | * Common USB registers | ||
| 42 | */ | ||
| 43 | |||
| 44 | #define MUSB_FADDR 0x00 /* 8-bit */ | ||
| 45 | #define MUSB_POWER 0x01 /* 8-bit */ | ||
| 46 | |||
| 47 | #define MUSB_INTRTX 0x02 /* 16-bit */ | ||
| 48 | #define MUSB_INTRRX 0x04 | ||
| 49 | #define MUSB_INTRTXE 0x06 | ||
| 50 | #define MUSB_INTRRXE 0x08 | ||
| 51 | #define MUSB_INTRUSB 0x0A /* 8 bit */ | ||
| 52 | #define MUSB_INTRUSBE 0x0B /* 8 bit */ | ||
| 53 | #define MUSB_FRAME 0x0C | ||
| 54 | #define MUSB_INDEX 0x0E /* 8 bit */ | ||
| 55 | #define MUSB_TESTMODE 0x0F /* 8 bit */ | ||
| 56 | |||
| 57 | /* Get offset for a given FIFO from musb->mregs */ | ||
| 58 | #ifdef CONFIG_USB_TUSB6010 | ||
| 59 | #define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) | ||
| 60 | #else | ||
| 61 | #define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) | ||
| 62 | #endif | ||
| 63 | |||
| 64 | /* | ||
| 65 | * Additional Control Registers | ||
| 66 | */ | ||
| 67 | |||
| 68 | #define MUSB_DEVCTL 0x60 /* 8 bit */ | ||
| 69 | |||
| 70 | /* These are always controlled through the INDEX register */ | ||
| 71 | #define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */ | ||
| 72 | #define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */ | ||
| 73 | #define MUSB_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */ | ||
| 74 | #define MUSB_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */ | ||
| 75 | |||
| 76 | /* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */ | ||
| 77 | #define MUSB_HWVERS 0x6C /* 8 bit */ | ||
| 78 | |||
| 79 | #define MUSB_EPINFO 0x78 /* 8 bit */ | ||
| 80 | #define MUSB_RAMINFO 0x79 /* 8 bit */ | ||
| 81 | #define MUSB_LINKINFO 0x7a /* 8 bit */ | ||
| 82 | #define MUSB_VPLEN 0x7b /* 8 bit */ | ||
| 83 | #define MUSB_HS_EOF1 0x7c /* 8 bit */ | ||
| 84 | #define MUSB_FS_EOF1 0x7d /* 8 bit */ | ||
| 85 | #define MUSB_LS_EOF1 0x7e /* 8 bit */ | ||
| 86 | |||
| 87 | /* Offsets to endpoint registers */ | ||
| 88 | #define MUSB_TXMAXP 0x00 | ||
| 89 | #define MUSB_TXCSR 0x02 | ||
| 90 | #define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */ | ||
| 91 | #define MUSB_RXMAXP 0x04 | ||
| 92 | #define MUSB_RXCSR 0x06 | ||
| 93 | #define MUSB_RXCOUNT 0x08 | ||
| 94 | #define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */ | ||
| 95 | #define MUSB_TXTYPE 0x0A | ||
| 96 | #define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */ | ||
| 97 | #define MUSB_TXINTERVAL 0x0B | ||
| 98 | #define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */ | ||
| 99 | #define MUSB_RXTYPE 0x0C | ||
| 100 | #define MUSB_RXINTERVAL 0x0D | ||
| 101 | #define MUSB_FIFOSIZE 0x0F | ||
| 102 | #define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */ | ||
| 103 | |||
| 104 | /* Offsets to endpoint registers in indexed model (using INDEX register) */ | ||
| 105 | #define MUSB_INDEXED_OFFSET(_epnum, _offset) \ | ||
| 106 | (0x10 + (_offset)) | ||
| 107 | |||
| 108 | /* Offsets to endpoint registers in flat models */ | ||
| 109 | #define MUSB_FLAT_OFFSET(_epnum, _offset) \ | ||
| 110 | (0x100 + (0x10*(_epnum)) + (_offset)) | ||
| 111 | |||
| 112 | #ifdef CONFIG_USB_TUSB6010 | ||
| 113 | /* TUSB6010 EP0 configuration register is special */ | ||
| 114 | #define MUSB_TUSB_OFFSET(_epnum, _offset) \ | ||
| 115 | (0x10 + _offset) | ||
| 116 | #include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */ | ||
| 117 | #endif | ||
| 118 | |||
| 119 | /* "bus control"/target registers, for host side multipoint (external hubs) */ | ||
| 120 | #define MUSB_TXFUNCADDR 0x00 | ||
| 121 | #define MUSB_TXHUBADDR 0x02 | ||
| 122 | #define MUSB_TXHUBPORT 0x03 | ||
| 123 | |||
| 124 | #define MUSB_RXFUNCADDR 0x04 | ||
| 125 | #define MUSB_RXHUBADDR 0x06 | ||
| 126 | #define MUSB_RXHUBPORT 0x07 | ||
| 127 | |||
| 128 | #define MUSB_BUSCTL_OFFSET(_epnum, _offset) \ | ||
| 129 | (0x80 + (8*(_epnum)) + (_offset)) | ||
| 130 | |||
| 131 | /* | ||
| 132 | * MUSB Register bits | ||
| 133 | */ | ||
| 134 | |||
| 135 | /* POWER */ | ||
| 136 | #define MUSB_POWER_ISOUPDATE 0x80 | ||
| 137 | #define MUSB_POWER_SOFTCONN 0x40 | ||
| 138 | #define MUSB_POWER_HSENAB 0x20 | ||
| 139 | #define MUSB_POWER_HSMODE 0x10 | ||
| 140 | #define MUSB_POWER_RESET 0x08 | ||
| 141 | #define MUSB_POWER_RESUME 0x04 | ||
| 142 | #define MUSB_POWER_SUSPENDM 0x02 | ||
| 143 | #define MUSB_POWER_ENSUSPEND 0x01 | ||
| 144 | |||
| 145 | /* INTRUSB */ | ||
| 146 | #define MUSB_INTR_SUSPEND 0x01 | ||
| 147 | #define MUSB_INTR_RESUME 0x02 | ||
| 148 | #define MUSB_INTR_RESET 0x04 | ||
| 149 | #define MUSB_INTR_BABBLE 0x04 | ||
| 150 | #define MUSB_INTR_SOF 0x08 | ||
| 151 | #define MUSB_INTR_CONNECT 0x10 | ||
| 152 | #define MUSB_INTR_DISCONNECT 0x20 | ||
| 153 | #define MUSB_INTR_SESSREQ 0x40 | ||
| 154 | #define MUSB_INTR_VBUSERROR 0x80 /* For SESSION end */ | ||
| 155 | |||
| 156 | /* DEVCTL */ | ||
| 157 | #define MUSB_DEVCTL_BDEVICE 0x80 | ||
| 158 | #define MUSB_DEVCTL_FSDEV 0x40 | ||
| 159 | #define MUSB_DEVCTL_LSDEV 0x20 | ||
| 160 | #define MUSB_DEVCTL_VBUS 0x18 | ||
| 161 | #define MUSB_DEVCTL_VBUS_SHIFT 3 | ||
| 162 | #define MUSB_DEVCTL_HM 0x04 | ||
| 163 | #define MUSB_DEVCTL_HR 0x02 | ||
| 164 | #define MUSB_DEVCTL_SESSION 0x01 | ||
| 165 | |||
| 166 | /* TESTMODE */ | ||
| 167 | #define MUSB_TEST_FORCE_HOST 0x80 | ||
| 168 | #define MUSB_TEST_FIFO_ACCESS 0x40 | ||
| 169 | #define MUSB_TEST_FORCE_FS 0x20 | ||
| 170 | #define MUSB_TEST_FORCE_HS 0x10 | ||
| 171 | #define MUSB_TEST_PACKET 0x08 | ||
| 172 | #define MUSB_TEST_K 0x04 | ||
| 173 | #define MUSB_TEST_J 0x02 | ||
| 174 | #define MUSB_TEST_SE0_NAK 0x01 | ||
| 175 | |||
| 176 | /* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */ | ||
| 177 | #define MUSB_FIFOSZ_DPB 0x10 | ||
| 178 | /* Allocation size (8, 16, 32, ... 4096) */ | ||
| 179 | #define MUSB_FIFOSZ_SIZE 0x0f | ||
| 180 | |||
| 181 | /* CSR0 */ | ||
| 182 | #define MUSB_CSR0_FLUSHFIFO 0x0100 | ||
| 183 | #define MUSB_CSR0_TXPKTRDY 0x0002 | ||
| 184 | #define MUSB_CSR0_RXPKTRDY 0x0001 | ||
| 185 | |||
| 186 | /* CSR0 in Peripheral mode */ | ||
| 187 | #define MUSB_CSR0_P_SVDSETUPEND 0x0080 | ||
| 188 | #define MUSB_CSR0_P_SVDRXPKTRDY 0x0040 | ||
| 189 | #define MUSB_CSR0_P_SENDSTALL 0x0020 | ||
| 190 | #define MUSB_CSR0_P_SETUPEND 0x0010 | ||
| 191 | #define MUSB_CSR0_P_DATAEND 0x0008 | ||
| 192 | #define MUSB_CSR0_P_SENTSTALL 0x0004 | ||
| 193 | |||
| 194 | /* CSR0 in Host mode */ | ||
| 195 | #define MUSB_CSR0_H_DIS_PING 0x0800 | ||
| 196 | #define MUSB_CSR0_H_WR_DATATOGGLE 0x0400 /* Set to allow setting: */ | ||
| 197 | #define MUSB_CSR0_H_DATATOGGLE 0x0200 /* Data toggle control */ | ||
| 198 | #define MUSB_CSR0_H_NAKTIMEOUT 0x0080 | ||
| 199 | #define MUSB_CSR0_H_STATUSPKT 0x0040 | ||
| 200 | #define MUSB_CSR0_H_REQPKT 0x0020 | ||
| 201 | #define MUSB_CSR0_H_ERROR 0x0010 | ||
| 202 | #define MUSB_CSR0_H_SETUPPKT 0x0008 | ||
| 203 | #define MUSB_CSR0_H_RXSTALL 0x0004 | ||
| 204 | |||
| 205 | /* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */ | ||
| 206 | #define MUSB_CSR0_P_WZC_BITS \ | ||
| 207 | (MUSB_CSR0_P_SENTSTALL) | ||
| 208 | #define MUSB_CSR0_H_WZC_BITS \ | ||
| 209 | (MUSB_CSR0_H_NAKTIMEOUT | MUSB_CSR0_H_RXSTALL \ | ||
| 210 | | MUSB_CSR0_RXPKTRDY) | ||
| 211 | |||
| 212 | /* TxType/RxType */ | ||
| 213 | #define MUSB_TYPE_SPEED 0xc0 | ||
| 214 | #define MUSB_TYPE_SPEED_SHIFT 6 | ||
| 215 | #define MUSB_TYPE_PROTO 0x30 /* Implicitly zero for ep0 */ | ||
| 216 | #define MUSB_TYPE_PROTO_SHIFT 4 | ||
| 217 | #define MUSB_TYPE_REMOTE_END 0xf /* Implicitly zero for ep0 */ | ||
| 218 | |||
| 219 | /* CONFIGDATA */ | ||
| 220 | #define MUSB_CONFIGDATA_MPRXE 0x80 /* Auto bulk pkt combining */ | ||
| 221 | #define MUSB_CONFIGDATA_MPTXE 0x40 /* Auto bulk pkt splitting */ | ||
| 222 | #define MUSB_CONFIGDATA_BIGENDIAN 0x20 | ||
| 223 | #define MUSB_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */ | ||
| 224 | #define MUSB_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */ | ||
| 225 | #define MUSB_CONFIGDATA_DYNFIFO 0x04 /* Dynamic FIFO sizing */ | ||
| 226 | #define MUSB_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */ | ||
| 227 | #define MUSB_CONFIGDATA_UTMIDW 0x01 /* Data width 0/1 => 8/16bits */ | ||
| 228 | |||
| 229 | /* TXCSR in Peripheral and Host mode */ | ||
| 230 | #define MUSB_TXCSR_AUTOSET 0x8000 | ||
| 231 | #define MUSB_TXCSR_MODE 0x2000 | ||
| 232 | #define MUSB_TXCSR_DMAENAB 0x1000 | ||
| 233 | #define MUSB_TXCSR_FRCDATATOG 0x0800 | ||
| 234 | #define MUSB_TXCSR_DMAMODE 0x0400 | ||
| 235 | #define MUSB_TXCSR_CLRDATATOG 0x0040 | ||
| 236 | #define MUSB_TXCSR_FLUSHFIFO 0x0008 | ||
| 237 | #define MUSB_TXCSR_FIFONOTEMPTY 0x0002 | ||
| 238 | #define MUSB_TXCSR_TXPKTRDY 0x0001 | ||
| 239 | |||
| 240 | /* TXCSR in Peripheral mode */ | ||
| 241 | #define MUSB_TXCSR_P_ISO 0x4000 | ||
| 242 | #define MUSB_TXCSR_P_INCOMPTX 0x0080 | ||
| 243 | #define MUSB_TXCSR_P_SENTSTALL 0x0020 | ||
| 244 | #define MUSB_TXCSR_P_SENDSTALL 0x0010 | ||
| 245 | #define MUSB_TXCSR_P_UNDERRUN 0x0004 | ||
| 246 | |||
| 247 | /* TXCSR in Host mode */ | ||
| 248 | #define MUSB_TXCSR_H_WR_DATATOGGLE 0x0200 | ||
| 249 | #define MUSB_TXCSR_H_DATATOGGLE 0x0100 | ||
| 250 | #define MUSB_TXCSR_H_NAKTIMEOUT 0x0080 | ||
| 251 | #define MUSB_TXCSR_H_RXSTALL 0x0020 | ||
| 252 | #define MUSB_TXCSR_H_ERROR 0x0004 | ||
| 253 | |||
| 254 | /* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */ | ||
| 255 | #define MUSB_TXCSR_P_WZC_BITS \ | ||
| 256 | (MUSB_TXCSR_P_INCOMPTX | MUSB_TXCSR_P_SENTSTALL \ | ||
| 257 | | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_FIFONOTEMPTY) | ||
| 258 | #define MUSB_TXCSR_H_WZC_BITS \ | ||
| 259 | (MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_RXSTALL \ | ||
| 260 | | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_FIFONOTEMPTY) | ||
| 261 | |||
| 262 | /* RXCSR in Peripheral and Host mode */ | ||
| 263 | #define MUSB_RXCSR_AUTOCLEAR 0x8000 | ||
| 264 | #define MUSB_RXCSR_DMAENAB 0x2000 | ||
| 265 | #define MUSB_RXCSR_DISNYET 0x1000 | ||
| 266 | #define MUSB_RXCSR_PID_ERR 0x1000 | ||
| 267 | #define MUSB_RXCSR_DMAMODE 0x0800 | ||
| 268 | #define MUSB_RXCSR_INCOMPRX 0x0100 | ||
| 269 | #define MUSB_RXCSR_CLRDATATOG 0x0080 | ||
| 270 | #define MUSB_RXCSR_FLUSHFIFO 0x0010 | ||
| 271 | #define MUSB_RXCSR_DATAERROR 0x0008 | ||
| 272 | #define MUSB_RXCSR_FIFOFULL 0x0002 | ||
| 273 | #define MUSB_RXCSR_RXPKTRDY 0x0001 | ||
| 274 | |||
| 275 | /* RXCSR in Peripheral mode */ | ||
| 276 | #define MUSB_RXCSR_P_ISO 0x4000 | ||
| 277 | #define MUSB_RXCSR_P_SENTSTALL 0x0040 | ||
| 278 | #define MUSB_RXCSR_P_SENDSTALL 0x0020 | ||
| 279 | #define MUSB_RXCSR_P_OVERRUN 0x0004 | ||
| 280 | |||
| 281 | /* RXCSR in Host mode */ | ||
| 282 | #define MUSB_RXCSR_H_AUTOREQ 0x4000 | ||
| 283 | #define MUSB_RXCSR_H_WR_DATATOGGLE 0x0400 | ||
| 284 | #define MUSB_RXCSR_H_DATATOGGLE 0x0200 | ||
| 285 | #define MUSB_RXCSR_H_RXSTALL 0x0040 | ||
| 286 | #define MUSB_RXCSR_H_REQPKT 0x0020 | ||
| 287 | #define MUSB_RXCSR_H_ERROR 0x0004 | ||
| 288 | |||
| 289 | /* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */ | ||
| 290 | #define MUSB_RXCSR_P_WZC_BITS \ | ||
| 291 | (MUSB_RXCSR_P_SENTSTALL | MUSB_RXCSR_P_OVERRUN \ | ||
| 292 | | MUSB_RXCSR_RXPKTRDY) | ||
| 293 | #define MUSB_RXCSR_H_WZC_BITS \ | ||
| 294 | (MUSB_RXCSR_H_RXSTALL | MUSB_RXCSR_H_ERROR \ | ||
| 295 | | MUSB_RXCSR_DATAERROR | MUSB_RXCSR_RXPKTRDY) | ||
| 296 | |||
| 297 | /* HUBADDR */ | ||
| 298 | #define MUSB_HUBADDR_MULTI_TT 0x80 | ||
| 299 | |||
| 300 | #endif /* __MUSB_REGS_H__ */ | ||
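To show how these offset macros compose with the accessors from the platform header, here is a hedged sketch (not part of the patch) that reads the hub address programmed for a host-side RX endpoint on a flat-model, multipoint core, exactly as the procfs dump code above does; 'mbase' is assumed to be musb->mregs.

	static u8 example_rx_hubaddr(void __iomem *mbase, unsigned epnum)
	{
		/* 0x80 + 8*epnum + 0x06, per MUSB_BUSCTL_OFFSET/MUSB_RXHUBADDR */
		return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBADDR));
	}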
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c new file mode 100644 index 000000000000..e0e9ce584175 --- /dev/null +++ b/drivers/usb/musb/musb_virthub.c | |||
| @@ -0,0 +1,425 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG driver virtual root hub support | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * version 2 as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 32 | * | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/module.h> | ||
| 36 | #include <linux/kernel.h> | ||
| 37 | #include <linux/sched.h> | ||
| 38 | #include <linux/slab.h> | ||
| 39 | #include <linux/errno.h> | ||
| 40 | #include <linux/init.h> | ||
| 41 | #include <linux/time.h> | ||
| 42 | #include <linux/timer.h> | ||
| 43 | |||
| 44 | #include <asm/unaligned.h> | ||
| 45 | |||
| 46 | #include "musb_core.h" | ||
| 47 | |||
| 48 | |||
| 49 | static void musb_port_suspend(struct musb *musb, bool do_suspend) | ||
| 50 | { | ||
| 51 | u8 power; | ||
| 52 | void __iomem *mbase = musb->mregs; | ||
| 53 | |||
| 54 | if (!is_host_active(musb)) | ||
| 55 | return; | ||
| 56 | |||
| 57 | /* NOTE: this doesn't necessarily put PHY into low power mode, | ||
| 58 | * turning off its clock; that's a function of PHY integration and | ||
| 59 | * MUSB_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect | ||
| 60 | * SE0 changing to connect (J) or wakeup (K) states. | ||
| 61 | */ | ||
| 62 | power = musb_readb(mbase, MUSB_POWER); | ||
| 63 | if (do_suspend) { | ||
| 64 | int retries = 10000; | ||
| 65 | |||
| 66 | power &= ~MUSB_POWER_RESUME; | ||
| 67 | power |= MUSB_POWER_SUSPENDM; | ||
| 68 | musb_writeb(mbase, MUSB_POWER, power); | ||
| 69 | |||
| 70 | /* Needed for OPT A tests */ | ||
| 71 | power = musb_readb(mbase, MUSB_POWER); | ||
| 72 | while (power & MUSB_POWER_SUSPENDM) { | ||
| 73 | power = musb_readb(mbase, MUSB_POWER); | ||
| 74 | if (retries-- < 1) | ||
| 75 | break; | ||
| 76 | } | ||
| 77 | |||
| 78 | DBG(3, "Root port suspended, power %02x\n", power); | ||
| 79 | |||
| 80 | musb->port1_status |= USB_PORT_STAT_SUSPEND; | ||
| 81 | switch (musb->xceiv.state) { | ||
| 82 | case OTG_STATE_A_HOST: | ||
| 83 | musb->xceiv.state = OTG_STATE_A_SUSPEND; | ||
| 84 | musb->is_active = is_otg_enabled(musb) | ||
| 85 | && musb->xceiv.host->b_hnp_enable; | ||
| 86 | musb_platform_try_idle(musb, 0); | ||
| 87 | break; | ||
| 88 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 89 | case OTG_STATE_B_HOST: | ||
| 90 | musb->xceiv.state = OTG_STATE_B_WAIT_ACON; | ||
| 91 | musb->is_active = is_otg_enabled(musb) | ||
| 92 | && musb->xceiv.host->b_hnp_enable; | ||
| 93 | musb_platform_try_idle(musb, 0); | ||
| 94 | break; | ||
| 95 | #endif | ||
| 96 | default: | ||
| 97 | DBG(1, "bogus rh suspend? %s\n", | ||
| 98 | otg_state_string(musb)); | ||
| 99 | } | ||
| 100 | } else if (power & MUSB_POWER_SUSPENDM) { | ||
| 101 | power &= ~MUSB_POWER_SUSPENDM; | ||
| 102 | power |= MUSB_POWER_RESUME; | ||
| 103 | musb_writeb(mbase, MUSB_POWER, power); | ||
| 104 | |||
| 105 | DBG(3, "Root port resuming, power %02x\n", power); | ||
| 106 | |||
| 107 | /* later, GetPortStatus will stop RESUME signaling */ | ||
| 108 | musb->port1_status |= MUSB_PORT_STAT_RESUME; | ||
| 109 | musb->rh_timer = jiffies + msecs_to_jiffies(20); | ||
| 110 | } | ||
| 111 | } | ||
| 112 | |||
| 113 | static void musb_port_reset(struct musb *musb, bool do_reset) | ||
| 114 | { | ||
| 115 | u8 power; | ||
| 116 | void __iomem *mbase = musb->mregs; | ||
| 117 | |||
| 118 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 119 | if (musb->xceiv.state == OTG_STATE_B_IDLE) { | ||
| 120 | DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n"); | ||
| 121 | musb->port1_status &= ~USB_PORT_STAT_RESET; | ||
| 122 | return; | ||
| 123 | } | ||
| 124 | #endif | ||
| 125 | |||
| 126 | if (!is_host_active(musb)) | ||
| 127 | return; | ||
| 128 | |||
| 129 | /* NOTE: caller guarantees it will turn off the reset when | ||
| 130 | * the appropriate amount of time has passed | ||
| 131 | */ | ||
| 132 | power = musb_readb(mbase, MUSB_POWER); | ||
| 133 | if (do_reset) { | ||
| 134 | |||
| 135 | /* | ||
| 136 | * If RESUME is set, we must make sure it stays minimum 20 ms. | ||
| 137 | * Then we must clear RESUME and wait a bit to let musb start | ||
| 138 | * generating SOFs. If we don't do this, OPT HS A 6.8 tests | ||
| 139 | * fail with "Error! Did not receive an SOF before suspend | ||
| 140 | * detected". | ||
| 141 | */ | ||
| 142 | if (power & MUSB_POWER_RESUME) { | ||
| 143 | while (time_before(jiffies, musb->rh_timer)) | ||
| 144 | msleep(1); | ||
| 145 | musb_writeb(mbase, MUSB_POWER, | ||
| 146 | power & ~MUSB_POWER_RESUME); | ||
| 147 | msleep(1); | ||
| 148 | } | ||
| 149 | |||
| 150 | musb->ignore_disconnect = true; | ||
| 151 | power &= 0xf0; | ||
| 152 | musb_writeb(mbase, MUSB_POWER, | ||
| 153 | power | MUSB_POWER_RESET); | ||
| 154 | |||
| 155 | musb->port1_status |= USB_PORT_STAT_RESET; | ||
| 156 | musb->port1_status &= ~USB_PORT_STAT_ENABLE; | ||
| 157 | musb->rh_timer = jiffies + msecs_to_jiffies(50); | ||
| 158 | } else { | ||
| 159 | DBG(4, "root port reset stopped\n"); | ||
| 160 | musb_writeb(mbase, MUSB_POWER, | ||
| 161 | power & ~MUSB_POWER_RESET); | ||
| 162 | |||
| 163 | musb->ignore_disconnect = false; | ||
| 164 | |||
| 165 | power = musb_readb(mbase, MUSB_POWER); | ||
| 166 | if (power & MUSB_POWER_HSMODE) { | ||
| 167 | DBG(4, "high-speed device connected\n"); | ||
| 168 | musb->port1_status |= USB_PORT_STAT_HIGH_SPEED; | ||
| 169 | } | ||
| 170 | |||
| 171 | musb->port1_status &= ~USB_PORT_STAT_RESET; | ||
| 172 | musb->port1_status |= USB_PORT_STAT_ENABLE | ||
| 173 | | (USB_PORT_STAT_C_RESET << 16) | ||
| 174 | | (USB_PORT_STAT_C_ENABLE << 16); | ||
| 175 | usb_hcd_poll_rh_status(musb_to_hcd(musb)); | ||
| 176 | |||
| 177 | musb->vbuserr_retry = VBUSERR_RETRY_COUNT; | ||
| 178 | } | ||
| 179 | } | ||
| 180 | |||
| 181 | void musb_root_disconnect(struct musb *musb) | ||
| 182 | { | ||
| 183 | musb->port1_status = (1 << USB_PORT_FEAT_POWER) | ||
| 184 | | (1 << USB_PORT_FEAT_C_CONNECTION); | ||
| 185 | |||
| 186 | usb_hcd_poll_rh_status(musb_to_hcd(musb)); | ||
| 187 | musb->is_active = 0; | ||
| 188 | |||
| 189 | switch (musb->xceiv.state) { | ||
| 190 | case OTG_STATE_A_HOST: | ||
| 191 | case OTG_STATE_A_SUSPEND: | ||
| 192 | musb->xceiv.state = OTG_STATE_A_WAIT_BCON; | ||
| 193 | musb->is_active = 0; | ||
| 194 | break; | ||
| 195 | case OTG_STATE_A_WAIT_VFALL: | ||
| 196 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
| 197 | break; | ||
| 198 | default: | ||
| 199 | DBG(1, "host disconnect (%s)\n", otg_state_string(musb)); | ||
| 200 | } | ||
| 201 | } | ||
| 202 | |||
| 203 | |||
| 204 | /*---------------------------------------------------------------------*/ | ||
| 205 | |||
| 206 | /* Caller may or may not hold musb->lock */ | ||
| 207 | int musb_hub_status_data(struct usb_hcd *hcd, char *buf) | ||
| 208 | { | ||
| 209 | struct musb *musb = hcd_to_musb(hcd); | ||
| 210 | int retval = 0; | ||
| 211 | |||
| 212 | /* called in_irq() via usb_hcd_poll_rh_status() */ | ||
| 213 | if (musb->port1_status & 0xffff0000) { | ||
| 214 | *buf = 0x02; | ||
| 215 | retval = 1; | ||
| 216 | } | ||
| 217 | return retval; | ||
| 218 | } | ||
| 219 | |||
| 220 | int musb_hub_control( | ||
| 221 | struct usb_hcd *hcd, | ||
| 222 | u16 typeReq, | ||
| 223 | u16 wValue, | ||
| 224 | u16 wIndex, | ||
| 225 | char *buf, | ||
| 226 | u16 wLength) | ||
| 227 | { | ||
| 228 | struct musb *musb = hcd_to_musb(hcd); | ||
| 229 | u32 temp; | ||
| 230 | int retval = 0; | ||
| 231 | unsigned long flags; | ||
| 232 | |||
| 233 | spin_lock_irqsave(&musb->lock, flags); | ||
| 234 | |||
| 235 | if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) { | ||
| 236 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 237 | return -ESHUTDOWN; | ||
| 238 | } | ||
| 239 | |||
| 240 | /* hub features: always zero, setting is a NOP | ||
| 241 | * port features: reported, sometimes updated when host is active | ||
| 242 | * no indicators | ||
| 243 | */ | ||
| 244 | switch (typeReq) { | ||
| 245 | case ClearHubFeature: | ||
| 246 | case SetHubFeature: | ||
| 247 | switch (wValue) { | ||
| 248 | case C_HUB_OVER_CURRENT: | ||
| 249 | case C_HUB_LOCAL_POWER: | ||
| 250 | break; | ||
| 251 | default: | ||
| 252 | goto error; | ||
| 253 | } | ||
| 254 | break; | ||
| 255 | case ClearPortFeature: | ||
| 256 | if ((wIndex & 0xff) != 1) | ||
| 257 | goto error; | ||
| 258 | |||
| 259 | switch (wValue) { | ||
| 260 | case USB_PORT_FEAT_ENABLE: | ||
| 261 | break; | ||
| 262 | case USB_PORT_FEAT_SUSPEND: | ||
| 263 | musb_port_suspend(musb, false); | ||
| 264 | break; | ||
| 265 | case USB_PORT_FEAT_POWER: | ||
| 266 | if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) | ||
| 267 | musb_set_vbus(musb, 0); | ||
| 268 | break; | ||
| 269 | case USB_PORT_FEAT_C_CONNECTION: | ||
| 270 | case USB_PORT_FEAT_C_ENABLE: | ||
| 271 | case USB_PORT_FEAT_C_OVER_CURRENT: | ||
| 272 | case USB_PORT_FEAT_C_RESET: | ||
| 273 | case USB_PORT_FEAT_C_SUSPEND: | ||
| 274 | break; | ||
| 275 | default: | ||
| 276 | goto error; | ||
| 277 | } | ||
| 278 | DBG(5, "clear feature %d\n", wValue); | ||
| 279 | musb->port1_status &= ~(1 << wValue); | ||
| 280 | break; | ||
| 281 | case GetHubDescriptor: | ||
| 282 | { | ||
| 283 | struct usb_hub_descriptor *desc = (void *)buf; | ||
| 284 | |||
| 285 | desc->bDescLength = 9; | ||
| 286 | desc->bDescriptorType = 0x29; | ||
| 287 | desc->bNbrPorts = 1; | ||
| 288 | desc->wHubCharacteristics = __constant_cpu_to_le16( | ||
| 289 | 0x0001 /* per-port power switching */ | ||
| 290 | | 0x0010 /* no overcurrent reporting */ | ||
| 291 | ); | ||
| 292 | desc->bPwrOn2PwrGood = 5; /* msec/2 */ | ||
| 293 | desc->bHubContrCurrent = 0; | ||
| 294 | |||
| 295 | /* workaround bogus struct definition */ | ||
| 296 | desc->DeviceRemovable[0] = 0x02; /* port 1 */ | ||
| 297 | desc->DeviceRemovable[1] = 0xff; | ||
| 298 | } | ||
| 299 | break; | ||
| 300 | case GetHubStatus: | ||
| 301 | temp = 0; | ||
| 302 | *(__le32 *) buf = cpu_to_le32(temp); | ||
| 303 | break; | ||
| 304 | case GetPortStatus: | ||
| 305 | if (wIndex != 1) | ||
| 306 | goto error; | ||
| 307 | |||
| 308 | /* finish RESET signaling? */ | ||
| 309 | if ((musb->port1_status & USB_PORT_STAT_RESET) | ||
| 310 | && time_after_eq(jiffies, musb->rh_timer)) | ||
| 311 | musb_port_reset(musb, false); | ||
| 312 | |||
| 313 | /* finish RESUME signaling? */ | ||
| 314 | if ((musb->port1_status & MUSB_PORT_STAT_RESUME) | ||
| 315 | && time_after_eq(jiffies, musb->rh_timer)) { | ||
| 316 | u8 power; | ||
| 317 | |||
| 318 | power = musb_readb(musb->mregs, MUSB_POWER); | ||
| 319 | power &= ~MUSB_POWER_RESUME; | ||
| 320 | DBG(4, "root port resume stopped, power %02x\n", | ||
| 321 | power); | ||
| 322 | musb_writeb(musb->mregs, MUSB_POWER, power); | ||
| 323 | |||
| 324 | /* ISSUE: DaVinci (RTL 1.300) disconnects after | ||
| 325 | * resume of high speed peripherals (but not full | ||
| 326 | * speed ones). | ||
| 327 | */ | ||
| 328 | |||
| 329 | musb->is_active = 1; | ||
| 330 | musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | ||
| 331 | | MUSB_PORT_STAT_RESUME); | ||
| 332 | musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; | ||
| 333 | usb_hcd_poll_rh_status(musb_to_hcd(musb)); | ||
| 334 | /* NOTE: it might really be A_WAIT_BCON ... */ | ||
| 335 | musb->xceiv.state = OTG_STATE_A_HOST; | ||
| 336 | } | ||
| 337 | |||
| 338 | put_unaligned(cpu_to_le32(musb->port1_status | ||
| 339 | & ~MUSB_PORT_STAT_RESUME), | ||
| 340 | (__le32 *) buf); | ||
| 341 | |||
| 342 | /* port change status is more interesting */ | ||
| 343 | DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n", | ||
| 344 | musb->port1_status); | ||
| 345 | break; | ||
| 346 | case SetPortFeature: | ||
| 347 | if ((wIndex & 0xff) != 1) | ||
| 348 | goto error; | ||
| 349 | |||
| 350 | switch (wValue) { | ||
| 351 | case USB_PORT_FEAT_POWER: | ||
| 352 | /* NOTE: this controller has a strange state machine | ||
| 353 | * that involves "requesting sessions" according to | ||
| 354 | * magic side effects from incompletely-described | ||
| 355 | * rules about startup... | ||
| 356 | * | ||
| 357 | * This call is what really starts the host mode; be | ||
| 358 | * very careful about side effects if you reorder any | ||
| 359 | * initialization logic, e.g. for OTG, or change any | ||
| 360 | * logic relating to VBUS power-up. | ||
| 361 | */ | ||
| 362 | if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) | ||
| 363 | musb_start(musb); | ||
| 364 | break; | ||
| 365 | case USB_PORT_FEAT_RESET: | ||
| 366 | musb_port_reset(musb, true); | ||
| 367 | break; | ||
| 368 | case USB_PORT_FEAT_SUSPEND: | ||
| 369 | musb_port_suspend(musb, true); | ||
| 370 | break; | ||
| 371 | case USB_PORT_FEAT_TEST: | ||
| 372 | if (unlikely(is_host_active(musb))) | ||
| 373 | goto error; | ||
| 374 | |||
| 375 | wIndex >>= 8; | ||
| 376 | switch (wIndex) { | ||
| 377 | case 1: | ||
| 378 | pr_debug("TEST_J\n"); | ||
| 379 | temp = MUSB_TEST_J; | ||
| 380 | break; | ||
| 381 | case 2: | ||
| 382 | pr_debug("TEST_K\n"); | ||
| 383 | temp = MUSB_TEST_K; | ||
| 384 | break; | ||
| 385 | case 3: | ||
| 386 | pr_debug("TEST_SE0_NAK\n"); | ||
| 387 | temp = MUSB_TEST_SE0_NAK; | ||
| 388 | break; | ||
| 389 | case 4: | ||
| 390 | pr_debug("TEST_PACKET\n"); | ||
| 391 | temp = MUSB_TEST_PACKET; | ||
| 392 | musb_load_testpacket(musb); | ||
| 393 | break; | ||
| 394 | case 5: | ||
| 395 | pr_debug("TEST_FORCE_ENABLE\n"); | ||
| 396 | temp = MUSB_TEST_FORCE_HOST | ||
| 397 | | MUSB_TEST_FORCE_HS; | ||
| 398 | |||
| 399 | musb_writeb(musb->mregs, MUSB_DEVCTL, | ||
| 400 | MUSB_DEVCTL_SESSION); | ||
| 401 | break; | ||
| 402 | case 6: | ||
| 403 | pr_debug("TEST_FIFO_ACCESS\n"); | ||
| 404 | temp = MUSB_TEST_FIFO_ACCESS; | ||
| 405 | break; | ||
| 406 | default: | ||
| 407 | goto error; | ||
| 408 | } | ||
| 409 | musb_writeb(musb->mregs, MUSB_TESTMODE, temp); | ||
| 410 | break; | ||
| 411 | default: | ||
| 412 | goto error; | ||
| 413 | } | ||
| 414 | DBG(5, "set feature %d\n", wValue); | ||
| 415 | musb->port1_status |= 1 << wValue; | ||
| 416 | break; | ||
| 417 | |||
| 418 | default: | ||
| 419 | error: | ||
| 420 | /* "protocol stall" on error */ | ||
| 421 | retval = -EPIPE; | ||
| 422 | } | ||
| 423 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 424 | return retval; | ||
| 425 | } | ||
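To make the GetPortStatus handling above concrete: the driver keeps wPortStatus in the low 16 bits of port1_status and the wPortChange bits in the high 16 bits, and the 4-byte reply must be little-endian, which is what the put_unaligned(cpu_to_le32(...)) call does. The hub descriptor's bPwrOn2PwrGood is in units of 2 ms, so the value 5 above advertises a 10 ms power-on delay. The following stand-alone C sketch (illustrative only, not driver code) models the byte packing:

#include <stdint.h>
#include <stdio.h>

/* Pack a combined port1_status word the way the hub class expects:
 * low 16 bits -> wPortStatus, high 16 bits -> wPortChange, both
 * little-endian in the 4-byte GetPortStatus reply.
 */
static void pack_port_status(uint32_t port1_status, uint8_t buf[4])
{
	buf[0] = port1_status & 0xff;
	buf[1] = (port1_status >> 8) & 0xff;
	buf[2] = (port1_status >> 16) & 0xff;
	buf[3] = (port1_status >> 24) & 0xff;
}

int main(void)
{
	/* e.g. connected + enabled, with the connect-change bit set */
	uint32_t status = 0x0003u | (0x0001u << 16);
	uint8_t buf[4];

	pack_port_status(status, buf);
	printf("wPortStatus 0x%02x%02x  wPortChange 0x%02x%02x\n",
	       buf[1], buf[0], buf[3], buf[2]);
	return 0;
}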
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c new file mode 100644 index 000000000000..9ba8fb7fcd24 --- /dev/null +++ b/drivers/usb/musb/musbhsdma.c | |||
| @@ -0,0 +1,433 @@ | |||
| 1 | /* | ||
| 2 | * MUSB OTG driver - support for Mentor's DMA controller | ||
| 3 | * | ||
| 4 | * Copyright 2005 Mentor Graphics Corporation | ||
| 5 | * Copyright (C) 2005-2007 by Texas Instruments | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or | ||
| 8 | * modify it under the terms of the GNU General Public License | ||
| 9 | * version 2 as published by the Free Software Foundation. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, but | ||
| 12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | * General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 19 | * 02110-1301 USA | ||
| 20 | * | ||
| 21 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
| 22 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
| 23 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
| 24 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 25 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
| 26 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
| 27 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
| 28 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
| 30 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | #include <linux/device.h> | ||
| 34 | #include <linux/interrupt.h> | ||
| 35 | #include <linux/platform_device.h> | ||
| 36 | #include "musb_core.h" | ||
| 37 | |||
| 38 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) | ||
| 39 | #include "omap2430.h" | ||
| 40 | #endif | ||
| 41 | |||
| 42 | #define MUSB_HSDMA_BASE 0x200 | ||
| 43 | #define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0) | ||
| 44 | #define MUSB_HSDMA_CONTROL 0x4 | ||
| 45 | #define MUSB_HSDMA_ADDRESS 0x8 | ||
| 46 | #define MUSB_HSDMA_COUNT 0xc | ||
| 47 | |||
| 48 | #define MUSB_HSDMA_CHANNEL_OFFSET(_bChannel, _offset) \ | ||
| 49 | (MUSB_HSDMA_BASE + (_bChannel << 4) + _offset) | ||
| 50 | |||
| 51 | /* control register (16-bit): */ | ||
| 52 | #define MUSB_HSDMA_ENABLE_SHIFT 0 | ||
| 53 | #define MUSB_HSDMA_TRANSMIT_SHIFT 1 | ||
| 54 | #define MUSB_HSDMA_MODE1_SHIFT 2 | ||
| 55 | #define MUSB_HSDMA_IRQENABLE_SHIFT 3 | ||
| 56 | #define MUSB_HSDMA_ENDPOINT_SHIFT 4 | ||
| 57 | #define MUSB_HSDMA_BUSERROR_SHIFT 8 | ||
| 58 | #define MUSB_HSDMA_BURSTMODE_SHIFT 9 | ||
| 59 | #define MUSB_HSDMA_BURSTMODE (3 << MUSB_HSDMA_BURSTMODE_SHIFT) | ||
| 60 | #define MUSB_HSDMA_BURSTMODE_UNSPEC 0 | ||
| 61 | #define MUSB_HSDMA_BURSTMODE_INCR4 1 | ||
| 62 | #define MUSB_HSDMA_BURSTMODE_INCR8 2 | ||
| 63 | #define MUSB_HSDMA_BURSTMODE_INCR16 3 | ||
| 64 | |||
| 65 | #define MUSB_HSDMA_CHANNELS 8 | ||
| 66 | |||
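As a quick illustration of the register map defined above: each of the eight DMA channels owns a 16-byte window starting at MUSB_HSDMA_BASE (0x200), with the control, address and count registers at offsets 0x4, 0x8 and 0xc inside that window. The short stand-alone sketch below (not driver code) prints the addresses the MUSB_HSDMA_CHANNEL_OFFSET() macro resolves to:

#include <stdio.h>

#define HSDMA_BASE	0x200
#define HSDMA_CONTROL	0x4
#define HSDMA_ADDRESS	0x8
#define HSDMA_COUNT	0xc
#define HSDMA_CH_OFFSET(ch, reg)	(HSDMA_BASE + ((ch) << 4) + (reg))

int main(void)
{
	int ch;

	/* one 16-byte register window per channel */
	for (ch = 0; ch < 8; ch++)
		printf("ch%d: control 0x%03x  address 0x%03x  count 0x%03x\n",
		       ch, HSDMA_CH_OFFSET(ch, HSDMA_CONTROL),
		       HSDMA_CH_OFFSET(ch, HSDMA_ADDRESS),
		       HSDMA_CH_OFFSET(ch, HSDMA_COUNT));
	return 0;
}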
| 67 | struct musb_dma_controller; | ||
| 68 | |||
| 69 | struct musb_dma_channel { | ||
| 70 | struct dma_channel Channel; | ||
| 71 | struct musb_dma_controller *controller; | ||
| 72 | u32 dwStartAddress; | ||
| 73 | u32 len; | ||
| 74 | u16 wMaxPacketSize; | ||
| 75 | u8 bIndex; | ||
| 76 | u8 epnum; | ||
| 77 | u8 transmit; | ||
| 78 | }; | ||
| 79 | |||
| 80 | struct musb_dma_controller { | ||
| 81 | struct dma_controller Controller; | ||
| 82 | struct musb_dma_channel aChannel[MUSB_HSDMA_CHANNELS]; | ||
| 83 | void *pDmaPrivate; | ||
| 84 | void __iomem *pCoreBase; | ||
| 85 | u8 bChannelCount; | ||
| 86 | u8 bmUsedChannels; | ||
| 87 | u8 irq; | ||
| 88 | }; | ||
| 89 | |||
| 90 | static int dma_controller_start(struct dma_controller *c) | ||
| 91 | { | ||
| 92 | /* nothing to do */ | ||
| 93 | return 0; | ||
| 94 | } | ||
| 95 | |||
| 96 | static void dma_channel_release(struct dma_channel *pChannel); | ||
| 97 | |||
| 98 | static int dma_controller_stop(struct dma_controller *c) | ||
| 99 | { | ||
| 100 | struct musb_dma_controller *controller = | ||
| 101 | container_of(c, struct musb_dma_controller, Controller); | ||
| 102 | struct musb *musb = (struct musb *) controller->pDmaPrivate; | ||
| 103 | struct dma_channel *pChannel; | ||
| 104 | u8 bBit; | ||
| 105 | |||
| 106 | if (controller->bmUsedChannels != 0) { | ||
| 107 | dev_err(musb->controller, | ||
| 108 | "Stopping DMA controller while channel active\n"); | ||
| 109 | |||
| 110 | for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) { | ||
| 111 | if (controller->bmUsedChannels & (1 << bBit)) { | ||
| 112 | pChannel = &controller->aChannel[bBit].Channel; | ||
| 113 | dma_channel_release(pChannel); | ||
| 114 | |||
| 115 | if (!controller->bmUsedChannels) | ||
| 116 | break; | ||
| 117 | } | ||
| 118 | } | ||
| 119 | } | ||
| 120 | return 0; | ||
| 121 | } | ||
| 122 | |||
| 123 | static struct dma_channel *dma_channel_allocate(struct dma_controller *c, | ||
| 124 | struct musb_hw_ep *hw_ep, u8 transmit) | ||
| 125 | { | ||
| 126 | u8 bBit; | ||
| 127 | struct dma_channel *pChannel = NULL; | ||
| 128 | struct musb_dma_channel *pImplChannel = NULL; | ||
| 129 | struct musb_dma_controller *controller = | ||
| 130 | container_of(c, struct musb_dma_controller, Controller); | ||
| 131 | |||
| 132 | for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) { | ||
| 133 | if (!(controller->bmUsedChannels & (1 << bBit))) { | ||
| 134 | controller->bmUsedChannels |= (1 << bBit); | ||
| 135 | pImplChannel = &(controller->aChannel[bBit]); | ||
| 136 | pImplChannel->controller = controller; | ||
| 137 | pImplChannel->bIndex = bBit; | ||
| 138 | pImplChannel->epnum = hw_ep->epnum; | ||
| 139 | pImplChannel->transmit = transmit; | ||
| 140 | pChannel = &(pImplChannel->Channel); | ||
| 141 | pChannel->private_data = pImplChannel; | ||
| 142 | pChannel->status = MUSB_DMA_STATUS_FREE; | ||
| 143 | pChannel->max_len = 0x10000; | ||
| 144 | /* Tx => mode 1; Rx => mode 0 */ | ||
| 145 | pChannel->desired_mode = transmit; | ||
| 146 | pChannel->actual_len = 0; | ||
| 147 | break; | ||
| 148 | } | ||
| 149 | } | ||
| 150 | return pChannel; | ||
| 151 | } | ||
| 152 | |||
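The allocator above is a simple first-free-bit scan over bmUsedChannels; releasing a channel just clears its bit again. A minimal stand-alone model of that bookkeeping (illustrative only, not driver code):

#include <stdio.h>

#define NCHANNELS 8

static unsigned char used;	/* models controller->bmUsedChannels */

static int channel_alloc(void)
{
	int bit;

	/* take the first clear bit, mark it busy */
	for (bit = 0; bit < NCHANNELS; bit++) {
		if (!(used & (1 << bit))) {
			used |= 1 << bit;
			return bit;
		}
	}
	return -1;		/* all channels busy */
}

static void channel_release(int bit)
{
	used &= ~(1 << bit);
}

int main(void)
{
	int a = channel_alloc(), b = channel_alloc();

	printf("got channels %d and %d, map %02x\n", a, b, used);
	channel_release(a);
	printf("released %d, map %02x\n", a, used);
	return 0;
}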
| 153 | static void dma_channel_release(struct dma_channel *pChannel) | ||
| 154 | { | ||
| 155 | struct musb_dma_channel *pImplChannel = | ||
| 156 | (struct musb_dma_channel *) pChannel->private_data; | ||
| 157 | |||
| 158 | pChannel->actual_len = 0; | ||
| 159 | pImplChannel->dwStartAddress = 0; | ||
| 160 | pImplChannel->len = 0; | ||
| 161 | |||
| 162 | pImplChannel->controller->bmUsedChannels &= | ||
| 163 | ~(1 << pImplChannel->bIndex); | ||
| 164 | |||
| 165 | pChannel->status = MUSB_DMA_STATUS_UNKNOWN; | ||
| 166 | } | ||
| 167 | |||
| 168 | static void configure_channel(struct dma_channel *pChannel, | ||
| 169 | u16 packet_sz, u8 mode, | ||
| 170 | dma_addr_t dma_addr, u32 len) | ||
| 171 | { | ||
| 172 | struct musb_dma_channel *pImplChannel = | ||
| 173 | (struct musb_dma_channel *) pChannel->private_data; | ||
| 174 | struct musb_dma_controller *controller = pImplChannel->controller; | ||
| 175 | void __iomem *mbase = controller->pCoreBase; | ||
| 176 | u8 bChannel = pImplChannel->bIndex; | ||
| 177 | u16 csr = 0; | ||
| 178 | |||
| 179 | DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n", | ||
| 180 | pChannel, packet_sz, dma_addr, len, mode); | ||
| 181 | |||
| 182 | if (mode) { | ||
| 183 | csr |= 1 << MUSB_HSDMA_MODE1_SHIFT; | ||
| 184 | BUG_ON(len < packet_sz); | ||
| 185 | |||
| 186 | if (packet_sz >= 64) { | ||
| 187 | csr |= MUSB_HSDMA_BURSTMODE_INCR16 | ||
| 188 | << MUSB_HSDMA_BURSTMODE_SHIFT; | ||
| 189 | } else if (packet_sz >= 32) { | ||
| 190 | csr |= MUSB_HSDMA_BURSTMODE_INCR8 | ||
| 191 | << MUSB_HSDMA_BURSTMODE_SHIFT; | ||
| 192 | } else if (packet_sz >= 16) { | ||
| 193 | csr |= MUSB_HSDMA_BURSTMODE_INCR4 | ||
| 194 | << MUSB_HSDMA_BURSTMODE_SHIFT; | ||
| 195 | } | ||
| 196 | } | ||
| 197 | |||
| 198 | csr |= (pImplChannel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT) | ||
| 199 | | (1 << MUSB_HSDMA_ENABLE_SHIFT) | ||
| 200 | | (1 << MUSB_HSDMA_IRQENABLE_SHIFT) | ||
| 201 | | (pImplChannel->transmit | ||
| 202 | ? (1 << MUSB_HSDMA_TRANSMIT_SHIFT) | ||
| 203 | : 0); | ||
| 204 | |||
| 205 | /* address/count */ | ||
| 206 | musb_writel(mbase, | ||
| 207 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS), | ||
| 208 | dma_addr); | ||
| 209 | musb_writel(mbase, | ||
| 210 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT), | ||
| 211 | len); | ||
| 212 | |||
| 213 | /* control (this should start things) */ | ||
| 214 | musb_writew(mbase, | ||
| 215 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL), | ||
| 216 | csr); | ||
| 217 | } | ||
| 218 | |||
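configure_channel() packs the per-channel control word from the shift constants defined at the top of this file: endpoint number, enable and IRQ-enable bits, the direction bit for TX, and, in mode 1, a burst mode chosen from the packet size. A stand-alone sketch of that packing (illustrative only; the bit positions are duplicated locally):

#include <stdio.h>

#define ENABLE_SHIFT	0
#define TRANSMIT_SHIFT	1
#define MODE1_SHIFT	2
#define IRQENABLE_SHIFT	3
#define ENDPOINT_SHIFT	4
#define BURSTMODE_SHIFT	9
#define BURST_INCR4	1
#define BURST_INCR8	2
#define BURST_INCR16	3

static unsigned pack_csr(unsigned epnum, int transmit, int mode,
			 unsigned packet_sz)
{
	unsigned csr = 0;

	if (mode) {
		csr |= 1 << MODE1_SHIFT;
		/* larger packets get wider bursts */
		if (packet_sz >= 64)
			csr |= BURST_INCR16 << BURSTMODE_SHIFT;
		else if (packet_sz >= 32)
			csr |= BURST_INCR8 << BURSTMODE_SHIFT;
		else if (packet_sz >= 16)
			csr |= BURST_INCR4 << BURSTMODE_SHIFT;
	}
	csr |= (epnum << ENDPOINT_SHIFT)
		| (1 << ENABLE_SHIFT)
		| (1 << IRQENABLE_SHIFT)
		| (transmit ? (1 << TRANSMIT_SHIFT) : 0);
	return csr;
}

int main(void)
{
	printf("ep1 TX mode1 512B -> csr 0x%04x\n", pack_csr(1, 1, 1, 512));
	printf("ep2 RX mode0  64B -> csr 0x%04x\n", pack_csr(2, 0, 0, 64));
	return 0;
}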
| 219 | static int dma_channel_program(struct dma_channel *pChannel, | ||
| 220 | u16 packet_sz, u8 mode, | ||
| 221 | dma_addr_t dma_addr, u32 len) | ||
| 222 | { | ||
| 223 | struct musb_dma_channel *pImplChannel = | ||
| 224 | (struct musb_dma_channel *) pChannel->private_data; | ||
| 225 | |||
| 226 | DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", | ||
| 227 | pImplChannel->epnum, | ||
| 228 | pImplChannel->transmit ? "Tx" : "Rx", | ||
| 229 | packet_sz, dma_addr, len, mode); | ||
| 230 | |||
| 231 | BUG_ON(pChannel->status == MUSB_DMA_STATUS_UNKNOWN || | ||
| 232 | pChannel->status == MUSB_DMA_STATUS_BUSY); | ||
| 233 | |||
| 234 | pChannel->actual_len = 0; | ||
| 235 | pImplChannel->dwStartAddress = dma_addr; | ||
| 236 | pImplChannel->len = len; | ||
| 237 | pImplChannel->wMaxPacketSize = packet_sz; | ||
| 238 | pChannel->status = MUSB_DMA_STATUS_BUSY; | ||
| 239 | |||
| 240 | if ((mode == 1) && (len >= packet_sz)) | ||
| 241 | configure_channel(pChannel, packet_sz, 1, dma_addr, len); | ||
| 242 | else | ||
| 243 | configure_channel(pChannel, packet_sz, 0, dma_addr, len); | ||
| 244 | |||
| 245 | return true; | ||
| 246 | } | ||
| 247 | |||
| 248 | static int dma_channel_abort(struct dma_channel *pChannel) | ||
| 249 | { | ||
| 250 | struct musb_dma_channel *pImplChannel = | ||
| 251 | (struct musb_dma_channel *) pChannel->private_data; | ||
| 252 | u8 bChannel = pImplChannel->bIndex; | ||
| 253 | void __iomem *mbase = pImplChannel->controller->pCoreBase; | ||
| 254 | u16 csr; | ||
| 255 | |||
| 256 | if (pChannel->status == MUSB_DMA_STATUS_BUSY) { | ||
| 257 | if (pImplChannel->transmit) { | ||
| 258 | |||
| 259 | csr = musb_readw(mbase, | ||
| 260 | MUSB_EP_OFFSET(pImplChannel->epnum, | ||
| 261 | MUSB_TXCSR)); | ||
| 262 | csr &= ~(MUSB_TXCSR_AUTOSET | | ||
| 263 | MUSB_TXCSR_DMAENAB | | ||
| 264 | MUSB_TXCSR_DMAMODE); | ||
| 265 | musb_writew(mbase, | ||
| 266 | MUSB_EP_OFFSET(pImplChannel->epnum, | ||
| 267 | MUSB_TXCSR), | ||
| 268 | csr); | ||
| 269 | } else { | ||
| 270 | csr = musb_readw(mbase, | ||
| 271 | MUSB_EP_OFFSET(pImplChannel->epnum, | ||
| 272 | MUSB_RXCSR)); | ||
| 273 | csr &= ~(MUSB_RXCSR_AUTOCLEAR | | ||
| 274 | MUSB_RXCSR_DMAENAB | | ||
| 275 | MUSB_RXCSR_DMAMODE); | ||
| 276 | musb_writew(mbase, | ||
| 277 | MUSB_EP_OFFSET(pImplChannel->epnum, | ||
| 278 | MUSB_RXCSR), | ||
| 279 | csr); | ||
| 280 | } | ||
| 281 | |||
| 282 | musb_writew(mbase, | ||
| 283 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL), | ||
| 284 | 0); | ||
| 285 | musb_writel(mbase, | ||
| 286 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS), | ||
| 287 | 0); | ||
| 288 | musb_writel(mbase, | ||
| 289 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT), | ||
| 290 | 0); | ||
| 291 | |||
| 292 | pChannel->status = MUSB_DMA_STATUS_FREE; | ||
| 293 | } | ||
| 294 | return 0; | ||
| 295 | } | ||
| 296 | |||
| 297 | static irqreturn_t dma_controller_irq(int irq, void *private_data) | ||
| 298 | { | ||
| 299 | struct musb_dma_controller *controller = | ||
| 300 | (struct musb_dma_controller *)private_data; | ||
| 301 | struct musb_dma_channel *pImplChannel; | ||
| 302 | struct musb *musb = controller->pDmaPrivate; | ||
| 303 | void __iomem *mbase = controller->pCoreBase; | ||
| 304 | struct dma_channel *pChannel; | ||
| 305 | u8 bChannel; | ||
| 306 | u16 csr; | ||
| 307 | u32 dwAddress; | ||
| 308 | u8 int_hsdma; | ||
| 309 | irqreturn_t retval = IRQ_NONE; | ||
| 310 | unsigned long flags; | ||
| 311 | |||
| 312 | spin_lock_irqsave(&musb->lock, flags); | ||
| 313 | |||
| 314 | int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR); | ||
| 315 | if (!int_hsdma) | ||
| 316 | goto done; | ||
| 317 | |||
| 318 | for (bChannel = 0; bChannel < MUSB_HSDMA_CHANNELS; bChannel++) { | ||
| 319 | if (int_hsdma & (1 << bChannel)) { | ||
| 320 | pImplChannel = (struct musb_dma_channel *) | ||
| 321 | &(controller->aChannel[bChannel]); | ||
| 322 | pChannel = &pImplChannel->Channel; | ||
| 323 | |||
| 324 | csr = musb_readw(mbase, | ||
| 325 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, | ||
| 326 | MUSB_HSDMA_CONTROL)); | ||
| 327 | |||
| 328 | if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT)) | ||
| 329 | pImplChannel->Channel.status = | ||
| 330 | MUSB_DMA_STATUS_BUS_ABORT; | ||
| 331 | else { | ||
| 332 | u8 devctl; | ||
| 333 | |||
| 334 | dwAddress = musb_readl(mbase, | ||
| 335 | MUSB_HSDMA_CHANNEL_OFFSET( | ||
| 336 | bChannel, | ||
| 337 | MUSB_HSDMA_ADDRESS)); | ||
| 338 | pChannel->actual_len = dwAddress | ||
| 339 | - pImplChannel->dwStartAddress; | ||
| 340 | |||
| 341 | DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n", | ||
| 342 | pChannel, pImplChannel->dwStartAddress, | ||
| 343 | dwAddress, pChannel->actual_len, | ||
| 344 | pImplChannel->len, | ||
| 345 | (pChannel->actual_len | ||
| 346 | < pImplChannel->len) ? | ||
| 347 | "=> reconfig 0" : "=> complete"); | ||
| 348 | |||
| 349 | devctl = musb_readb(mbase, MUSB_DEVCTL); | ||
| 350 | |||
| 351 | pChannel->status = MUSB_DMA_STATUS_FREE; | ||
| 352 | |||
| 353 | /* completed */ | ||
| 354 | if ((devctl & MUSB_DEVCTL_HM) | ||
| 355 | && (pImplChannel->transmit) | ||
| 356 | && ((pChannel->desired_mode == 0) | ||
| 357 | || (pChannel->actual_len & | ||
| 358 | (pImplChannel->wMaxPacketSize - 1))) | ||
| 359 | ) { | ||
| 360 | /* Send out the packet */ | ||
| 361 | musb_ep_select(mbase, | ||
| 362 | pImplChannel->epnum); | ||
| 363 | musb_writew(mbase, MUSB_EP_OFFSET( | ||
| 364 | pImplChannel->epnum, | ||
| 365 | MUSB_TXCSR), | ||
| 366 | MUSB_TXCSR_TXPKTRDY); | ||
| 367 | } else | ||
| 368 | musb_dma_completion( | ||
| 369 | musb, | ||
| 370 | pImplChannel->epnum, | ||
| 371 | pImplChannel->transmit); | ||
| 372 | } | ||
| 373 | } | ||
| 374 | } | ||
| 375 | retval = IRQ_HANDLED; | ||
| 376 | done: | ||
| 377 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 378 | return retval; | ||
| 379 | } | ||
| 380 | |||
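The completion test in dma_controller_irq() above is the subtle part: for host-side TX, a mode-0 transfer or a short final packet is left for the CPU to kick by setting TXPKTRDY, while everything else is reported through musb_dma_completion(). A small stand-alone model of that predicate (illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the condition used above: host mode, TX direction, and either
 * desired_mode 0 or a transfer length that is not a multiple of the
 * endpoint's max packet size.
 */
static bool needs_txpktrdy(bool host, bool transmit, int desired_mode,
			   unsigned actual_len, unsigned maxpacket)
{
	return host && transmit &&
	       (desired_mode == 0 || (actual_len & (maxpacket - 1)));
}

int main(void)
{
	printf("mode1, 1024/512 -> kick? %d\n",
	       needs_txpktrdy(true, true, 1, 1024, 512));	/* 0: complete */
	printf("mode1, 1000/512 -> kick? %d\n",
	       needs_txpktrdy(true, true, 1, 1000, 512));	/* 1: short tail */
	printf("mode0,  512/512 -> kick? %d\n",
	       needs_txpktrdy(true, true, 0, 512, 512));	/* 1: mode 0 */
	return 0;
}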
| 381 | void dma_controller_destroy(struct dma_controller *c) | ||
| 382 | { | ||
| 383 | struct musb_dma_controller *controller; | ||
| 384 | |||
| 385 | controller = container_of(c, struct musb_dma_controller, Controller); | ||
| 386 | if (!controller) | ||
| 387 | return; | ||
| 388 | |||
| 389 | if (controller->irq) | ||
| 390 | free_irq(controller->irq, c); | ||
| 391 | |||
| 392 | kfree(controller); | ||
| 393 | } | ||
| 394 | |||
| 395 | struct dma_controller *__init | ||
| 396 | dma_controller_create(struct musb *musb, void __iomem *pCoreBase) | ||
| 397 | { | ||
| 398 | struct musb_dma_controller *controller; | ||
| 399 | struct device *dev = musb->controller; | ||
| 400 | struct platform_device *pdev = to_platform_device(dev); | ||
| 401 | int irq = platform_get_irq(pdev, 1); | ||
| 402 | |||
| 403 | if (irq <= 0) { | ||
| 404 | dev_err(dev, "No DMA interrupt line!\n"); | ||
| 405 | return NULL; | ||
| 406 | } | ||
| 407 | |||
| 408 | controller = kzalloc(sizeof(struct musb_dma_controller), GFP_KERNEL); | ||
| 409 | if (!controller) | ||
| 410 | return NULL; | ||
| 411 | |||
| 412 | controller->bChannelCount = MUSB_HSDMA_CHANNELS; | ||
| 413 | controller->pDmaPrivate = musb; | ||
| 414 | controller->pCoreBase = pCoreBase; | ||
| 415 | |||
| 416 | controller->Controller.start = dma_controller_start; | ||
| 417 | controller->Controller.stop = dma_controller_stop; | ||
| 418 | controller->Controller.channel_alloc = dma_channel_allocate; | ||
| 419 | controller->Controller.channel_release = dma_channel_release; | ||
| 420 | controller->Controller.channel_program = dma_channel_program; | ||
| 421 | controller->Controller.channel_abort = dma_channel_abort; | ||
| 422 | |||
| 423 | if (request_irq(irq, dma_controller_irq, IRQF_DISABLED, | ||
| 424 | musb->controller->bus_id, &controller->Controller)) { | ||
| 425 | dev_err(dev, "request_irq %d failed!\n", irq); | ||
| 426 | dma_controller_destroy(&controller->Controller); | ||
| 427 | return NULL; | ||
| 428 | } | ||
| 429 | |||
| 430 | controller->irq = irq; | ||
| 431 | |||
| 432 | return &controller->Controller; | ||
| 433 | } | ||
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c new file mode 100644 index 000000000000..298b22e6ad0d --- /dev/null +++ b/drivers/usb/musb/omap2430.c | |||
| @@ -0,0 +1,324 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2005-2007 by Texas Instruments | ||
| 3 | * Some code has been taken from tusb6010.c | ||
| 4 | * Copyrights for that are attributable to: | ||
| 5 | * Copyright (C) 2006 Nokia Corporation | ||
| 6 | * Jarkko Nikula <jarkko.nikula@nokia.com> | ||
| 7 | * Tony Lindgren <tony@atomide.com> | ||
| 8 | * | ||
| 9 | * This file is part of the Inventra Controller Driver for Linux. | ||
| 10 | * | ||
| 11 | * The Inventra Controller Driver for Linux is free software; you | ||
| 12 | * can redistribute it and/or modify it under the terms of the GNU | ||
| 13 | * General Public License version 2 as published by the Free Software | ||
| 14 | * Foundation. | ||
| 15 | * | ||
| 16 | * The Inventra Controller Driver for Linux is distributed in | ||
| 17 | * the hope that it will be useful, but WITHOUT ANY WARRANTY; | ||
| 18 | * without even the implied warranty of MERCHANTABILITY or | ||
| 19 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public | ||
| 20 | * License for more details. | ||
| 21 | * | ||
| 22 | * You should have received a copy of the GNU General Public License | ||
| 23 | * along with The Inventra Controller Driver for Linux ; if not, | ||
| 24 | * write to the Free Software Foundation, Inc., 59 Temple Place, | ||
| 25 | * Suite 330, Boston, MA 02111-1307 USA | ||
| 26 | * | ||
| 27 | */ | ||
| 28 | #include <linux/module.h> | ||
| 29 | #include <linux/kernel.h> | ||
| 30 | #include <linux/sched.h> | ||
| 31 | #include <linux/slab.h> | ||
| 32 | #include <linux/init.h> | ||
| 33 | #include <linux/list.h> | ||
| 34 | #include <linux/clk.h> | ||
| 35 | #include <linux/io.h> | ||
| 36 | |||
| 37 | #include <asm/mach-types.h> | ||
| 38 | #include <asm/arch/hardware.h> | ||
| 39 | #include <asm/arch/mux.h> | ||
| 40 | |||
| 41 | #include "musb_core.h" | ||
| 42 | #include "omap2430.h" | ||
| 43 | |||
| 44 | #ifdef CONFIG_ARCH_OMAP3430 | ||
| 45 | #define get_cpu_rev() 2 | ||
| 46 | #endif | ||
| 47 | |||
| 48 | #define MUSB_TIMEOUT_A_WAIT_BCON 1100 | ||
| 49 | |||
| 50 | static struct timer_list musb_idle_timer; | ||
| 51 | |||
| 52 | static void musb_do_idle(unsigned long _musb) | ||
| 53 | { | ||
| 54 | struct musb *musb = (void *)_musb; | ||
| 55 | unsigned long flags; | ||
| 56 | u8 power; | ||
| 57 | u8 devctl; | ||
| 58 | |||
| 59 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 60 | |||
| 61 | spin_lock_irqsave(&musb->lock, flags); | ||
| 62 | |||
| 63 | switch (musb->xceiv.state) { | ||
| 64 | case OTG_STATE_A_WAIT_BCON: | ||
| 65 | devctl &= ~MUSB_DEVCTL_SESSION; | ||
| 66 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
| 67 | |||
| 68 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 69 | if (devctl & MUSB_DEVCTL_BDEVICE) { | ||
| 70 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
| 71 | MUSB_DEV_MODE(musb); | ||
| 72 | } else { | ||
| 73 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
| 74 | MUSB_HST_MODE(musb); | ||
| 75 | } | ||
| 76 | break; | ||
| 77 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 78 | case OTG_STATE_A_SUSPEND: | ||
| 79 | /* finish RESUME signaling? */ | ||
| 80 | if (musb->port1_status & MUSB_PORT_STAT_RESUME) { | ||
| 81 | power = musb_readb(musb->mregs, MUSB_POWER); | ||
| 82 | power &= ~MUSB_POWER_RESUME; | ||
| 83 | DBG(1, "root port resume stopped, power %02x\n", power); | ||
| 84 | musb_writeb(musb->mregs, MUSB_POWER, power); | ||
| 85 | musb->is_active = 1; | ||
| 86 | musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | ||
| 87 | | MUSB_PORT_STAT_RESUME); | ||
| 88 | musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; | ||
| 89 | usb_hcd_poll_rh_status(musb_to_hcd(musb)); | ||
| 90 | /* NOTE: it might really be A_WAIT_BCON ... */ | ||
| 91 | musb->xceiv.state = OTG_STATE_A_HOST; | ||
| 92 | } | ||
| 93 | break; | ||
| 94 | #endif | ||
| 95 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 96 | case OTG_STATE_A_HOST: | ||
| 97 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 98 | if (devctl & MUSB_DEVCTL_BDEVICE) | ||
| 99 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
| 100 | else | ||
| 101 | musb->xceiv.state = OTG_STATE_A_WAIT_BCON; | ||
| 102 | #endif | ||
| 103 | default: | ||
| 104 | break; | ||
| 105 | } | ||
| 106 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 107 | } | ||
| 108 | |||
| 109 | |||
| 110 | void musb_platform_try_idle(struct musb *musb, unsigned long timeout) | ||
| 111 | { | ||
| 112 | unsigned long default_timeout = jiffies + msecs_to_jiffies(3); | ||
| 113 | static unsigned long last_timer; | ||
| 114 | |||
| 115 | if (timeout == 0) | ||
| 116 | timeout = default_timeout; | ||
| 117 | |||
| 118 | /* Never idle if active, or when VBUS timeout is not set as host */ | ||
| 119 | if (musb->is_active || ((musb->a_wait_bcon == 0) | ||
| 120 | && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) { | ||
| 121 | DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); | ||
| 122 | del_timer(&musb_idle_timer); | ||
| 123 | last_timer = jiffies; | ||
| 124 | return; | ||
| 125 | } | ||
| 126 | |||
| 127 | if (time_after(last_timer, timeout)) { | ||
| 128 | if (!timer_pending(&musb_idle_timer)) | ||
| 129 | last_timer = timeout; | ||
| 130 | else { | ||
| 131 | DBG(4, "Longer idle timer already pending, ignoring\n"); | ||
| 132 | return; | ||
| 133 | } | ||
| 134 | } | ||
| 135 | last_timer = timeout; | ||
| 136 | |||
| 137 | DBG(4, "%s inactive, starting idle timer for %lu ms\n", | ||
| 138 | otg_state_string(musb), | ||
| 139 | (unsigned long)jiffies_to_msecs(timeout - jiffies)); | ||
| 140 | mod_timer(&musb_idle_timer, timeout); | ||
| 141 | } | ||
| 142 | |||
| 143 | void musb_platform_enable(struct musb *musb) | ||
| 144 | { | ||
| 145 | } | ||
| 146 | void musb_platform_disable(struct musb *musb) | ||
| 147 | { | ||
| 148 | } | ||
| 149 | static void omap_vbus_power(struct musb *musb, int is_on, int sleeping) | ||
| 150 | { | ||
| 151 | } | ||
| 152 | |||
| 153 | static void omap_set_vbus(struct musb *musb, int is_on) | ||
| 154 | { | ||
| 155 | u8 devctl; | ||
| 156 | /* HDRC controls CPEN, but beware current surges during device | ||
| 157 | * connect. They can trigger transient overcurrent conditions | ||
| 158 | * that must be ignored. | ||
| 159 | */ | ||
| 160 | |||
| 161 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 162 | |||
| 163 | if (is_on) { | ||
| 164 | musb->is_active = 1; | ||
| 165 | musb->xceiv.default_a = 1; | ||
| 166 | musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; | ||
| 167 | devctl |= MUSB_DEVCTL_SESSION; | ||
| 168 | |||
| 169 | MUSB_HST_MODE(musb); | ||
| 170 | } else { | ||
| 171 | musb->is_active = 0; | ||
| 172 | |||
| 173 | /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and | ||
| 174 | * jumping right to B_IDLE... | ||
| 175 | */ | ||
| 176 | |||
| 177 | musb->xceiv.default_a = 0; | ||
| 178 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
| 179 | devctl &= ~MUSB_DEVCTL_SESSION; | ||
| 180 | |||
| 181 | MUSB_DEV_MODE(musb); | ||
| 182 | } | ||
| 183 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
| 184 | |||
| 185 | DBG(1, "VBUS %s, devctl %02x " | ||
| 186 | /* otg %3x conf %08x prcm %08x */ "\n", | ||
| 187 | otg_state_string(musb), | ||
| 188 | musb_readb(musb->mregs, MUSB_DEVCTL)); | ||
| 189 | } | ||
| 190 | static int omap_set_power(struct otg_transceiver *x, unsigned mA) | ||
| 191 | { | ||
| 192 | return 0; | ||
| 193 | } | ||
| 194 | |||
| 195 | static int musb_platform_resume(struct musb *musb); | ||
| 196 | |||
| 197 | void musb_platform_set_mode(struct musb *musb, u8 musb_mode) | ||
| 198 | { | ||
| 199 | u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 200 | |||
| 201 | devctl |= MUSB_DEVCTL_SESSION; | ||
| 202 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
| 203 | |||
| 204 | switch (musb_mode) { | ||
| 205 | case MUSB_HOST: | ||
| 206 | otg_set_host(&musb->xceiv, musb->xceiv.host); | ||
| 207 | break; | ||
| 208 | case MUSB_PERIPHERAL: | ||
| 209 | otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget); | ||
| 210 | break; | ||
| 211 | case MUSB_OTG: | ||
| 212 | break; | ||
| 213 | } | ||
| 214 | } | ||
| 215 | |||
| 216 | int __init musb_platform_init(struct musb *musb) | ||
| 217 | { | ||
| 218 | u32 l; | ||
| 219 | |||
| 220 | #if defined(CONFIG_ARCH_OMAP2430) | ||
| 221 | omap_cfg_reg(AE5_2430_USB0HS_STP); | ||
| 222 | #endif | ||
| 223 | |||
| 224 | musb_platform_resume(musb); | ||
| 225 | |||
| 226 | l = omap_readl(OTG_SYSCONFIG); | ||
| 227 | l &= ~ENABLEWAKEUP; /* disable wakeup */ | ||
| 228 | l &= ~NOSTDBY; /* remove possible nostdby */ | ||
| 229 | l |= SMARTSTDBY; /* enable smart standby */ | ||
| 230 | l &= ~AUTOIDLE; /* disable auto idle */ | ||
| 231 | l &= ~NOIDLE; /* remove possible noidle */ | ||
| 232 | l |= SMARTIDLE; /* enable smart idle */ | ||
| 233 | l |= AUTOIDLE; /* enable auto idle */ | ||
| 234 | omap_writel(l, OTG_SYSCONFIG); | ||
| 235 | |||
| 236 | l = omap_readl(OTG_INTERFSEL); | ||
| 237 | l |= ULPI_12PIN; | ||
| 238 | omap_writel(l, OTG_INTERFSEL); | ||
| 239 | |||
| 240 | pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, " | ||
| 241 | "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n", | ||
| 242 | omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG), | ||
| 243 | omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL), | ||
| 244 | omap_readl(OTG_SIMENABLE)); | ||
| 245 | |||
| 246 | omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1); | ||
| 247 | |||
| 248 | if (is_host_enabled(musb)) | ||
| 249 | musb->board_set_vbus = omap_set_vbus; | ||
| 250 | if (is_peripheral_enabled(musb)) | ||
| 251 | musb->xceiv.set_power = omap_set_power; | ||
| 252 | musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON; | ||
| 253 | |||
| 254 | setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); | ||
| 255 | |||
| 256 | return 0; | ||
| 257 | } | ||
| 258 | |||
| 259 | int musb_platform_suspend(struct musb *musb) | ||
| 260 | { | ||
| 261 | u32 l; | ||
| 262 | |||
| 263 | if (!musb->clock) | ||
| 264 | return 0; | ||
| 265 | |||
| 266 | /* in any role */ | ||
| 267 | l = omap_readl(OTG_FORCESTDBY); | ||
| 268 | l |= ENABLEFORCE; /* enable MSTANDBY */ | ||
| 269 | omap_writel(l, OTG_FORCESTDBY); | ||
| 270 | |||
| 271 | l = omap_readl(OTG_SYSCONFIG); | ||
| 272 | l |= ENABLEWAKEUP; /* enable wakeup */ | ||
| 273 | omap_writel(l, OTG_SYSCONFIG); | ||
| 274 | |||
| 275 | if (musb->xceiv.set_suspend) | ||
| 276 | musb->xceiv.set_suspend(&musb->xceiv, 1); | ||
| 277 | |||
| 278 | if (musb->set_clock) | ||
| 279 | musb->set_clock(musb->clock, 0); | ||
| 280 | else | ||
| 281 | clk_disable(musb->clock); | ||
| 282 | |||
| 283 | return 0; | ||
| 284 | } | ||
| 285 | |||
| 286 | static int musb_platform_resume(struct musb *musb) | ||
| 287 | { | ||
| 288 | u32 l; | ||
| 289 | |||
| 290 | if (!musb->clock) | ||
| 291 | return 0; | ||
| 292 | |||
| 293 | if (musb->xceiv.set_suspend) | ||
| 294 | musb->xceiv.set_suspend(&musb->xceiv, 0); | ||
| 295 | |||
| 296 | if (musb->set_clock) | ||
| 297 | musb->set_clock(musb->clock, 1); | ||
| 298 | else | ||
| 299 | clk_enable(musb->clock); | ||
| 300 | |||
| 301 | l = omap_readl(OTG_SYSCONFIG); | ||
| 302 | l &= ~ENABLEWAKEUP; /* disable wakeup */ | ||
| 303 | omap_writel(l, OTG_SYSCONFIG); | ||
| 304 | |||
| 305 | l = omap_readl(OTG_FORCESTDBY); | ||
| 306 | l &= ~ENABLEFORCE; /* disable MSTANDBY */ | ||
| 307 | omap_writel(l, OTG_FORCESTDBY); | ||
| 308 | |||
| 309 | return 0; | ||
| 310 | } | ||
| 311 | |||
| 312 | |||
| 313 | int musb_platform_exit(struct musb *musb) | ||
| 314 | { | ||
| 315 | |||
| 316 | omap_vbus_power(musb, 0 /*off*/, 1); | ||
| 317 | |||
| 318 | musb_platform_suspend(musb); | ||
| 319 | |||
| 320 | clk_put(musb->clock); | ||
| 321 | musb->clock = NULL; | ||
| 322 | |||
| 323 | return 0; | ||
| 324 | } | ||
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h new file mode 100644 index 000000000000..786a62071f72 --- /dev/null +++ b/drivers/usb/musb/omap2430.h | |||
| @@ -0,0 +1,56 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2005-2006 by Texas Instruments | ||
| 3 | * | ||
| 4 | * The Inventra Controller Driver for Linux is free software; you | ||
| 5 | * can redistribute it and/or modify it under the terms of the GNU | ||
| 6 | * General Public License version 2 as published by the Free Software | ||
| 7 | * Foundation. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef __MUSB_OMAP243X_H__ | ||
| 11 | #define __MUSB_OMAP243X_H__ | ||
| 12 | |||
| 13 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) | ||
| 14 | #include <asm/arch/hardware.h> | ||
| 15 | #include <asm/arch/usb.h> | ||
| 16 | |||
| 17 | /* | ||
| 18 | * OMAP2430-specific definitions | ||
| 19 | */ | ||
| 20 | |||
| 21 | #define MENTOR_BASE_OFFSET 0 | ||
| 22 | #if defined(CONFIG_ARCH_OMAP2430) | ||
| 23 | #define OMAP_HSOTG_BASE (OMAP243X_HS_BASE) | ||
| 24 | #elif defined(CONFIG_ARCH_OMAP3430) | ||
| 25 | #define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE) | ||
| 26 | #endif | ||
| 27 | #define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset)) | ||
| 28 | #define OTG_REVISION OMAP_HSOTG(0x0) | ||
| 29 | #define OTG_SYSCONFIG OMAP_HSOTG(0x4) | ||
| 30 | # define MIDLEMODE 12 /* bit position */ | ||
| 31 | # define FORCESTDBY (0 << MIDLEMODE) | ||
| 32 | # define NOSTDBY (1 << MIDLEMODE) | ||
| 33 | # define SMARTSTDBY (2 << MIDLEMODE) | ||
| 34 | # define SIDLEMODE 3 /* bit position */ | ||
| 35 | # define FORCEIDLE (0 << SIDLEMODE) | ||
| 36 | # define NOIDLE (1 << SIDLEMODE) | ||
| 37 | # define SMARTIDLE (2 << SIDLEMODE) | ||
| 38 | # define ENABLEWAKEUP (1 << 2) | ||
| 39 | # define SOFTRST (1 << 1) | ||
| 40 | # define AUTOIDLE (1 << 0) | ||
| 41 | #define OTG_SYSSTATUS OMAP_HSOTG(0x8) | ||
| 42 | # define RESETDONE (1 << 0) | ||
| 43 | #define OTG_INTERFSEL OMAP_HSOTG(0xc) | ||
| 44 | # define EXTCP (1 << 2) | ||
| 45 | # define PHYSEL 0 /* bit position */ | ||
| 46 | # define UTMI_8BIT (0 << PHYSEL) | ||
| 47 | # define ULPI_12PIN (1 << PHYSEL) | ||
| 48 | # define ULPI_8PIN (2 << PHYSEL) | ||
| 49 | #define OTG_SIMENABLE OMAP_HSOTG(0x10) | ||
| 50 | # define TM1 (1 << 0) | ||
| 51 | #define OTG_FORCESTDBY OMAP_HSOTG(0x14) | ||
| 52 | # define ENABLEFORCE (1 << 0) | ||
| 53 | |||
| 54 | #endif /* CONFIG_ARCH_OMAP2430 || CONFIG_ARCH_OMAP3430 */ | ||
| 55 | |||
| 56 | #endif /* __MUSB_OMAP243X_H__ */ | ||
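MIDLEMODE and SIDLEMODE above are 2-bit fields at bit positions 12 and 3 of OTG_SYSCONFIG; FORCESTDBY/NOSTDBY/SMARTSTDBY (and the idle equivalents) are the 0/1/2 encodings of those fields. The stand-alone sketch below (illustrative only, pretending the register read back as 0) shows what the word looks like with smart standby, smart idle and auto-idle selected and wakeup disabled, which is the combination musb_platform_init() in omap2430.c ends up with:

#include <stdio.h>

/* Field positions taken from the header above */
#define MIDLEMODE	12		/* 2-bit master standby mode */
#define SIDLEMODE	3		/* 2-bit slave idle mode */
#define SMARTSTDBY	(2 << MIDLEMODE)
#define SMARTIDLE	(2 << SIDLEMODE)
#define ENABLEWAKEUP	(1 << 2)
#define AUTOIDLE	(1 << 0)

int main(void)
{
	unsigned l = 0;			/* assume OTG_SYSCONFIG read as 0 */

	/* clear both 2-bit fields, then pick the "smart" modes */
	l &= ~(3u << MIDLEMODE);
	l &= ~(3u << SIDLEMODE);
	l |= SMARTSTDBY | SMARTIDLE | AUTOIDLE;
	l &= ~ENABLEWAKEUP;

	printf("OTG_SYSCONFIG would be written as 0x%04x\n", l);
	return 0;
}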
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c new file mode 100644 index 000000000000..b73b036f3d77 --- /dev/null +++ b/drivers/usb/musb/tusb6010.c | |||
| @@ -0,0 +1,1151 @@ | |||
| 1 | /* | ||
| 2 | * TUSB6010 USB 2.0 OTG Dual Role controller | ||
| 3 | * | ||
| 4 | * Copyright (C) 2006 Nokia Corporation | ||
| 5 | * Jarkko Nikula <jarkko.nikula@nokia.com> | ||
| 6 | * Tony Lindgren <tony@atomide.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * Notes: | ||
| 13 | * - Driver assumes that interface to external host (main CPU) is | ||
| 14 | * configured for NOR FLASH interface instead of VLYNQ serial | ||
| 15 | * interface. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/kernel.h> | ||
| 20 | #include <linux/errno.h> | ||
| 21 | #include <linux/init.h> | ||
| 22 | #include <linux/usb.h> | ||
| 23 | #include <linux/irq.h> | ||
| 24 | #include <linux/platform_device.h> | ||
| 25 | |||
| 26 | #include "musb_core.h" | ||
| 27 | |||
| 28 | static void tusb_source_power(struct musb *musb, int is_on); | ||
| 29 | |||
| 30 | #define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf) | ||
| 31 | #define TUSB_REV_MINOR(reg_val) (reg_val & 0xf) | ||
| 32 | |||
| 33 | /* | ||
| 34 | * Checks the revision. We need to use the DMA register as 3.0 does not | ||
| 35 | * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV. | ||
| 36 | */ | ||
| 37 | u8 tusb_get_revision(struct musb *musb) | ||
| 38 | { | ||
| 39 | void __iomem *tbase = musb->ctrl_base; | ||
| 40 | u32 die_id; | ||
| 41 | u8 rev; | ||
| 42 | |||
| 43 | rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff; | ||
| 44 | if (TUSB_REV_MAJOR(rev) == 3) { | ||
| 45 | die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, | ||
| 46 | TUSB_DIDR1_HI)); | ||
| 47 | if (die_id >= TUSB_DIDR1_HI_REV_31) | ||
| 48 | rev |= 1; | ||
| 49 | } | ||
| 50 | |||
| 51 | return rev; | ||
| 52 | } | ||
| 53 | |||
| 54 | static int __init tusb_print_revision(struct musb *musb) | ||
| 55 | { | ||
| 56 | void __iomem *tbase = musb->ctrl_base; | ||
| 57 | u8 rev; | ||
| 58 | |||
| 59 | rev = tusb_get_revision(musb); | ||
| 60 | |||
| 61 | pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n", | ||
| 62 | "prcm", | ||
| 63 | TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)), | ||
| 64 | TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)), | ||
| 65 | "int", | ||
| 66 | TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)), | ||
| 67 | TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)), | ||
| 68 | "gpio", | ||
| 69 | TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)), | ||
| 70 | TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)), | ||
| 71 | "dma", | ||
| 72 | TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)), | ||
| 73 | TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)), | ||
| 74 | "dieid", | ||
| 75 | TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)), | ||
| 76 | "rev", | ||
| 77 | TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev)); | ||
| 78 | |||
| 79 | return tusb_get_revision(musb); | ||
| 80 | } | ||
| 81 | |||
| 82 | #define WBUS_QUIRK_MASK (TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \ | ||
| 83 | | TUSB_PHY_OTG_CTRL_TESTM0) | ||
| 84 | |||
| 85 | /* | ||
| 86 | * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0. | ||
| 87 | * Disables power detection in PHY for the duration of idle. | ||
| 88 | */ | ||
| 89 | static void tusb_wbus_quirk(struct musb *musb, int enabled) | ||
| 90 | { | ||
| 91 | void __iomem *tbase = musb->ctrl_base; | ||
| 92 | static u32 phy_otg_ctrl, phy_otg_ena; | ||
| 93 | u32 tmp; | ||
| 94 | |||
| 95 | if (enabled) { | ||
| 96 | phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL); | ||
| 97 | phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); | ||
| 98 | tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | ||
| 99 | | phy_otg_ena | WBUS_QUIRK_MASK; | ||
| 100 | musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp); | ||
| 101 | tmp = phy_otg_ena & ~WBUS_QUIRK_MASK; | ||
| 102 | tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2; | ||
| 103 | musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); | ||
| 104 | DBG(2, "Enabled tusb wbus quirk ctrl %08x ena %08x\n", | ||
| 105 | musb_readl(tbase, TUSB_PHY_OTG_CTRL), | ||
| 106 | musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); | ||
| 107 | } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE) | ||
| 108 | & TUSB_PHY_OTG_CTRL_TESTM2) { | ||
| 109 | tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl; | ||
| 110 | musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp); | ||
| 111 | tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena; | ||
| 112 | musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); | ||
| 113 | DBG(2, "Disabled tusb wbus quirk ctrl %08x ena %08x\n", | ||
| 114 | musb_readl(tbase, TUSB_PHY_OTG_CTRL), | ||
| 115 | musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); | ||
| 116 | phy_otg_ctrl = 0; | ||
| 117 | phy_otg_ena = 0; | ||
| 118 | } | ||
| 119 | } | ||
| 120 | |||
| 121 | /* | ||
| 122 | * TUSB 6010 may use a parallel bus that doesn't support byte ops; | ||
| 123 | * so both loading and unloading FIFOs need explicit byte counts. | ||
| 124 | */ | ||
| 125 | |||
| 126 | static inline void | ||
| 127 | tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len) | ||
| 128 | { | ||
| 129 | u32 val; | ||
| 130 | int i; | ||
| 131 | |||
| 132 | if (len > 4) { | ||
| 133 | for (i = 0; i < (len >> 2); i++) { | ||
| 134 | memcpy(&val, buf, 4); | ||
| 135 | musb_writel(fifo, 0, val); | ||
| 136 | buf += 4; | ||
| 137 | } | ||
| 138 | len %= 4; | ||
| 139 | } | ||
| 140 | if (len > 0) { | ||
| 141 | /* Write the remaining 1 - 3 bytes to the FIFO */ | ||
| 142 | memcpy(&val, buf, len); | ||
| 143 | musb_writel(fifo, 0, val); | ||
| 144 | } | ||
| 145 | } | ||
| 146 | |||
| 147 | static inline void tusb_fifo_read_unaligned(void __iomem *fifo, | ||
| 148 | void __iomem *buf, u16 len) | ||
| 149 | { | ||
| 150 | u32 val; | ||
| 151 | int i; | ||
| 152 | |||
| 153 | if (len > 4) { | ||
| 154 | for (i = 0; i < (len >> 2); i++) { | ||
| 155 | val = musb_readl(fifo, 0); | ||
| 156 | memcpy(buf, &val, 4); | ||
| 157 | buf += 4; | ||
| 158 | } | ||
| 159 | len %= 4; | ||
| 160 | } | ||
| 161 | if (len > 0) { | ||
| 162 | /* Read the remaining 1 - 3 bytes from the FIFO */ | ||
| 163 | val = musb_readl(fifo, 0); | ||
| 164 | memcpy(buf, &val, len); | ||
| 165 | } | ||
| 166 | } | ||
| 167 | |||
| 168 | void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf) | ||
| 169 | { | ||
| 170 | void __iomem *ep_conf = hw_ep->conf; | ||
| 171 | void __iomem *fifo = hw_ep->fifo; | ||
| 172 | u8 epnum = hw_ep->epnum; | ||
| 173 | |||
| 174 | prefetch(buf); | ||
| 175 | |||
| 176 | DBG(4, "%cX ep%d fifo %p count %d buf %p\n", | ||
| 177 | 'T', epnum, fifo, len, buf); | ||
| 178 | |||
| 179 | if (epnum) | ||
| 180 | musb_writel(ep_conf, TUSB_EP_TX_OFFSET, | ||
| 181 | TUSB_EP_CONFIG_XFR_SIZE(len)); | ||
| 182 | else | ||
| 183 | musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX | | ||
| 184 | TUSB_EP0_CONFIG_XFR_SIZE(len)); | ||
| 185 | |||
| 186 | if (likely((0x01 & (unsigned long) buf) == 0)) { | ||
| 187 | |||
| 188 | /* Best case is 32bit-aligned destination address */ | ||
| 189 | if ((0x02 & (unsigned long) buf) == 0) { | ||
| 190 | if (len >= 4) { | ||
| 191 | writesl(fifo, buf, len >> 2); | ||
| 192 | buf += (len & ~0x03); | ||
| 193 | len &= 0x03; | ||
| 194 | } | ||
| 195 | } else { | ||
| 196 | if (len >= 2) { | ||
| 197 | u32 val; | ||
| 198 | int i; | ||
| 199 | |||
| 200 | /* Cannot use writesw, fifo is 32-bit */ | ||
| 201 | for (i = 0; i < (len >> 2); i++) { | ||
| 202 | val = (u32)(*(u16 *)buf); | ||
| 203 | buf += 2; | ||
| 204 | val |= (*(u16 *)buf) << 16; | ||
| 205 | buf += 2; | ||
| 206 | musb_writel(fifo, 0, val); | ||
| 207 | } | ||
| 208 | len &= 0x03; | ||
| 209 | } | ||
| 210 | } | ||
| 211 | } | ||
| 212 | |||
| 213 | if (len > 0) | ||
| 214 | tusb_fifo_write_unaligned(fifo, buf, len); | ||
| 215 | } | ||
| 216 | |||
| 217 | void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf) | ||
| 218 | { | ||
| 219 | void __iomem *ep_conf = hw_ep->conf; | ||
| 220 | void __iomem *fifo = hw_ep->fifo; | ||
| 221 | u8 epnum = hw_ep->epnum; | ||
| 222 | |||
| 223 | DBG(4, "%cX ep%d fifo %p count %d buf %p\n", | ||
| 224 | 'R', epnum, fifo, len, buf); | ||
| 225 | |||
| 226 | if (epnum) | ||
| 227 | musb_writel(ep_conf, TUSB_EP_RX_OFFSET, | ||
| 228 | TUSB_EP_CONFIG_XFR_SIZE(len)); | ||
| 229 | else | ||
| 230 | musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len)); | ||
| 231 | |||
| 232 | if (likely((0x01 & (unsigned long) buf) == 0)) { | ||
| 233 | |||
| 234 | /* Best case is 32bit-aligned destination address */ | ||
| 235 | if ((0x02 & (unsigned long) buf) == 0) { | ||
| 236 | if (len >= 4) { | ||
| 237 | readsl(fifo, buf, len >> 2); | ||
| 238 | buf += (len & ~0x03); | ||
| 239 | len &= 0x03; | ||
| 240 | } | ||
| 241 | } else { | ||
| 242 | if (len >= 2) { | ||
| 243 | u32 val; | ||
| 244 | int i; | ||
| 245 | |||
| 246 | /* Cannot use readsw, fifo is 32-bit */ | ||
| 247 | for (i = 0; i < (len >> 2); i++) { | ||
| 248 | val = musb_readl(fifo, 0); | ||
| 249 | *(u16 *)buf = (u16)(val & 0xffff); | ||
| 250 | buf += 2; | ||
| 251 | *(u16 *)buf = (u16)(val >> 16); | ||
| 252 | buf += 2; | ||
| 253 | } | ||
| 254 | len &= 0x03; | ||
| 255 | } | ||
| 256 | } | ||
| 257 | } | ||
| 258 | |||
| 259 | if (len > 0) | ||
| 260 | tusb_fifo_read_unaligned(fifo, buf, len); | ||
| 261 | } | ||
| 262 | |||
| 263 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 264 | |||
| 265 | /* This is used by gadget drivers, and OTG transceiver logic, allowing | ||
| 266 | * at most mA current to be drawn from VBUS during a Default-B session | ||
| 267 | * (that is, while VBUS exceeds 4.4V). In Default-A (including pure host | ||
| 268 | * mode), or low power Default-B sessions, something else supplies power. | ||
| 269 | * Caller must take care of locking. | ||
| 270 | */ | ||
| 271 | static int tusb_draw_power(struct otg_transceiver *x, unsigned mA) | ||
| 272 | { | ||
| 273 | struct musb *musb = container_of(x, struct musb, xceiv); | ||
| 274 | void __iomem *tbase = musb->ctrl_base; | ||
| 275 | u32 reg; | ||
| 276 | |||
| 277 | /* | ||
| 278 | * Keep clock active when enabled. Note that this is not tied to | ||
| 279 | * drawing VBUS, as with OTG mA can be less than musb->min_power. | ||
| 280 | */ | ||
| 281 | if (musb->set_clock) { | ||
| 282 | if (mA) | ||
| 283 | musb->set_clock(musb->clock, 1); | ||
| 284 | else | ||
| 285 | musb->set_clock(musb->clock, 0); | ||
| 286 | } | ||
| 287 | |||
| 288 | /* tps65030 seems to consume max 100mA, with maybe 60mA available | ||
| 289 | * (measured on one board) for things other than tps and tusb. | ||
| 290 | * | ||
| 291 | * Boards sharing the CPU clock with CLKIN will need to prevent | ||
| 292 | * certain idle sleep states while the USB link is active. | ||
| 293 | * | ||
| 294 | * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }. | ||
| 295 | * The actual current usage would be very board-specific. For now, | ||
| 296 | * it's simpler to just use an aggregate (also board-specific). | ||
| 297 | */ | ||
| 298 | if (x->default_a || mA < (musb->min_power << 1)) | ||
| 299 | mA = 0; | ||
| 300 | |||
| 301 | reg = musb_readl(tbase, TUSB_PRCM_MNGMT); | ||
| 302 | if (mA) { | ||
| 303 | musb->is_bus_powered = 1; | ||
| 304 | reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN; | ||
| 305 | } else { | ||
| 306 | musb->is_bus_powered = 0; | ||
| 307 | reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN); | ||
| 308 | } | ||
| 309 | musb_writel(tbase, TUSB_PRCM_MNGMT, reg); | ||
| 310 | |||
| 311 | DBG(2, "draw max %d mA VBUS\n", mA); | ||
| 312 | return 0; | ||
| 313 | } | ||
| 314 | |||
| 315 | #else | ||
| 316 | #define tusb_draw_power NULL | ||
| 317 | #endif | ||
| 318 | |||
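The threshold test in tusb_draw_power() can be read as: never feed the 1.5V/3.3V switches from VBUS when acting as the Default-A side, and only do so as a B-device when the session allows at least the board's configured minimum (min_power, which the << 1 suggests is kept in 2 mA units). A stand-alone model of that decision (illustrative only; the numbers are made up):

#include <stdbool.h>
#include <stdio.h>

/* Decide whether the supply switches may be fed from VBUS: never as
 * Default-A, and as a B-device only above the configured minimum.
 */
static bool power_from_vbus(bool default_a, unsigned mA, unsigned min_power)
{
	if (default_a || mA < (min_power << 1))
		return false;
	return true;
}

int main(void)
{
	printf("B-device, 100 mA, min 50 -> %d\n",
	       power_from_vbus(false, 100, 50));	/* 1: enough current */
	printf("B-device,  80 mA, min 50 -> %d\n",
	       power_from_vbus(false, 80, 50));		/* 0: below minimum */
	printf("A-device, 500 mA, min 50 -> %d\n",
	       power_from_vbus(true, 500, 50));		/* 0: Default-A */
	return 0;
}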
| 319 | /* workaround for issue 13: change clock during chip idle | ||
| 320 | * (to be fixed in rev3 silicon) ... symptoms include disconnect | ||
| 321 | * or looping suspend/resume cycles | ||
| 322 | */ | ||
| 323 | static void tusb_set_clock_source(struct musb *musb, unsigned mode) | ||
| 324 | { | ||
| 325 | void __iomem *tbase = musb->ctrl_base; | ||
| 326 | u32 reg; | ||
| 327 | |||
| 328 | reg = musb_readl(tbase, TUSB_PRCM_CONF); | ||
| 329 | reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3); | ||
| 330 | |||
| 331 | /* 0 = refclk (clkin, XI) | ||
| 332 | * 1 = PHY 60 MHz (internal PLL) | ||
| 333 | * 2 = not supported | ||
| 334 | * 3 = what? | ||
| 335 | */ | ||
| 336 | if (mode > 0) | ||
| 337 | reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3); | ||
| 338 | |||
| 339 | musb_writel(tbase, TUSB_PRCM_CONF, reg); | ||
| 340 | |||
| 341 | /* FIXME tusb6010_platform_retime(mode == 0); */ | ||
| 342 | } | ||
| 343 | |||
| 344 | /* | ||
| 345 | * Idle TUSB6010 until next wake-up event; NOR access always wakes. | ||
| 346 | * Other code ensures that we idle unless we're connected _and_ the | ||
| 347 | * USB link is not suspended ... and tells us the relevant wakeup | ||
| 348 | * events. SW_EN for voltage is handled separately. | ||
| 349 | */ | ||
| 350 | void tusb_allow_idle(struct musb *musb, u32 wakeup_enables) | ||
| 351 | { | ||
| 352 | void __iomem *tbase = musb->ctrl_base; | ||
| 353 | u32 reg; | ||
| 354 | |||
| 355 | if ((wakeup_enables & TUSB_PRCM_WBUS) | ||
| 356 | && (tusb_get_revision(musb) == TUSB_REV_30)) | ||
| 357 | tusb_wbus_quirk(musb, 1); | ||
| 358 | |||
| 359 | tusb_set_clock_source(musb, 0); | ||
| 360 | |||
| 361 | wakeup_enables |= TUSB_PRCM_WNORCS; | ||
| 362 | musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables); | ||
| 363 | |||
| 364 | /* REVISIT writeup of WID implies that if WID set and ID is grounded, | ||
| 365 | * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared. | ||
| 366 | * Presumably that's mostly to save power, hence WID is immaterial ... | ||
| 367 | */ | ||
| 368 | |||
| 369 | reg = musb_readl(tbase, TUSB_PRCM_MNGMT); | ||
| 370 | /* issue 4: when driving vbus, use hipower (vbus_det) comparator */ | ||
| 371 | if (is_host_active(musb)) { | ||
| 372 | reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; | ||
| 373 | reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN; | ||
| 374 | } else { | ||
| 375 | reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN; | ||
| 376 | reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; | ||
| 377 | } | ||
| 378 | reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE; | ||
| 379 | musb_writel(tbase, TUSB_PRCM_MNGMT, reg); | ||
| 380 | |||
| 381 | DBG(6, "idle, wake on %02x\n", wakeup_enables); | ||
| 382 | } | ||
| 383 | |||
| 384 | /* | ||
| 385 | * Updates cable VBUS status. Caller must take care of locking. | ||
| 386 | */ | ||
| 387 | int musb_platform_get_vbus_status(struct musb *musb) | ||
| 388 | { | ||
| 389 | void __iomem *tbase = musb->ctrl_base; | ||
| 390 | u32 otg_stat, prcm_mngmt; | ||
| 391 | int ret = 0; | ||
| 392 | |||
| 393 | otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); | ||
| 394 | prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT); | ||
| 395 | |||
| 396 | /* Temporarily enable VBUS detection if it was disabled for | ||
| 397 | * suspend mode. Unless it's enabled, otg_stat and devctl will | ||
| 398 | * not show the correct VBUS state. | ||
| 399 | */ | ||
| 400 | if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) { | ||
| 401 | u32 tmp = prcm_mngmt; | ||
| 402 | tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; | ||
| 403 | musb_writel(tbase, TUSB_PRCM_MNGMT, tmp); | ||
| 404 | otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); | ||
| 405 | musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt); | ||
| 406 | } | ||
| 407 | |||
| 408 | if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) | ||
| 409 | ret = 1; | ||
| 410 | |||
| 411 | return ret; | ||
| 412 | } | ||
| 413 | |||
| 414 | static struct timer_list musb_idle_timer; | ||
| 415 | |||
| 416 | static void musb_do_idle(unsigned long _musb) | ||
| 417 | { | ||
| 418 | struct musb *musb = (void *)_musb; | ||
| 419 | unsigned long flags; | ||
| 420 | |||
| 421 | spin_lock_irqsave(&musb->lock, flags); | ||
| 422 | |||
| 423 | switch (musb->xceiv.state) { | ||
| 424 | case OTG_STATE_A_WAIT_BCON: | ||
| 425 | if ((musb->a_wait_bcon != 0) | ||
| 426 | && (musb->idle_timeout == 0 | ||
| 427 | || time_after(jiffies, musb->idle_timeout))) { | ||
| 428 | DBG(4, "Nothing connected %s, turning off VBUS\n", | ||
| 429 | otg_state_string(musb)); | ||
| 430 | } | ||
| 431 | /* FALLTHROUGH */ | ||
| 432 | case OTG_STATE_A_IDLE: | ||
| 433 | tusb_source_power(musb, 0); | ||
| 434 | default: | ||
| 435 | break; | ||
| 436 | } | ||
| 437 | |||
| 438 | if (!musb->is_active) { | ||
| 439 | u32 wakeups; | ||
| 440 | |||
| 441 | /* wait until khubd handles port change status */ | ||
| 442 | if (is_host_active(musb) && (musb->port1_status >> 16)) | ||
| 443 | goto done; | ||
| 444 | |||
| 445 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 446 | if (is_peripheral_enabled(musb) && !musb->gadget_driver) | ||
| 447 | wakeups = 0; | ||
| 448 | else { | ||
| 449 | wakeups = TUSB_PRCM_WHOSTDISCON | ||
| 450 | | TUSB_PRCM_WBUS | ||
| 451 | | TUSB_PRCM_WVBUS; | ||
| 452 | if (is_otg_enabled(musb)) | ||
| 453 | wakeups |= TUSB_PRCM_WID; | ||
| 454 | } | ||
| 455 | #else | ||
| 456 | wakeups = TUSB_PRCM_WHOSTDISCON | TUSB_PRCM_WBUS; | ||
| 457 | #endif | ||
| 458 | tusb_allow_idle(musb, wakeups); | ||
| 459 | } | ||
| 460 | done: | ||
| 461 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 462 | } | ||
| 463 | |||
| 464 | /* | ||
| 465 | * Maybe put TUSB6010 into idle mode depending on USB link status, | ||
| 466 | * like "disconnected" or "suspended". We'll be woken out of it by | ||
| 467 | * connect, resume, or disconnect. | ||
| 468 | * | ||
| 469 | * Needs to be called last wherever there is register access to | ||
| 470 | * TUSB6010, because of NOR flash wake-up. | ||
| 471 | * Caller should own controller spinlock. | ||
| 472 | * | ||
| 473 | * Delay because peripheral enables D+ pullup 3msec after SE0, and | ||
| 474 | * we don't want to treat that full speed J as a wakeup event. | ||
| 475 | * ... peripherals must draw only suspend current after 10 msec. | ||
| 476 | */ | ||
| 477 | void musb_platform_try_idle(struct musb *musb, unsigned long timeout) | ||
| 478 | { | ||
| 479 | unsigned long default_timeout = jiffies + msecs_to_jiffies(3); | ||
| 480 | static unsigned long last_timer; | ||
| 481 | |||
| 482 | if (timeout == 0) | ||
| 483 | timeout = default_timeout; | ||
| 484 | |||
| 485 | /* Never idle if active, or when VBUS timeout is not set as host */ | ||
| 486 | if (musb->is_active || ((musb->a_wait_bcon == 0) | ||
| 487 | && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) { | ||
| 488 | DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); | ||
| 489 | del_timer(&musb_idle_timer); | ||
| 490 | last_timer = jiffies; | ||
| 491 | return; | ||
| 492 | } | ||
| 493 | |||
| 494 | if (time_after(last_timer, timeout)) { | ||
| 495 | if (!timer_pending(&musb_idle_timer)) | ||
| 496 | last_timer = timeout; | ||
| 497 | else { | ||
| 498 | DBG(4, "Longer idle timer already pending, ignoring\n"); | ||
| 499 | return; | ||
| 500 | } | ||
| 501 | } | ||
| 502 | last_timer = timeout; | ||
| 503 | |||
| 504 | DBG(4, "%s inactive, starting idle timer for %lu ms\n", | ||
| 505 | otg_state_string(musb), | ||
| 506 | (unsigned long)jiffies_to_msecs(timeout - jiffies)); | ||
| 507 | mod_timer(&musb_idle_timer, timeout); | ||
| 508 | } | ||
| 509 | |||
| 510 | /* ticks of 60 MHz clock */ | ||
| 511 | #define DEVCLOCK 60000000 | ||
| 512 | #define OTG_TIMER_MS(msecs) ((msecs) \ | ||
| 513 | ? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \ | ||
| 514 | | TUSB_DEV_OTG_TIMER_ENABLE) \ | ||
| 515 | : 0) | ||
| 516 | |||
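OTG_TIMER_MS() converts milliseconds into ticks of the 60 MHz device clock: DEVCLOCK/1000 is 60000 ticks per millisecond, with TUSB_DEV_OTG_TIMER_VAL() and TUSB_DEV_OTG_TIMER_ENABLE (defined in the chip header, not shown here) doing the field masking and the enable. The arithmetic for an illustrative 100 ms budget (the real OTG_TIME_A_WAIT_VRISE value is defined elsewhere):

#include <stdio.h>

#define DEVCLOCK	60000000	/* 60 MHz TUSB device clock */

int main(void)
{
	unsigned msecs = 100;		/* illustrative A_WAIT_VRISE budget */
	unsigned long ticks = (DEVCLOCK / 1000) * (unsigned long)msecs;

	printf("%u ms = %lu ticks of the 60 MHz clock (0x%lx)\n",
	       msecs, ticks, ticks);
	return 0;
}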
| 517 | static void tusb_source_power(struct musb *musb, int is_on) | ||
| 518 | { | ||
| 519 | void __iomem *tbase = musb->ctrl_base; | ||
| 520 | u32 conf, prcm, timer; | ||
| 521 | u8 devctl; | ||
| 522 | |||
| 523 | /* HDRC controls CPEN, but beware current surges during device | ||
| 524 | * connect. They can trigger transient overcurrent conditions | ||
| 525 | * that must be ignored. | ||
| 526 | */ | ||
| 527 | |||
| 528 | prcm = musb_readl(tbase, TUSB_PRCM_MNGMT); | ||
| 529 | conf = musb_readl(tbase, TUSB_DEV_CONF); | ||
| 530 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 531 | |||
| 532 | if (is_on) { | ||
| 533 | if (musb->set_clock) | ||
| 534 | musb->set_clock(musb->clock, 1); | ||
| 535 | timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE); | ||
| 536 | musb->xceiv.default_a = 1; | ||
| 537 | musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; | ||
| 538 | devctl |= MUSB_DEVCTL_SESSION; | ||
| 539 | |||
| 540 | conf |= TUSB_DEV_CONF_USB_HOST_MODE; | ||
| 541 | MUSB_HST_MODE(musb); | ||
| 542 | } else { | ||
| 543 | u32 otg_stat; | ||
| 544 | |||
| 545 | timer = 0; | ||
| 546 | |||
| 547 | /* If ID pin is grounded, we want to be a_idle */ | ||
| 548 | otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); | ||
| 549 | if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) { | ||
| 550 | switch (musb->xceiv.state) { | ||
| 551 | case OTG_STATE_A_WAIT_VRISE: | ||
| 552 | case OTG_STATE_A_WAIT_BCON: | ||
| 553 | musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; | ||
| 554 | break; | ||
| 555 | case OTG_STATE_A_WAIT_VFALL: | ||
| 556 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
| 557 | break; | ||
| 558 | default: | ||
| 559 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
| 560 | } | ||
| 561 | musb->is_active = 0; | ||
| 562 | musb->xceiv.default_a = 1; | ||
| 563 | MUSB_HST_MODE(musb); | ||
| 564 | } else { | ||
| 565 | musb->is_active = 0; | ||
| 566 | musb->xceiv.default_a = 0; | ||
| 567 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
| 568 | MUSB_DEV_MODE(musb); | ||
| 569 | } | ||
| 570 | |||
| 571 | devctl &= ~MUSB_DEVCTL_SESSION; | ||
| 572 | conf &= ~TUSB_DEV_CONF_USB_HOST_MODE; | ||
| 573 | if (musb->set_clock) | ||
| 574 | musb->set_clock(musb->clock, 0); | ||
| 575 | } | ||
| 576 | prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN); | ||
| 577 | |||
| 578 | musb_writel(tbase, TUSB_PRCM_MNGMT, prcm); | ||
| 579 | musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer); | ||
| 580 | musb_writel(tbase, TUSB_DEV_CONF, conf); | ||
| 581 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
| 582 | |||
| 583 | DBG(1, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n", | ||
| 584 | otg_state_string(musb), | ||
| 585 | musb_readb(musb->mregs, MUSB_DEVCTL), | ||
| 586 | musb_readl(tbase, TUSB_DEV_OTG_STAT), | ||
| 587 | conf, prcm); | ||
| 588 | } | ||
| 589 | |||
| 590 | /* | ||
| 591 | * Sets the mode to OTG, peripheral or host by changing the ID detection. | ||
| 592 | * Caller must take care of locking. | ||
| 593 | * | ||
| 594 | * Note that if a mini-A cable is plugged in the ID line will stay down as | ||
| 595 | * the weak ID pull-up is not able to pull the ID up. | ||
| 596 | * | ||
| 597 | * REVISIT: It would be possible to add support for changing between host | ||
| 598 | * and peripheral modes in non-OTG configurations by reconfiguring hardware | ||
| 599 | * and then setting musb->board_mode. For now, only support OTG mode. | ||
| 600 | */ | ||
| 601 | void musb_platform_set_mode(struct musb *musb, u8 musb_mode) | ||
| 602 | { | ||
| 603 | void __iomem *tbase = musb->ctrl_base; | ||
| 604 | u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf; | ||
| 605 | |||
| 606 | if (musb->board_mode != MUSB_OTG) { | ||
| 607 | ERR("Changing mode currently only supported in OTG mode\n"); | ||
| 608 | return; | ||
| 609 | } | ||
| 610 | |||
| 611 | otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); | ||
| 612 | phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL); | ||
| 613 | phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); | ||
| 614 | dev_conf = musb_readl(tbase, TUSB_DEV_CONF); | ||
| 615 | |||
| 616 | switch (musb_mode) { | ||
| 617 | |||
| 618 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 619 | case MUSB_HOST: /* Disable PHY ID detect, ground ID */ | ||
| 620 | phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
| 621 | phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
| 622 | dev_conf |= TUSB_DEV_CONF_ID_SEL; | ||
| 623 | dev_conf &= ~TUSB_DEV_CONF_SOFT_ID; | ||
| 624 | break; | ||
| 625 | #endif | ||
| 626 | |||
| 627 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
| 628 | case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */ | ||
| 629 | phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
| 630 | phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
| 631 | dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID); | ||
| 632 | break; | ||
| 633 | #endif | ||
| 634 | |||
| 635 | #ifdef CONFIG_USB_MUSB_OTG | ||
| 636 | case MUSB_OTG: /* Use PHY ID detection */ | ||
| 637 | phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
| 638 | phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
| 639 | dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID); | ||
| 640 | break; | ||
| 641 | #endif | ||
| 642 | |||
| 643 | default: | ||
| 644 | DBG(2, "Trying to set unknown mode %i\n", musb_mode); | ||
| 645 | } | ||
| 646 | |||
| 647 | musb_writel(tbase, TUSB_PHY_OTG_CTRL, | ||
| 648 | TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl); | ||
| 649 | musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, | ||
| 650 | TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena); | ||
| 651 | musb_writel(tbase, TUSB_DEV_CONF, dev_conf); | ||
| 652 | |||
| 653 | otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); | ||
| 654 | if ((musb_mode == MUSB_PERIPHERAL) && | ||
| 655 | !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) | ||
| 656 | INFO("Cannot be peripheral with mini-A cable " | ||
| 657 | "otg_stat: %08x\n", otg_stat); | ||
| 658 | } | ||
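
The block comment above musb_platform_set_mode() requires the caller to handle locking and restricts mode changes to boards configured for OTG. A minimal caller-side sketch of that contract, assuming the usual struct musb declarations from musb_core.h and using a hypothetical helper name:

/* Hypothetical caller: musb_platform_set_mode() expects musb->lock to be held */
static void example_force_peripheral(struct musb *musb)
{
	unsigned long flags;

	spin_lock_irqsave(&musb->lock, flags);
	musb_platform_set_mode(musb, MUSB_PERIPHERAL);
	spin_unlock_irqrestore(&musb->lock, flags);
}

As the INFO message at the end of the function shows, the request can still be refused at runtime when a mini-A cable keeps the ID line grounded.
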
| 659 | |||
| 660 | static inline unsigned long | ||
| 661 | tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) | ||
| 662 | { | ||
| 663 | u32 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); | ||
| 664 | unsigned long idle_timeout = 0; | ||
| 665 | |||
| 666 | /* ID pin */ | ||
| 667 | if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) { | ||
| 668 | int default_a; | ||
| 669 | |||
| 670 | if (is_otg_enabled(musb)) | ||
| 671 | default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS); | ||
| 672 | else | ||
| 673 | default_a = is_host_enabled(musb); | ||
| 674 | DBG(2, "Default-%c\n", default_a ? 'A' : 'B'); | ||
| 675 | musb->xceiv.default_a = default_a; | ||
| 676 | tusb_source_power(musb, default_a); | ||
| 677 | |||
| 678 | /* Don't allow idling immediately */ | ||
| 679 | if (default_a) | ||
| 680 | idle_timeout = jiffies + (HZ * 3); | ||
| 681 | } | ||
| 682 | |||
| 683 | /* VBUS state change */ | ||
| 684 | if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) { | ||
| 685 | |||
| 686 | /* B-dev state machine: no vbus ~= disconnect */ | ||
| 687 | if ((is_otg_enabled(musb) && !musb->xceiv.default_a) | ||
| 688 | || !is_host_enabled(musb)) { | ||
| 689 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
| 690 | /* ? musb_root_disconnect(musb); */ | ||
| 691 | musb->port1_status &= | ||
| 692 | ~(USB_PORT_STAT_CONNECTION | ||
| 693 | | USB_PORT_STAT_ENABLE | ||
| 694 | | USB_PORT_STAT_LOW_SPEED | ||
| 695 | | USB_PORT_STAT_HIGH_SPEED | ||
| 696 | | USB_PORT_STAT_TEST | ||
| 697 | ); | ||
| 698 | #endif | ||
| 699 | |||
| 700 | if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) { | ||
| 701 | DBG(1, "Forcing disconnect (no interrupt)\n"); | ||
| 702 | if (musb->xceiv.state != OTG_STATE_B_IDLE) { | ||
| 703 | /* INTR_DISCONNECT can hide... */ | ||
| 704 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
| 705 | musb->int_usb |= MUSB_INTR_DISCONNECT; | ||
| 706 | } | ||
| 707 | musb->is_active = 0; | ||
| 708 | } | ||
| 709 | DBG(2, "vbus change, %s, otg %03x\n", | ||
| 710 | otg_state_string(musb), otg_stat); | ||
| 711 | idle_timeout = jiffies + (1 * HZ); | ||
| 712 | schedule_work(&musb->irq_work); | ||
| 713 | |||
| 714 | } else /* A-dev state machine */ { | ||
| 715 | DBG(2, "vbus change, %s, otg %03x\n", | ||
| 716 | otg_state_string(musb), otg_stat); | ||
| 717 | |||
| 718 | switch (musb->xceiv.state) { | ||
| 719 | case OTG_STATE_A_IDLE: | ||
| 720 | DBG(2, "Got SRP, turning on VBUS\n"); | ||
| 721 | musb_set_vbus(musb, 1); | ||
| 722 | |||
| 723 | /* CONNECT can wake if a_wait_bcon is set */ | ||
| 724 | if (musb->a_wait_bcon != 0) | ||
| 725 | musb->is_active = 0; | ||
| 726 | else | ||
| 727 | musb->is_active = 1; | ||
| 728 | |||
| 729 | /* | ||
| 730 | * OPT FS A TD.4.6 needs a few seconds for | ||
| 731 | * A_WAIT_VRISE | ||
| 732 | */ | ||
| 733 | idle_timeout = jiffies + (2 * HZ); | ||
| 734 | |||
| 735 | break; | ||
| 736 | case OTG_STATE_A_WAIT_VRISE: | ||
| 737 | /* ignore; A-session-valid < VBUS_VALID/2, | ||
| 738 | * we monitor this with the timer | ||
| 739 | */ | ||
| 740 | break; | ||
| 741 | case OTG_STATE_A_WAIT_VFALL: | ||
| 742 | /* REVISIT this irq triggers during short | ||
| 743 | * spikes caused by enumeration ... | ||
| 744 | */ | ||
| 745 | if (musb->vbuserr_retry) { | ||
| 746 | musb->vbuserr_retry--; | ||
| 747 | tusb_source_power(musb, 1); | ||
| 748 | } else { | ||
| 749 | musb->vbuserr_retry | ||
| 750 | = VBUSERR_RETRY_COUNT; | ||
| 751 | tusb_source_power(musb, 0); | ||
| 752 | } | ||
| 753 | break; | ||
| 754 | default: | ||
| 755 | break; | ||
| 756 | } | ||
| 757 | } | ||
| 758 | } | ||
| 759 | |||
| 760 | /* OTG timer expiration */ | ||
| 761 | if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) { | ||
| 762 | u8 devctl; | ||
| 763 | |||
| 764 | DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat); | ||
| 765 | |||
| 766 | switch (musb->xceiv.state) { | ||
| 767 | case OTG_STATE_A_WAIT_VRISE: | ||
| 768 | /* VBUS has probably been valid for a while now, | ||
| 769 | * but may well have bounced out of range a bit | ||
| 770 | */ | ||
| 771 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 772 | if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) { | ||
| 773 | if ((devctl & MUSB_DEVCTL_VBUS) | ||
| 774 | != MUSB_DEVCTL_VBUS) { | ||
| 775 | DBG(2, "devctl %02x\n", devctl); | ||
| 776 | break; | ||
| 777 | } | ||
| 778 | musb->xceiv.state = OTG_STATE_A_WAIT_BCON; | ||
| 779 | musb->is_active = 0; | ||
| 780 | idle_timeout = jiffies | ||
| 781 | + msecs_to_jiffies(musb->a_wait_bcon); | ||
| 782 | } else { | ||
| 783 | /* REVISIT report overcurrent to hub? */ | ||
| 784 | ERR("vbus too slow, devctl %02x\n", devctl); | ||
| 785 | tusb_source_power(musb, 0); | ||
| 786 | } | ||
| 787 | break; | ||
| 788 | case OTG_STATE_A_WAIT_BCON: | ||
| 789 | if (musb->a_wait_bcon != 0) | ||
| 790 | idle_timeout = jiffies | ||
| 791 | + msecs_to_jiffies(musb->a_wait_bcon); | ||
| 792 | break; | ||
| 793 | case OTG_STATE_A_SUSPEND: | ||
| 794 | break; | ||
| 795 | case OTG_STATE_B_WAIT_ACON: | ||
| 796 | break; | ||
| 797 | default: | ||
| 798 | break; | ||
| 799 | } | ||
| 800 | } | ||
| 801 | schedule_work(&musb->irq_work); | ||
| 802 | |||
| 803 | return idle_timeout; | ||
| 804 | } | ||
| 805 | |||
| 806 | static irqreturn_t tusb_interrupt(int irq, void *__hci) | ||
| 807 | { | ||
| 808 | struct musb *musb = __hci; | ||
| 809 | void __iomem *tbase = musb->ctrl_base; | ||
| 810 | unsigned long flags, idle_timeout = 0; | ||
| 811 | u32 int_mask, int_src; | ||
| 812 | |||
| 813 | spin_lock_irqsave(&musb->lock, flags); | ||
| 814 | |||
| 815 | /* Mask all interrupts to allow using both edge and level GPIO irq */ | ||
| 816 | int_mask = musb_readl(tbase, TUSB_INT_MASK); | ||
| 817 | musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS); | ||
| 818 | |||
| 819 | int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS; | ||
| 820 | DBG(3, "TUSB IRQ %08x\n", int_src); | ||
| 821 | |||
| 822 | musb->int_usb = (u8) int_src; | ||
| 823 | |||
| 824 | /* Acknowledge wake-up source interrupts */ | ||
| 825 | if (int_src & TUSB_INT_SRC_DEV_WAKEUP) { | ||
| 826 | u32 reg; | ||
| 827 | u32 i; | ||
| 828 | |||
| 829 | if (tusb_get_revision(musb) == TUSB_REV_30) | ||
| 830 | tusb_wbus_quirk(musb, 0); | ||
| 831 | |||
| 832 | /* there are issues re-locking the PLL on wakeup ... */ | ||
| 833 | |||
| 834 | /* work around issue 8 */ | ||
| 835 | for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) { | ||
| 836 | musb_writel(tbase, TUSB_SCRATCH_PAD, 0); | ||
| 837 | musb_writel(tbase, TUSB_SCRATCH_PAD, i); | ||
| 838 | reg = musb_readl(tbase, TUSB_SCRATCH_PAD); | ||
| 839 | if (reg == i) | ||
| 840 | break; | ||
| 841 | DBG(6, "TUSB NOR not ready\n"); | ||
| 842 | } | ||
| 843 | |||
| 844 | /* work around issue 13 (2nd half) */ | ||
| 845 | tusb_set_clock_source(musb, 1); | ||
| 846 | |||
| 847 | reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE); | ||
| 848 | musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg); | ||
| 849 | if (reg & ~TUSB_PRCM_WNORCS) { | ||
| 850 | musb->is_active = 1; | ||
| 851 | schedule_work(&musb->irq_work); | ||
| 852 | } | ||
| 853 | DBG(3, "wake %sactive %02x\n", | ||
| 854 | musb->is_active ? "" : "in", reg); | ||
| 855 | |||
| 856 | /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */ | ||
| 857 | } | ||
| 858 | |||
| 859 | if (int_src & TUSB_INT_SRC_USB_IP_CONN) | ||
| 860 | del_timer(&musb_idle_timer); | ||
| 861 | |||
| 862 | /* OTG state change reports (annoyingly) not issued by Mentor core */ | ||
| 863 | if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG | ||
| 864 | | TUSB_INT_SRC_OTG_TIMEOUT | ||
| 865 | | TUSB_INT_SRC_ID_STATUS_CHNG)) | ||
| 866 | idle_timeout = tusb_otg_ints(musb, int_src, tbase); | ||
| 867 | |||
| 868 | /* TX dma callback must be handled here, RX dma callback is | ||
| 869 | * handled in tusb_omap_dma_cb. | ||
| 870 | */ | ||
| 871 | if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) { | ||
| 872 | u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC); | ||
| 873 | u32 real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK); | ||
| 874 | |||
| 875 | DBG(3, "DMA IRQ %08x\n", dma_src); | ||
| 876 | real_dma_src = ~real_dma_src & dma_src; | ||
| 877 | if (tusb_dma_omap() && real_dma_src) { | ||
| 878 | int tx_source = (real_dma_src & 0xffff); | ||
| 879 | int i; | ||
| 880 | |||
| 881 | for (i = 1; i <= 15; i++) { | ||
| 882 | if (tx_source & (1 << i)) { | ||
| 883 | DBG(3, "completing ep%i %s\n", i, "tx"); | ||
| 884 | musb_dma_completion(musb, i, 1); | ||
| 885 | } | ||
| 886 | } | ||
| 887 | } | ||
| 888 | musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src); | ||
| 889 | } | ||
| 890 | |||
| 891 | /* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */ | ||
| 892 | if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) { | ||
| 893 | u32 musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC); | ||
| 894 | |||
| 895 | musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src); | ||
| 896 | musb->int_rx = (((musb_src >> 16) & 0xffff) << 1); | ||
| 897 | musb->int_tx = (musb_src & 0xffff); | ||
| 898 | } else { | ||
| 899 | musb->int_rx = 0; | ||
| 900 | musb->int_tx = 0; | ||
| 901 | } | ||
| 902 | |||
| 903 | if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff)) | ||
| 904 | musb_interrupt(musb); | ||
| 905 | |||
| 906 | /* Acknowledge TUSB interrupts. Clear only non-reserved bits */ | ||
| 907 | musb_writel(tbase, TUSB_INT_SRC_CLEAR, | ||
| 908 | int_src & ~TUSB_INT_MASK_RESERVED_BITS); | ||
| 909 | |||
| 910 | musb_platform_try_idle(musb, idle_timeout); | ||
| 911 | |||
| 912 | musb_writel(tbase, TUSB_INT_MASK, int_mask); | ||
| 913 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 914 | |||
| 915 | return IRQ_HANDLED; | ||
| 916 | } | ||
| 917 | |||
| 918 | static int dma_off; | ||
| 919 | |||
| 920 | /* | ||
| 921 | * Enables TUSB6010. Caller must take care of locking. | ||
| 922 | * REVISIT: | ||
| 923 | * - Check what is unnecessary in MGC_HdrcStart() | ||
| 924 | */ | ||
| 925 | void musb_platform_enable(struct musb *musb) | ||
| 926 | { | ||
| 927 | void __iomem *tbase = musb->ctrl_base; | ||
| 928 | |||
| 929 | /* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF. | ||
| 930 | * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */ | ||
| 931 | musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF); | ||
| 932 | |||
| 933 | /* Setup TUSB interrupt, disable DMA and GPIO interrupts */ | ||
| 934 | musb_writel(tbase, TUSB_USBIP_INT_MASK, 0); | ||
| 935 | musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff); | ||
| 936 | musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff); | ||
| 937 | |||
| 938 | /* Clear all subsystem interrupts */ | ||
| 939 | musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff); | ||
| 940 | musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff); | ||
| 941 | musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff); | ||
| 942 | |||
| 943 | /* Acknowledge pending interrupt(s) */ | ||
| 944 | musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS); | ||
| 945 | |||
| 946 | /* A minimum interrupt de-assertion time of 0 clock cycles and | ||
| 947 | * active-low interrupt polarity seem to work reliably here */ | ||
| 948 | musb_writel(tbase, TUSB_INT_CTRL_CONF, | ||
| 949 | TUSB_INT_CTRL_CONF_INT_RELCYC(0)); | ||
| 950 | |||
| 951 | set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); | ||
| 952 | |||
| 953 | /* maybe force into the Default-A OTG state machine */ | ||
| 954 | if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT) | ||
| 955 | & TUSB_DEV_OTG_STAT_ID_STATUS)) | ||
| 956 | musb_writel(tbase, TUSB_INT_SRC_SET, | ||
| 957 | TUSB_INT_SRC_ID_STATUS_CHNG); | ||
| 958 | |||
| 959 | if (is_dma_capable() && dma_off) | ||
| 960 | printk(KERN_WARNING "%s %s: dma not reactivated\n", | ||
| 961 | __FILE__, __func__); | ||
| 962 | else | ||
| 963 | dma_off = 1; | ||
| 964 | } | ||
| 965 | |||
| 966 | /* | ||
| 967 | * Disables TUSB6010. Caller must take care of locking. | ||
| 968 | */ | ||
| 969 | void musb_platform_disable(struct musb *musb) | ||
| 970 | { | ||
| 971 | void __iomem *tbase = musb->ctrl_base; | ||
| 972 | |||
| 973 | /* FIXME stop DMA, IRQs, timers, ... */ | ||
| 974 | |||
| 975 | /* disable all IRQs */ | ||
| 976 | musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS); | ||
| 977 | musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff); | ||
| 978 | musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff); | ||
| 979 | musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff); | ||
| 980 | |||
| 981 | del_timer(&musb_idle_timer); | ||
| 982 | |||
| 983 | if (is_dma_capable() && !dma_off) { | ||
| 984 | printk(KERN_WARNING "%s %s: dma still active\n", | ||
| 985 | __FILE__, __func__); | ||
| 986 | dma_off = 1; | ||
| 987 | } | ||
| 988 | } | ||
| 989 | |||
| 990 | /* | ||
| 991 | * Sets up TUSB6010 CPU interface specific signals and registers | ||
| 992 | * Note: Settings optimized for OMAP24xx | ||
| 993 | */ | ||
| 994 | static void __init tusb_setup_cpu_interface(struct musb *musb) | ||
| 995 | { | ||
| 996 | void __iomem *tbase = musb->ctrl_base; | ||
| 997 | |||
| 998 | /* | ||
| 999 | * Disable GPIO[5:0] pullups (used as output DMA requests) | ||
| 1000 | * Don't disable GPIO[7:6] as they are needed for wake-up. | ||
| 1001 | */ | ||
| 1002 | musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F); | ||
| 1003 | |||
| 1004 | /* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */ | ||
| 1005 | musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF); | ||
| 1006 | |||
| 1007 | /* Turn GPIO[5:0] to DMAREQ[5:0] signals */ | ||
| 1008 | musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f)); | ||
| 1009 | |||
| 1010 | /* Burst size 16x16 bits, all six DMA requests enabled, DMA request | ||
| 1011 | * de-assertion time 2 system clocks (p. 62) */ | ||
| 1012 | musb_writel(tbase, TUSB_DMA_REQ_CONF, | ||
| 1013 | TUSB_DMA_REQ_CONF_BURST_SIZE(2) | | ||
| 1014 | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) | | ||
| 1015 | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2)); | ||
| 1016 | |||
| 1017 | /* Set 0 wait count for synchronous burst access */ | ||
| 1018 | musb_writel(tbase, TUSB_WAIT_COUNT, 1); | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | static int __init tusb_start(struct musb *musb) | ||
| 1022 | { | ||
| 1023 | void __iomem *tbase = musb->ctrl_base; | ||
| 1024 | int ret = 0; | ||
| 1025 | unsigned long flags; | ||
| 1026 | u32 reg; | ||
| 1027 | |||
| 1028 | if (musb->board_set_power) | ||
| 1029 | ret = musb->board_set_power(1); | ||
| 1030 | if (ret != 0) { | ||
| 1031 | printk(KERN_ERR "tusb: Cannot enable TUSB6010\n"); | ||
| 1032 | return ret; | ||
| 1033 | } | ||
| 1034 | |||
| 1035 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1036 | |||
| 1037 | if (musb_readl(tbase, TUSB_PROD_TEST_RESET) != | ||
| 1038 | TUSB_PROD_TEST_RESET_VAL) { | ||
| 1039 | printk(KERN_ERR "tusb: Unable to detect TUSB6010\n"); | ||
| 1040 | goto err; | ||
| 1041 | } | ||
| 1042 | |||
| 1043 | ret = tusb_print_revision(musb); | ||
| 1044 | if (ret < 2) { | ||
| 1045 | printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n", | ||
| 1046 | ret); | ||
| 1047 | goto err; | ||
| 1048 | } | ||
| 1049 | |||
| 1050 | /* The uint bit for "USB non-PDR interrupt enable" has to be 1 when | ||
| 1051 | * NOR FLASH interface is used */ | ||
| 1052 | musb_writel(tbase, TUSB_VLYNQ_CTRL, 8); | ||
| 1053 | |||
| 1054 | /* Select PHY free running 60MHz as a system clock */ | ||
| 1055 | tusb_set_clock_source(musb, 1); | ||
| 1056 | |||
| 1057 | /* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for | ||
| 1058 | * power saving, enable VBus detect and session end comparators, | ||
| 1059 | * enable IDpullup, enable VBus charging */ | ||
| 1060 | musb_writel(tbase, TUSB_PRCM_MNGMT, | ||
| 1061 | TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) | | ||
| 1062 | TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN | | ||
| 1063 | TUSB_PRCM_MNGMT_OTG_SESS_END_EN | | ||
| 1064 | TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN | | ||
| 1065 | TUSB_PRCM_MNGMT_OTG_ID_PULLUP); | ||
| 1066 | tusb_setup_cpu_interface(musb); | ||
| 1067 | |||
| 1068 | /* simplify: always sense/pullup ID pins, as if in OTG mode */ | ||
| 1069 | reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); | ||
| 1070 | reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
| 1071 | musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg); | ||
| 1072 | |||
| 1073 | reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL); | ||
| 1074 | reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
| 1075 | musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg); | ||
| 1076 | |||
| 1077 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1078 | |||
| 1079 | return 0; | ||
| 1080 | |||
| 1081 | err: | ||
| 1082 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1083 | |||
| 1084 | if (musb->board_set_power) | ||
| 1085 | musb->board_set_power(0); | ||
| 1086 | |||
| 1087 | return -ENODEV; | ||
| 1088 | } | ||
| 1089 | |||
| 1090 | int __init musb_platform_init(struct musb *musb) | ||
| 1091 | { | ||
| 1092 | struct platform_device *pdev; | ||
| 1093 | struct resource *mem; | ||
| 1094 | void __iomem *sync; | ||
| 1095 | int ret; | ||
| 1096 | |||
| 1097 | pdev = to_platform_device(musb->controller); | ||
| 1098 | |||
| 1099 | /* dma address for async dma */ | ||
| 1100 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1101 | musb->async = mem->start; | ||
| 1102 | |||
| 1103 | /* dma address for sync dma */ | ||
| 1104 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 1105 | if (!mem) { | ||
| 1106 | pr_debug("no sync dma resource?\n"); | ||
| 1107 | return -ENODEV; | ||
| 1108 | } | ||
| 1109 | musb->sync = mem->start; | ||
| 1110 | |||
| 1111 | sync = ioremap(mem->start, mem->end - mem->start + 1); | ||
| 1112 | if (!sync) { | ||
| 1113 | pr_debug("ioremap for sync failed\n"); | ||
| 1114 | return -ENOMEM; | ||
| 1115 | } | ||
| 1116 | musb->sync_va = sync; | ||
| 1117 | |||
| 1118 | /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400, | ||
| 1119 | * FIFOs at 0x600, TUSB at 0x800 | ||
| 1120 | */ | ||
| 1121 | musb->mregs += TUSB_BASE_OFFSET; | ||
| 1122 | |||
| 1123 | ret = tusb_start(musb); | ||
| 1124 | if (ret) { | ||
| 1125 | printk(KERN_ERR "Could not start tusb6010 (%d)\n", | ||
| 1126 | ret); | ||
| 1127 | return -ENODEV; | ||
| 1128 | } | ||
| 1129 | musb->isr = tusb_interrupt; | ||
| 1130 | |||
| 1131 | if (is_host_enabled(musb)) | ||
| 1132 | musb->board_set_vbus = tusb_source_power; | ||
| 1133 | if (is_peripheral_enabled(musb)) | ||
| 1134 | musb->xceiv.set_power = tusb_draw_power; | ||
| 1135 | |||
| 1136 | setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); | ||
| 1137 | |||
| 1138 | return ret; | ||
| 1139 | } | ||
| 1140 | |||
| 1141 | int musb_platform_exit(struct musb *musb) | ||
| 1142 | { | ||
| 1143 | del_timer_sync(&musb_idle_timer); | ||
| 1144 | |||
| 1145 | if (musb->board_set_power) | ||
| 1146 | musb->board_set_power(0); | ||
| 1147 | |||
| 1148 | iounmap(musb->sync_va); | ||
| 1149 | |||
| 1150 | return 0; | ||
| 1151 | } | ||
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h new file mode 100644 index 000000000000..ab8c96286ce6 --- /dev/null +++ b/drivers/usb/musb/tusb6010.h | |||
| @@ -0,0 +1,233 @@ | |||
| 1 | /* | ||
| 2 | * Definitions for TUSB6010 USB 2.0 OTG Dual Role controller | ||
| 3 | * | ||
| 4 | * Copyright (C) 2006 Nokia Corporation | ||
| 5 | * Jarkko Nikula <jarkko.nikula@nokia.com> | ||
| 6 | * Tony Lindgren <tony@atomide.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #ifndef __TUSB6010_H__ | ||
| 14 | #define __TUSB6010_H__ | ||
| 15 | |||
| 16 | extern u8 tusb_get_revision(struct musb *musb); | ||
| 17 | |||
| 18 | #ifdef CONFIG_USB_TUSB6010 | ||
| 19 | #define musb_in_tusb() 1 | ||
| 20 | #else | ||
| 21 | #define musb_in_tusb() 0 | ||
| 22 | #endif | ||
| 23 | |||
| 24 | #ifdef CONFIG_USB_TUSB_OMAP_DMA | ||
| 25 | #define tusb_dma_omap() 1 | ||
| 26 | #else | ||
| 27 | #define tusb_dma_omap() 0 | ||
| 28 | #endif | ||
| 29 | |||
| 30 | /* VLYNQ control register. 32-bit at offset 0x000 */ | ||
| 31 | #define TUSB_VLYNQ_CTRL 0x004 | ||
| 32 | |||
| 33 | /* Mentor Graphics OTG core registers. 8-, 16- and 32-bit at offset 0x400 */ | ||
| 34 | #define TUSB_BASE_OFFSET 0x400 | ||
| 35 | |||
| 36 | /* FIFO registers 32-bit at offset 0x600 */ | ||
| 37 | #define TUSB_FIFO_BASE 0x600 | ||
| 38 | |||
| 39 | /* Device System & Control registers. 32-bit at offset 0x800 */ | ||
| 40 | #define TUSB_SYS_REG_BASE 0x800 | ||
| 41 | |||
| 42 | #define TUSB_DEV_CONF (TUSB_SYS_REG_BASE + 0x000) | ||
| 43 | #define TUSB_DEV_CONF_USB_HOST_MODE (1 << 16) | ||
| 44 | #define TUSB_DEV_CONF_PROD_TEST_MODE (1 << 15) | ||
| 45 | #define TUSB_DEV_CONF_SOFT_ID (1 << 1) | ||
| 46 | #define TUSB_DEV_CONF_ID_SEL (1 << 0) | ||
| 47 | |||
| 48 | #define TUSB_PHY_OTG_CTRL_ENABLE (TUSB_SYS_REG_BASE + 0x004) | ||
| 49 | #define TUSB_PHY_OTG_CTRL (TUSB_SYS_REG_BASE + 0x008) | ||
| 50 | #define TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 << 24) | ||
| 51 | #define TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP (1 << 23) | ||
| 52 | #define TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN (1 << 19) | ||
| 53 | #define TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN (1 << 18) | ||
| 54 | #define TUSB_PHY_OTG_CTRL_TESTM2 (1 << 17) | ||
| 55 | #define TUSB_PHY_OTG_CTRL_TESTM1 (1 << 16) | ||
| 56 | #define TUSB_PHY_OTG_CTRL_TESTM0 (1 << 15) | ||
| 57 | #define TUSB_PHY_OTG_CTRL_TX_DATA2 (1 << 14) | ||
| 58 | #define TUSB_PHY_OTG_CTRL_TX_GZ2 (1 << 13) | ||
| 59 | #define TUSB_PHY_OTG_CTRL_TX_ENABLE2 (1 << 12) | ||
| 60 | #define TUSB_PHY_OTG_CTRL_DM_PULLDOWN (1 << 11) | ||
| 61 | #define TUSB_PHY_OTG_CTRL_DP_PULLDOWN (1 << 10) | ||
| 62 | #define TUSB_PHY_OTG_CTRL_OSC_EN (1 << 9) | ||
| 63 | #define TUSB_PHY_OTG_CTRL_PHYREF_CLKSEL(v) (((v) & 3) << 7) | ||
| 64 | #define TUSB_PHY_OTG_CTRL_PD (1 << 6) | ||
| 65 | #define TUSB_PHY_OTG_CTRL_PLL_ON (1 << 5) | ||
| 66 | #define TUSB_PHY_OTG_CTRL_EXT_RPU (1 << 4) | ||
| 67 | #define TUSB_PHY_OTG_CTRL_PWR_GOOD (1 << 3) | ||
| 68 | #define TUSB_PHY_OTG_CTRL_RESET (1 << 2) | ||
| 69 | #define TUSB_PHY_OTG_CTRL_SUSPENDM (1 << 1) | ||
| 70 | #define TUSB_PHY_OTG_CTRL_CLK_MODE (1 << 0) | ||
| 71 | |||
| 72 | /* OTG status register */ | ||
| 73 | #define TUSB_DEV_OTG_STAT (TUSB_SYS_REG_BASE + 0x00c) | ||
| 74 | #define TUSB_DEV_OTG_STAT_PWR_CLK_GOOD (1 << 8) | ||
| 75 | #define TUSB_DEV_OTG_STAT_SESS_END (1 << 7) | ||
| 76 | #define TUSB_DEV_OTG_STAT_SESS_VALID (1 << 6) | ||
| 77 | #define TUSB_DEV_OTG_STAT_VBUS_VALID (1 << 5) | ||
| 78 | #define TUSB_DEV_OTG_STAT_VBUS_SENSE (1 << 4) | ||
| 79 | #define TUSB_DEV_OTG_STAT_ID_STATUS (1 << 3) | ||
| 80 | #define TUSB_DEV_OTG_STAT_HOST_DISCON (1 << 2) | ||
| 81 | #define TUSB_DEV_OTG_STAT_LINE_STATE (3 << 0) | ||
| 82 | #define TUSB_DEV_OTG_STAT_DP_ENABLE (1 << 1) | ||
| 83 | #define TUSB_DEV_OTG_STAT_DM_ENABLE (1 << 0) | ||
| 84 | |||
| 85 | #define TUSB_DEV_OTG_TIMER (TUSB_SYS_REG_BASE + 0x010) | ||
| 86 | # define TUSB_DEV_OTG_TIMER_ENABLE (1 << 31) | ||
| 87 | # define TUSB_DEV_OTG_TIMER_VAL(v) ((v) & 0x07ffffff) | ||
| 88 | #define TUSB_PRCM_REV (TUSB_SYS_REG_BASE + 0x014) | ||
| 89 | |||
| 90 | /* PRCM configuration register */ | ||
| 91 | #define TUSB_PRCM_CONF (TUSB_SYS_REG_BASE + 0x018) | ||
| 92 | #define TUSB_PRCM_CONF_SFW_CPEN (1 << 24) | ||
| 93 | #define TUSB_PRCM_CONF_SYS_CLKSEL(v) (((v) & 3) << 16) | ||
| 94 | |||
| 95 | /* PRCM management register */ | ||
| 96 | #define TUSB_PRCM_MNGMT (TUSB_SYS_REG_BASE + 0x01c) | ||
| 97 | #define TUSB_PRCM_MNGMT_SRP_FIX_TIMER(v) (((v) & 0xf) << 25) | ||
| 98 | #define TUSB_PRCM_MNGMT_SRP_FIX_EN (1 << 24) | ||
| 99 | #define TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(v) (((v) & 0xf) << 20) | ||
| 100 | #define TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN (1 << 19) | ||
| 101 | #define TUSB_PRCM_MNGMT_DFT_CLK_DIS (1 << 18) | ||
| 102 | #define TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS (1 << 17) | ||
| 103 | #define TUSB_PRCM_MNGMT_OTG_SESS_END_EN (1 << 10) | ||
| 104 | #define TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN (1 << 9) | ||
| 105 | #define TUSB_PRCM_MNGMT_OTG_ID_PULLUP (1 << 8) | ||
| 106 | #define TUSB_PRCM_MNGMT_15_SW_EN (1 << 4) | ||
| 107 | #define TUSB_PRCM_MNGMT_33_SW_EN (1 << 3) | ||
| 108 | #define TUSB_PRCM_MNGMT_5V_CPEN (1 << 2) | ||
| 109 | #define TUSB_PRCM_MNGMT_PM_IDLE (1 << 1) | ||
| 110 | #define TUSB_PRCM_MNGMT_DEV_IDLE (1 << 0) | ||
| 111 | |||
| 112 | /* Wake-up source clear and mask registers */ | ||
| 113 | #define TUSB_PRCM_WAKEUP_SOURCE (TUSB_SYS_REG_BASE + 0x020) | ||
| 114 | #define TUSB_PRCM_WAKEUP_CLEAR (TUSB_SYS_REG_BASE + 0x028) | ||
| 115 | #define TUSB_PRCM_WAKEUP_MASK (TUSB_SYS_REG_BASE + 0x02c) | ||
| 116 | #define TUSB_PRCM_WAKEUP_RESERVED_BITS (0xffffe << 13) | ||
| 117 | #define TUSB_PRCM_WGPIO_7 (1 << 12) | ||
| 118 | #define TUSB_PRCM_WGPIO_6 (1 << 11) | ||
| 119 | #define TUSB_PRCM_WGPIO_5 (1 << 10) | ||
| 120 | #define TUSB_PRCM_WGPIO_4 (1 << 9) | ||
| 121 | #define TUSB_PRCM_WGPIO_3 (1 << 8) | ||
| 122 | #define TUSB_PRCM_WGPIO_2 (1 << 7) | ||
| 123 | #define TUSB_PRCM_WGPIO_1 (1 << 6) | ||
| 124 | #define TUSB_PRCM_WGPIO_0 (1 << 5) | ||
| 125 | #define TUSB_PRCM_WHOSTDISCON (1 << 4) /* Host disconnect */ | ||
| 126 | #define TUSB_PRCM_WBUS (1 << 3) /* USB bus resume */ | ||
| 127 | #define TUSB_PRCM_WNORCS (1 << 2) /* NOR chip select */ | ||
| 128 | #define TUSB_PRCM_WVBUS (1 << 1) /* OTG PHY VBUS */ | ||
| 129 | #define TUSB_PRCM_WID (1 << 0) /* OTG PHY ID detect */ | ||
| 130 | |||
| 131 | #define TUSB_PULLUP_1_CTRL (TUSB_SYS_REG_BASE + 0x030) | ||
| 132 | #define TUSB_PULLUP_2_CTRL (TUSB_SYS_REG_BASE + 0x034) | ||
| 133 | #define TUSB_INT_CTRL_REV (TUSB_SYS_REG_BASE + 0x038) | ||
| 134 | #define TUSB_INT_CTRL_CONF (TUSB_SYS_REG_BASE + 0x03c) | ||
| 135 | #define TUSB_USBIP_INT_SRC (TUSB_SYS_REG_BASE + 0x040) | ||
| 136 | #define TUSB_USBIP_INT_SET (TUSB_SYS_REG_BASE + 0x044) | ||
| 137 | #define TUSB_USBIP_INT_CLEAR (TUSB_SYS_REG_BASE + 0x048) | ||
| 138 | #define TUSB_USBIP_INT_MASK (TUSB_SYS_REG_BASE + 0x04c) | ||
| 139 | #define TUSB_DMA_INT_SRC (TUSB_SYS_REG_BASE + 0x050) | ||
| 140 | #define TUSB_DMA_INT_SET (TUSB_SYS_REG_BASE + 0x054) | ||
| 141 | #define TUSB_DMA_INT_CLEAR (TUSB_SYS_REG_BASE + 0x058) | ||
| 142 | #define TUSB_DMA_INT_MASK (TUSB_SYS_REG_BASE + 0x05c) | ||
| 143 | #define TUSB_GPIO_INT_SRC (TUSB_SYS_REG_BASE + 0x060) | ||
| 144 | #define TUSB_GPIO_INT_SET (TUSB_SYS_REG_BASE + 0x064) | ||
| 145 | #define TUSB_GPIO_INT_CLEAR (TUSB_SYS_REG_BASE + 0x068) | ||
| 146 | #define TUSB_GPIO_INT_MASK (TUSB_SYS_REG_BASE + 0x06c) | ||
| 147 | |||
| 148 | /* NOR flash interrupt source registers */ | ||
| 149 | #define TUSB_INT_SRC (TUSB_SYS_REG_BASE + 0x070) | ||
| 150 | #define TUSB_INT_SRC_SET (TUSB_SYS_REG_BASE + 0x074) | ||
| 151 | #define TUSB_INT_SRC_CLEAR (TUSB_SYS_REG_BASE + 0x078) | ||
| 152 | #define TUSB_INT_MASK (TUSB_SYS_REG_BASE + 0x07c) | ||
| 153 | #define TUSB_INT_SRC_TXRX_DMA_DONE (1 << 24) | ||
| 154 | #define TUSB_INT_SRC_USB_IP_CORE (1 << 17) | ||
| 155 | #define TUSB_INT_SRC_OTG_TIMEOUT (1 << 16) | ||
| 156 | #define TUSB_INT_SRC_VBUS_SENSE_CHNG (1 << 15) | ||
| 157 | #define TUSB_INT_SRC_ID_STATUS_CHNG (1 << 14) | ||
| 158 | #define TUSB_INT_SRC_DEV_WAKEUP (1 << 13) | ||
| 159 | #define TUSB_INT_SRC_DEV_READY (1 << 12) | ||
| 160 | #define TUSB_INT_SRC_USB_IP_TX (1 << 9) | ||
| 161 | #define TUSB_INT_SRC_USB_IP_RX (1 << 8) | ||
| 162 | #define TUSB_INT_SRC_USB_IP_VBUS_ERR (1 << 7) | ||
| 163 | #define TUSB_INT_SRC_USB_IP_VBUS_REQ (1 << 6) | ||
| 164 | #define TUSB_INT_SRC_USB_IP_DISCON (1 << 5) | ||
| 165 | #define TUSB_INT_SRC_USB_IP_CONN (1 << 4) | ||
| 166 | #define TUSB_INT_SRC_USB_IP_SOF (1 << 3) | ||
| 167 | #define TUSB_INT_SRC_USB_IP_RST_BABBLE (1 << 2) | ||
| 168 | #define TUSB_INT_SRC_USB_IP_RESUME (1 << 1) | ||
| 169 | #define TUSB_INT_SRC_USB_IP_SUSPEND (1 << 0) | ||
| 170 | |||
| 171 | /* NOR flash interrupt registers reserved bits. Must be written as 0 */ | ||
| 172 | #define TUSB_INT_MASK_RESERVED_17 (0x3fff << 17) | ||
| 173 | #define TUSB_INT_MASK_RESERVED_13 (1 << 13) | ||
| 174 | #define TUSB_INT_MASK_RESERVED_8 (0xf << 8) | ||
| 175 | #define TUSB_INT_SRC_RESERVED_26 (0x1f << 26) | ||
| 176 | #define TUSB_INT_SRC_RESERVED_18 (0x3f << 18) | ||
| 177 | #define TUSB_INT_SRC_RESERVED_10 (0x03 << 10) | ||
| 178 | |||
| 179 | /* Reserved bits for NOR flash interrupt mask and clear register */ | ||
| 180 | #define TUSB_INT_MASK_RESERVED_BITS (TUSB_INT_MASK_RESERVED_17 | \ | ||
| 181 | TUSB_INT_MASK_RESERVED_13 | \ | ||
| 182 | TUSB_INT_MASK_RESERVED_8) | ||
| 183 | |||
| 184 | /* Reserved bits for NOR flash interrupt status register */ | ||
| 185 | #define TUSB_INT_SRC_RESERVED_BITS (TUSB_INT_SRC_RESERVED_26 | \ | ||
| 186 | TUSB_INT_SRC_RESERVED_18 | \ | ||
| 187 | TUSB_INT_SRC_RESERVED_10) | ||
| 188 | |||
| 189 | #define TUSB_GPIO_REV (TUSB_SYS_REG_BASE + 0x080) | ||
| 190 | #define TUSB_GPIO_CONF (TUSB_SYS_REG_BASE + 0x084) | ||
| 191 | #define TUSB_DMA_CTRL_REV (TUSB_SYS_REG_BASE + 0x100) | ||
| 192 | #define TUSB_DMA_REQ_CONF (TUSB_SYS_REG_BASE + 0x104) | ||
| 193 | #define TUSB_EP0_CONF (TUSB_SYS_REG_BASE + 0x108) | ||
| 194 | #define TUSB_DMA_EP_MAP (TUSB_SYS_REG_BASE + 0x148) | ||
| 195 | |||
| 196 | /* Offsets from each ep base register */ | ||
| 197 | #define TUSB_EP_TX_OFFSET 0x10c /* EP_IN in docs */ | ||
| 198 | #define TUSB_EP_RX_OFFSET 0x14c /* EP_OUT in docs */ | ||
| 199 | #define TUSB_EP_MAX_PACKET_SIZE_OFFSET 0x188 | ||
| 200 | |||
| 201 | #define TUSB_WAIT_COUNT (TUSB_SYS_REG_BASE + 0x1c8) | ||
| 202 | #define TUSB_SCRATCH_PAD (TUSB_SYS_REG_BASE + 0x1c4) | ||
| 203 | #define TUSB_PROD_TEST_RESET (TUSB_SYS_REG_BASE + 0x1d8) | ||
| 204 | |||
| 205 | /* Device System & Control register bitfields */ | ||
| 206 | #define TUSB_INT_CTRL_CONF_INT_RELCYC(v) (((v) & 0x7) << 18) | ||
| 207 | #define TUSB_INT_CTRL_CONF_INT_POLARITY (1 << 17) | ||
| 208 | #define TUSB_INT_CTRL_CONF_INT_MODE (1 << 16) | ||
| 209 | #define TUSB_GPIO_CONF_DMAREQ(v) (((v) & 0x3f) << 24) | ||
| 210 | #define TUSB_DMA_REQ_CONF_BURST_SIZE(v) (((v) & 3) << 26) | ||
| 211 | #define TUSB_DMA_REQ_CONF_DMA_REQ_EN(v) (((v) & 0x3f) << 20) | ||
| 212 | #define TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(v) (((v) & 0xf) << 16) | ||
| 213 | #define TUSB_EP0_CONFIG_SW_EN (1 << 8) | ||
| 214 | #define TUSB_EP0_CONFIG_DIR_TX (1 << 7) | ||
| 215 | #define TUSB_EP0_CONFIG_XFR_SIZE(v) ((v) & 0x7f) | ||
| 216 | #define TUSB_EP_CONFIG_SW_EN (1 << 31) | ||
| 217 | #define TUSB_EP_CONFIG_XFR_SIZE(v) ((v) & 0x7fffffff) | ||
| 218 | #define TUSB_PROD_TEST_RESET_VAL 0xa596 | ||
| 219 | #define TUSB_EP_FIFO(ep) (TUSB_FIFO_BASE + (ep) * 0x20) | ||
| 220 | |||
| 221 | #define TUSB_DIDR1_LO (TUSB_SYS_REG_BASE + 0x1f8) | ||
| 222 | #define TUSB_DIDR1_HI (TUSB_SYS_REG_BASE + 0x1fc) | ||
| 223 | #define TUSB_DIDR1_HI_CHIP_REV(v) (((v) >> 17) & 0xf) | ||
| 224 | #define TUSB_DIDR1_HI_REV_20 0 | ||
| 225 | #define TUSB_DIDR1_HI_REV_30 1 | ||
| 226 | #define TUSB_DIDR1_HI_REV_31 2 | ||
| 227 | |||
| 228 | #define TUSB_REV_10 0x10 | ||
| 229 | #define TUSB_REV_20 0x20 | ||
| 230 | #define TUSB_REV_30 0x30 | ||
| 231 | #define TUSB_REV_31 0x31 | ||
| 232 | |||
| 233 | #endif /* __TUSB6010_H__ */ | ||
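
The bitfield helpers above operate on raw 32-bit register values read with musb_readl(). As an illustration only (the driver's real tusb_get_revision()/tusb_print_revision() live in tusb6010.c and may differ in detail), TUSB_DIDR1_HI_CHIP_REV() could be used roughly like this; the fallback to TUSB_REV_10 for unknown field values is an assumption:

static u8 example_chip_revision(struct musb *musb)
{
	u32 didr1_hi = musb_readl(musb->ctrl_base, TUSB_DIDR1_HI);

	/* Map the DIDR1 revision field to the TUSB_REV_* values above */
	switch (TUSB_DIDR1_HI_CHIP_REV(didr1_hi)) {
	case TUSB_DIDR1_HI_REV_20:
		return TUSB_REV_20;
	case TUSB_DIDR1_HI_REV_30:
		return TUSB_REV_30;
	case TUSB_DIDR1_HI_REV_31:
		return TUSB_REV_31;
	default:
		return TUSB_REV_10;	/* assumption: treat unknown as oldest */
	}
}

The revision matters because tusb_start() in tusb6010.c refuses to run on unsupported silicon, and dma_controller_create() in tusb6010_omap.c only enables multichannel DMA from revision 3.0 onwards.
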
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c new file mode 100644 index 000000000000..52f7f29cebda --- /dev/null +++ b/drivers/usb/musb/tusb6010_omap.c | |||
| @@ -0,0 +1,719 @@ | |||
| 1 | /* | ||
| 2 | * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface | ||
| 3 | * | ||
| 4 | * Copyright (C) 2006 Nokia Corporation | ||
| 5 | * Tony Lindgren <tony@atomide.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | */ | ||
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/kernel.h> | ||
| 13 | #include <linux/errno.h> | ||
| 14 | #include <linux/init.h> | ||
| 15 | #include <linux/usb.h> | ||
| 16 | #include <linux/platform_device.h> | ||
| 17 | #include <linux/dma-mapping.h> | ||
| 18 | #include <asm/arch/dma.h> | ||
| 19 | #include <asm/arch/mux.h> | ||
| 20 | |||
| 21 | #include "musb_core.h" | ||
| 22 | |||
| 23 | #define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data) | ||
| 24 | |||
| 25 | #define MAX_DMAREQ 5 /* REVISIT: Really 6, but req5 not OK */ | ||
| 26 | |||
| 27 | struct tusb_omap_dma_ch { | ||
| 28 | struct musb *musb; | ||
| 29 | void __iomem *tbase; | ||
| 30 | unsigned long phys_offset; | ||
| 31 | int epnum; | ||
| 32 | u8 tx; | ||
| 33 | struct musb_hw_ep *hw_ep; | ||
| 34 | |||
| 35 | int ch; | ||
| 36 | s8 dmareq; | ||
| 37 | s8 sync_dev; | ||
| 38 | |||
| 39 | struct tusb_omap_dma *tusb_dma; | ||
| 40 | |||
| 41 | void __iomem *dma_addr; | ||
| 42 | |||
| 43 | u32 len; | ||
| 44 | u16 packet_sz; | ||
| 45 | u16 transfer_packet_sz; | ||
| 46 | u32 transfer_len; | ||
| 47 | u32 completed_len; | ||
| 48 | }; | ||
| 49 | |||
| 50 | struct tusb_omap_dma { | ||
| 51 | struct dma_controller controller; | ||
| 52 | struct musb *musb; | ||
| 53 | void __iomem *tbase; | ||
| 54 | |||
| 55 | int ch; | ||
| 56 | s8 dmareq; | ||
| 57 | s8 sync_dev; | ||
| 58 | unsigned multichannel:1; | ||
| 59 | }; | ||
| 60 | |||
| 61 | static int tusb_omap_dma_start(struct dma_controller *c) | ||
| 62 | { | ||
| 63 | struct tusb_omap_dma *tusb_dma; | ||
| 64 | |||
| 65 | tusb_dma = container_of(c, struct tusb_omap_dma, controller); | ||
| 66 | |||
| 67 | /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ | ||
| 68 | |||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | static int tusb_omap_dma_stop(struct dma_controller *c) | ||
| 73 | { | ||
| 74 | struct tusb_omap_dma *tusb_dma; | ||
| 75 | |||
| 76 | tusb_dma = container_of(c, struct tusb_omap_dma, controller); | ||
| 77 | |||
| 78 | /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ | ||
| 79 | |||
| 80 | return 0; | ||
| 81 | } | ||
| 82 | |||
| 83 | /* | ||
| 84 | * Allocate dmareq0 to the current channel unless it's already taken | ||
| 85 | */ | ||
| 86 | static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat) | ||
| 87 | { | ||
| 88 | u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); | ||
| 89 | |||
| 90 | if (reg != 0) { | ||
| 91 | DBG(3, "ep%i dmareq0 is busy for ep%i\n", | ||
| 92 | chdat->epnum, reg & 0xf); | ||
| 93 | return -EAGAIN; | ||
| 94 | } | ||
| 95 | |||
| 96 | if (chdat->tx) | ||
| 97 | reg = (1 << 4) | chdat->epnum; | ||
| 98 | else | ||
| 99 | reg = chdat->epnum; | ||
| 100 | |||
| 101 | musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); | ||
| 102 | |||
| 103 | return 0; | ||
| 104 | } | ||
| 105 | |||
| 106 | static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat) | ||
| 107 | { | ||
| 108 | u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); | ||
| 109 | |||
| 110 | if ((reg & 0xf) != chdat->epnum) { | ||
| 111 | printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n", | ||
| 112 | chdat->epnum, reg & 0xf); | ||
| 113 | return; | ||
| 114 | } | ||
| 115 | musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0); | ||
| 116 | } | ||
| 117 | |||
| 118 | /* | ||
| 119 | * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in | ||
| 120 | * musb_gadget.c. | ||
| 121 | */ | ||
| 122 | static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data) | ||
| 123 | { | ||
| 124 | struct dma_channel *channel = (struct dma_channel *)data; | ||
| 125 | struct tusb_omap_dma_ch *chdat = to_chdat(channel); | ||
| 126 | struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; | ||
| 127 | struct musb *musb = chdat->musb; | ||
| 128 | struct musb_hw_ep *hw_ep = chdat->hw_ep; | ||
| 129 | void __iomem *ep_conf = hw_ep->conf; | ||
| 130 | void __iomem *mbase = musb->mregs; | ||
| 131 | unsigned long remaining, flags, pio; | ||
| 132 | int ch; | ||
| 133 | |||
| 134 | spin_lock_irqsave(&musb->lock, flags); | ||
| 135 | |||
| 136 | if (tusb_dma->multichannel) | ||
| 137 | ch = chdat->ch; | ||
| 138 | else | ||
| 139 | ch = tusb_dma->ch; | ||
| 140 | |||
| 141 | if (ch_status != OMAP_DMA_BLOCK_IRQ) | ||
| 142 | printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status); | ||
| 143 | |||
| 144 | DBG(3, "ep%i %s dma callback ch: %i status: %x\n", | ||
| 145 | chdat->epnum, chdat->tx ? "tx" : "rx", | ||
| 146 | ch, ch_status); | ||
| 147 | |||
| 148 | if (chdat->tx) | ||
| 149 | remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET); | ||
| 150 | else | ||
| 151 | remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET); | ||
| 152 | |||
| 153 | remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining); | ||
| 154 | |||
| 155 | /* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */ | ||
| 156 | if (unlikely(remaining > chdat->transfer_len)) { | ||
| 157 | DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n", | ||
| 158 | chdat->tx ? "tx" : "rx", chdat->ch, | ||
| 159 | remaining); | ||
| 160 | remaining = 0; | ||
| 161 | } | ||
| 162 | |||
| 163 | channel->actual_len = chdat->transfer_len - remaining; | ||
| 164 | pio = chdat->len - channel->actual_len; | ||
| 165 | |||
| 166 | DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len); | ||
| 167 | |||
| 168 | /* Transfer remaining 1 - 31 bytes */ | ||
| 169 | if (pio > 0 && pio < 32) { | ||
| 170 | u8 *buf; | ||
| 171 | |||
| 172 | DBG(3, "Using PIO for remaining %lu bytes\n", pio); | ||
| 173 | buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len; | ||
| 174 | if (chdat->tx) { | ||
| 175 | dma_cache_maint(phys_to_virt((u32)chdat->dma_addr), | ||
| 176 | chdat->transfer_len, DMA_TO_DEVICE); | ||
| 177 | musb_write_fifo(hw_ep, pio, buf); | ||
| 178 | } else { | ||
| 179 | musb_read_fifo(hw_ep, pio, buf); | ||
| 180 | dma_cache_maint(phys_to_virt((u32)chdat->dma_addr), | ||
| 181 | chdat->transfer_len, DMA_FROM_DEVICE); | ||
| 182 | } | ||
| 183 | channel->actual_len += pio; | ||
| 184 | } | ||
| 185 | |||
| 186 | if (!tusb_dma->multichannel) | ||
| 187 | tusb_omap_free_shared_dmareq(chdat); | ||
| 188 | |||
| 189 | channel->status = MUSB_DMA_STATUS_FREE; | ||
| 190 | |||
| 191 | /* Handle only RX callbacks here. TX callbacks must be handled based | ||
| 192 | * on the TUSB DMA status interrupt. | ||
| 193 | * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback | ||
| 194 | * interrupt for RX and TX. | ||
| 195 | */ | ||
| 196 | if (!chdat->tx) | ||
| 197 | musb_dma_completion(musb, chdat->epnum, chdat->tx); | ||
| 198 | |||
| 199 | /* We must terminate short tx transfers manually by setting TXPKTRDY. | ||
| 200 | * REVISIT: This same problem may occur with other MUSB dma as well. | ||
| 201 | * Easy to test with g_ether by pinging the MUSB board with ping -s54. | ||
| 202 | */ | ||
| 203 | if ((chdat->transfer_len < chdat->packet_sz) | ||
| 204 | || (chdat->transfer_len % chdat->packet_sz != 0)) { | ||
| 205 | u16 csr; | ||
| 206 | |||
| 207 | if (chdat->tx) { | ||
| 208 | DBG(3, "terminating short tx packet\n"); | ||
| 209 | musb_ep_select(mbase, chdat->epnum); | ||
| 210 | csr = musb_readw(hw_ep->regs, MUSB_TXCSR); | ||
| 211 | csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY | ||
| 212 | | MUSB_TXCSR_P_WZC_BITS; | ||
| 213 | musb_writew(hw_ep->regs, MUSB_TXCSR, csr); | ||
| 214 | } | ||
| 215 | } | ||
| 216 | |||
| 217 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 218 | } | ||
| 219 | |||
| 220 | static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, | ||
| 221 | u8 rndis_mode, dma_addr_t dma_addr, u32 len) | ||
| 222 | { | ||
| 223 | struct tusb_omap_dma_ch *chdat = to_chdat(channel); | ||
| 224 | struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; | ||
| 225 | struct musb *musb = chdat->musb; | ||
| 226 | struct musb_hw_ep *hw_ep = chdat->hw_ep; | ||
| 227 | void __iomem *mbase = musb->mregs; | ||
| 228 | void __iomem *ep_conf = hw_ep->conf; | ||
| 229 | dma_addr_t fifo = hw_ep->fifo_sync; | ||
| 230 | struct omap_dma_channel_params dma_params; | ||
| 231 | u32 dma_remaining; | ||
| 232 | int src_burst, dst_burst; | ||
| 233 | u16 csr; | ||
| 234 | int ch; | ||
| 235 | s8 dmareq; | ||
| 236 | s8 sync_dev; | ||
| 237 | |||
| 238 | if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz)) | ||
| 239 | return false; | ||
| 240 | |||
| 241 | /* | ||
| 242 | * HW issue #10: Async dma will eventually corrupt the XFR_SIZE | ||
| 243 | * register which will cause missed DMA interrupt. We could try to | ||
| 244 | * use a timer for the callback, but it is unsafe as the XFR_SIZE | ||
| 245 | * register is corrupt, and we won't know if the DMA worked. | ||
| 246 | */ | ||
| 247 | if (dma_addr & 0x2) | ||
| 248 | return false; | ||
| 249 | |||
| 250 | /* | ||
| 251 | * Because of HW issue #10, it seems like mixing sync DMA and async | ||
| 252 | * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before | ||
| 253 | * using the channel for DMA. | ||
| 254 | */ | ||
| 255 | if (chdat->tx) | ||
| 256 | dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET); | ||
| 257 | else | ||
| 258 | dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET); | ||
| 259 | |||
| 260 | dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining); | ||
| 261 | if (dma_remaining) { | ||
| 262 | DBG(2, "Busy %s dma ch%i, not using: %08x\n", | ||
| 263 | chdat->tx ? "tx" : "rx", chdat->ch, | ||
| 264 | dma_remaining); | ||
| 265 | return false; | ||
| 266 | } | ||
| 267 | |||
| 268 | chdat->transfer_len = len & ~0x1f; | ||
| 269 | |||
| 270 | if (len < packet_sz) | ||
| 271 | chdat->transfer_packet_sz = chdat->transfer_len; | ||
| 272 | else | ||
| 273 | chdat->transfer_packet_sz = packet_sz; | ||
| 274 | |||
| 275 | if (tusb_dma->multichannel) { | ||
| 276 | ch = chdat->ch; | ||
| 277 | dmareq = chdat->dmareq; | ||
| 278 | sync_dev = chdat->sync_dev; | ||
| 279 | } else { | ||
| 280 | if (tusb_omap_use_shared_dmareq(chdat) != 0) { | ||
| 281 | DBG(3, "could not get dma for ep%i\n", chdat->epnum); | ||
| 282 | return false; | ||
| 283 | } | ||
| 284 | if (tusb_dma->ch < 0) { | ||
| 285 | /* REVISIT: This should get blocked earlier, happens | ||
| 286 | * with MSC ErrorRecoveryTest | ||
| 287 | */ | ||
| 288 | WARN_ON(1); | ||
| 289 | return false; | ||
| 290 | } | ||
| 291 | |||
| 292 | ch = tusb_dma->ch; | ||
| 293 | dmareq = tusb_dma->dmareq; | ||
| 294 | sync_dev = tusb_dma->sync_dev; | ||
| 295 | omap_set_dma_callback(ch, tusb_omap_dma_cb, channel); | ||
| 296 | } | ||
| 297 | |||
| 298 | chdat->packet_sz = packet_sz; | ||
| 299 | chdat->len = len; | ||
| 300 | channel->actual_len = 0; | ||
| 301 | chdat->dma_addr = (void __iomem *)dma_addr; | ||
| 302 | channel->status = MUSB_DMA_STATUS_BUSY; | ||
| 303 | |||
| 304 | /* Since we're recycling dma areas, we need to clean or invalidate */ | ||
| 305 | if (chdat->tx) | ||
| 306 | dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE); | ||
| 307 | else | ||
| 308 | dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE); | ||
| 309 | |||
| 310 | /* Use 16-bit transfer if dma_addr is not 32-bit aligned */ | ||
| 311 | if ((dma_addr & 0x3) == 0) { | ||
| 312 | dma_params.data_type = OMAP_DMA_DATA_TYPE_S32; | ||
| 313 | dma_params.elem_count = 8; /* Elements in frame */ | ||
| 314 | } else { | ||
| 315 | dma_params.data_type = OMAP_DMA_DATA_TYPE_S16; | ||
| 316 | dma_params.elem_count = 16; /* Elements in frame */ | ||
| 317 | fifo = hw_ep->fifo_async; | ||
| 318 | } | ||
| 319 | |||
| 320 | dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */ | ||
| 321 | |||
| 322 | DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n", | ||
| 323 | chdat->epnum, chdat->tx ? "tx" : "rx", | ||
| 324 | ch, dma_addr, chdat->transfer_len, len, | ||
| 325 | chdat->transfer_packet_sz, packet_sz); | ||
| 326 | |||
| 327 | /* | ||
| 328 | * Prepare omap DMA for transfer | ||
| 329 | */ | ||
| 330 | if (chdat->tx) { | ||
| 331 | dma_params.src_amode = OMAP_DMA_AMODE_POST_INC; | ||
| 332 | dma_params.src_start = (unsigned long)dma_addr; | ||
| 333 | dma_params.src_ei = 0; | ||
| 334 | dma_params.src_fi = 0; | ||
| 335 | |||
| 336 | dma_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX; | ||
| 337 | dma_params.dst_start = (unsigned long)fifo; | ||
| 338 | dma_params.dst_ei = 1; | ||
| 339 | dma_params.dst_fi = -31; /* Loop 32 byte window */ | ||
| 340 | |||
| 341 | dma_params.trigger = sync_dev; | ||
| 342 | dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; | ||
| 343 | dma_params.src_or_dst_synch = 0; /* Dest sync */ | ||
| 344 | |||
| 345 | src_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 read */ | ||
| 346 | dst_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 write */ | ||
| 347 | } else { | ||
| 348 | dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX; | ||
| 349 | dma_params.src_start = (unsigned long)fifo; | ||
| 350 | dma_params.src_ei = 1; | ||
| 351 | dma_params.src_fi = -31; /* Loop 32 byte window */ | ||
| 352 | |||
| 353 | dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC; | ||
| 354 | dma_params.dst_start = (unsigned long)dma_addr; | ||
| 355 | dma_params.dst_ei = 0; | ||
| 356 | dma_params.dst_fi = 0; | ||
| 357 | |||
| 358 | dma_params.trigger = sync_dev; | ||
| 359 | dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; | ||
| 360 | dma_params.src_or_dst_synch = 1; /* Source sync */ | ||
| 361 | |||
| 362 | src_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 read */ | ||
| 363 | dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */ | ||
| 364 | } | ||
| 365 | |||
| 366 | DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n", | ||
| 367 | chdat->epnum, chdat->tx ? "tx" : "rx", | ||
| 368 | (dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16, | ||
| 369 | ((dma_addr & 0x3) == 0) ? "sync" : "async", | ||
| 370 | dma_params.src_start, dma_params.dst_start); | ||
| 371 | |||
| 372 | omap_set_dma_params(ch, &dma_params); | ||
| 373 | omap_set_dma_src_burst_mode(ch, src_burst); | ||
| 374 | omap_set_dma_dest_burst_mode(ch, dst_burst); | ||
| 375 | omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED); | ||
| 376 | |||
| 377 | /* | ||
| 378 | * Prepare MUSB for DMA transfer | ||
| 379 | */ | ||
| 380 | if (chdat->tx) { | ||
| 381 | musb_ep_select(mbase, chdat->epnum); | ||
| 382 | csr = musb_readw(hw_ep->regs, MUSB_TXCSR); | ||
| 383 | csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB | ||
| 384 | | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); | ||
| 385 | csr &= ~MUSB_TXCSR_P_UNDERRUN; | ||
| 386 | musb_writew(hw_ep->regs, MUSB_TXCSR, csr); | ||
| 387 | } else { | ||
| 388 | musb_ep_select(mbase, chdat->epnum); | ||
| 389 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); | ||
| 390 | csr |= MUSB_RXCSR_DMAENAB; | ||
| 391 | csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE); | ||
| 392 | musb_writew(hw_ep->regs, MUSB_RXCSR, | ||
| 393 | csr | MUSB_RXCSR_P_WZC_BITS); | ||
| 394 | } | ||
| 395 | |||
| 396 | /* | ||
| 397 | * Start DMA transfer | ||
| 398 | */ | ||
| 399 | omap_start_dma(ch); | ||
| 400 | |||
| 401 | if (chdat->tx) { | ||
| 402 | /* Send transfer_packet_sz packets at a time */ | ||
| 403 | musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, | ||
| 404 | chdat->transfer_packet_sz); | ||
| 405 | |||
| 406 | musb_writel(ep_conf, TUSB_EP_TX_OFFSET, | ||
| 407 | TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); | ||
| 408 | } else { | ||
| 409 | /* Receive transfer_packet_sz packets at a time */ | ||
| 410 | musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, | ||
| 411 | chdat->transfer_packet_sz << 16); | ||
| 412 | |||
| 413 | musb_writel(ep_conf, TUSB_EP_RX_OFFSET, | ||
| 414 | TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); | ||
| 415 | } | ||
| 416 | |||
| 417 | return true; | ||
| 418 | } | ||
| 419 | |||
| 420 | static int tusb_omap_dma_abort(struct dma_channel *channel) | ||
| 421 | { | ||
| 422 | struct tusb_omap_dma_ch *chdat = to_chdat(channel); | ||
| 423 | struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; | ||
| 424 | |||
| 425 | if (!tusb_dma->multichannel) { | ||
| 426 | if (tusb_dma->ch >= 0) { | ||
| 427 | omap_stop_dma(tusb_dma->ch); | ||
| 428 | omap_free_dma(tusb_dma->ch); | ||
| 429 | tusb_dma->ch = -1; | ||
| 430 | } | ||
| 431 | |||
| 432 | tusb_dma->dmareq = -1; | ||
| 433 | tusb_dma->sync_dev = -1; | ||
| 434 | } | ||
| 435 | |||
| 436 | channel->status = MUSB_DMA_STATUS_FREE; | ||
| 437 | |||
| 438 | return 0; | ||
| 439 | } | ||
| 440 | |||
| 441 | static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat) | ||
| 442 | { | ||
| 443 | u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); | ||
| 444 | int i, dmareq_nr = -1; | ||
| 445 | |||
| 446 | const int sync_dev[6] = { | ||
| 447 | OMAP24XX_DMA_EXT_DMAREQ0, | ||
| 448 | OMAP24XX_DMA_EXT_DMAREQ1, | ||
| 449 | OMAP242X_DMA_EXT_DMAREQ2, | ||
| 450 | OMAP242X_DMA_EXT_DMAREQ3, | ||
| 451 | OMAP242X_DMA_EXT_DMAREQ4, | ||
| 452 | OMAP242X_DMA_EXT_DMAREQ5, | ||
| 453 | }; | ||
| 454 | |||
| 455 | for (i = 0; i < MAX_DMAREQ; i++) { | ||
| 456 | int cur = (reg & (0xf << (i * 5))) >> (i * 5); | ||
| 457 | if (cur == 0) { | ||
| 458 | dmareq_nr = i; | ||
| 459 | break; | ||
| 460 | } | ||
| 461 | } | ||
| 462 | |||
| 463 | if (dmareq_nr == -1) | ||
| 464 | return -EAGAIN; | ||
| 465 | |||
| 466 | reg |= (chdat->epnum << (dmareq_nr * 5)); | ||
| 467 | if (chdat->tx) | ||
| 468 | reg |= ((1 << 4) << (dmareq_nr * 5)); | ||
| 469 | musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); | ||
| 470 | |||
| 471 | chdat->dmareq = dmareq_nr; | ||
| 472 | chdat->sync_dev = sync_dev[chdat->dmareq]; | ||
| 473 | |||
| 474 | return 0; | ||
| 475 | } | ||
| 476 | |||
| 477 | static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat) | ||
| 478 | { | ||
| 479 | u32 reg; | ||
| 480 | |||
| 481 | if (!chdat || chdat->dmareq < 0) | ||
| 482 | return; | ||
| 483 | |||
| 484 | reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); | ||
| 485 | reg &= ~(0x1f << (chdat->dmareq * 5)); | ||
| 486 | musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); | ||
| 487 | |||
| 488 | chdat->dmareq = -1; | ||
| 489 | chdat->sync_dev = -1; | ||
| 490 | } | ||
| 491 | |||
| 492 | static struct dma_channel *dma_channel_pool[MAX_DMAREQ]; | ||
| 493 | |||
| 494 | static struct dma_channel * | ||
| 495 | tusb_omap_dma_allocate(struct dma_controller *c, | ||
| 496 | struct musb_hw_ep *hw_ep, | ||
| 497 | u8 tx) | ||
| 498 | { | ||
| 499 | int ret, i; | ||
| 500 | const char *dev_name; | ||
| 501 | struct tusb_omap_dma *tusb_dma; | ||
| 502 | struct musb *musb; | ||
| 503 | void __iomem *tbase; | ||
| 504 | struct dma_channel *channel = NULL; | ||
| 505 | struct tusb_omap_dma_ch *chdat = NULL; | ||
| 506 | u32 reg; | ||
| 507 | |||
| 508 | tusb_dma = container_of(c, struct tusb_omap_dma, controller); | ||
| 509 | musb = tusb_dma->musb; | ||
| 510 | tbase = musb->ctrl_base; | ||
| 511 | |||
| 512 | reg = musb_readl(tbase, TUSB_DMA_INT_MASK); | ||
| 513 | if (tx) | ||
| 514 | reg &= ~(1 << hw_ep->epnum); | ||
| 515 | else | ||
| 516 | reg &= ~(1 << (hw_ep->epnum + 15)); | ||
| 517 | musb_writel(tbase, TUSB_DMA_INT_MASK, reg); | ||
| 518 | |||
| 519 | /* REVISIT: Why does dmareq5 not work? */ | ||
| 520 | if (hw_ep->epnum == 0) { | ||
| 521 | DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx"); | ||
| 522 | return NULL; | ||
| 523 | } | ||
| 524 | |||
| 525 | for (i = 0; i < MAX_DMAREQ; i++) { | ||
| 526 | struct dma_channel *ch = dma_channel_pool[i]; | ||
| 527 | if (ch->status == MUSB_DMA_STATUS_UNKNOWN) { | ||
| 528 | ch->status = MUSB_DMA_STATUS_FREE; | ||
| 529 | channel = ch; | ||
| 530 | chdat = ch->private_data; | ||
| 531 | break; | ||
| 532 | } | ||
| 533 | } | ||
| 534 | |||
| 535 | if (!channel) | ||
| 536 | return NULL; | ||
| 537 | |||
| 538 | if (tx) { | ||
| 539 | chdat->tx = 1; | ||
| 540 | dev_name = "TUSB transmit"; | ||
| 541 | } else { | ||
| 542 | chdat->tx = 0; | ||
| 543 | dev_name = "TUSB receive"; | ||
| 544 | } | ||
| 545 | |||
| 546 | chdat->musb = tusb_dma->musb; | ||
| 547 | chdat->tbase = tusb_dma->tbase; | ||
| 548 | chdat->hw_ep = hw_ep; | ||
| 549 | chdat->epnum = hw_ep->epnum; | ||
| 550 | chdat->dmareq = -1; | ||
| 551 | chdat->completed_len = 0; | ||
| 552 | chdat->tusb_dma = tusb_dma; | ||
| 553 | |||
| 554 | channel->max_len = 0x7fffffff; | ||
| 555 | channel->desired_mode = 0; | ||
| 556 | channel->actual_len = 0; | ||
| 557 | |||
| 558 | if (tusb_dma->multichannel) { | ||
| 559 | ret = tusb_omap_dma_allocate_dmareq(chdat); | ||
| 560 | if (ret != 0) | ||
| 561 | goto free_dmareq; | ||
| 562 | |||
| 563 | ret = omap_request_dma(chdat->sync_dev, dev_name, | ||
| 564 | tusb_omap_dma_cb, channel, &chdat->ch); | ||
| 565 | if (ret != 0) | ||
| 566 | goto free_dmareq; | ||
| 567 | } else if (tusb_dma->ch == -1) { | ||
| 568 | tusb_dma->dmareq = 0; | ||
| 569 | tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0; | ||
| 570 | |||
| 571 | /* Callback data gets set later in the shared dmareq case */ | ||
| 572 | ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared", | ||
| 573 | tusb_omap_dma_cb, NULL, &tusb_dma->ch); | ||
| 574 | if (ret != 0) | ||
| 575 | goto free_dmareq; | ||
| 576 | |||
| 577 | chdat->dmareq = -1; | ||
| 578 | chdat->ch = -1; | ||
| 579 | } | ||
| 580 | |||
| 581 | DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n", | ||
| 582 | chdat->epnum, | ||
| 583 | chdat->tx ? "tx" : "rx", | ||
| 584 | chdat->ch >= 0 ? "dedicated" : "shared", | ||
| 585 | chdat->ch >= 0 ? chdat->ch : tusb_dma->ch, | ||
| 586 | chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq, | ||
| 587 | chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev); | ||
| 588 | |||
| 589 | return channel; | ||
| 590 | |||
| 591 | free_dmareq: | ||
| 592 | tusb_omap_dma_free_dmareq(chdat); | ||
| 593 | |||
| 594 | DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum); | ||
| 595 | channel->status = MUSB_DMA_STATUS_UNKNOWN; | ||
| 596 | |||
| 597 | return NULL; | ||
| 598 | } | ||
| 599 | |||
| 600 | static void tusb_omap_dma_release(struct dma_channel *channel) | ||
| 601 | { | ||
| 602 | struct tusb_omap_dma_ch *chdat = to_chdat(channel); | ||
| 603 | struct musb *musb = chdat->musb; | ||
| 604 | void __iomem *tbase = musb->ctrl_base; | ||
| 605 | u32 reg; | ||
| 606 | |||
| 607 | DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch); | ||
| 608 | |||
| 609 | reg = musb_readl(tbase, TUSB_DMA_INT_MASK); | ||
| 610 | if (chdat->tx) | ||
| 611 | reg |= (1 << chdat->epnum); | ||
| 612 | else | ||
| 613 | reg |= (1 << (chdat->epnum + 15)); | ||
| 614 | musb_writel(tbase, TUSB_DMA_INT_MASK, reg); | ||
| 615 | |||
| 616 | reg = musb_readl(tbase, TUSB_DMA_INT_CLEAR); | ||
| 617 | if (chdat->tx) | ||
| 618 | reg |= (1 << chdat->epnum); | ||
| 619 | else | ||
| 620 | reg |= (1 << (chdat->epnum + 15)); | ||
| 621 | musb_writel(tbase, TUSB_DMA_INT_CLEAR, reg); | ||
| 622 | |||
| 623 | channel->status = MUSB_DMA_STATUS_UNKNOWN; | ||
| 624 | |||
| 625 | if (chdat->ch >= 0) { | ||
| 626 | omap_stop_dma(chdat->ch); | ||
| 627 | omap_free_dma(chdat->ch); | ||
| 628 | chdat->ch = -1; | ||
| 629 | } | ||
| 630 | |||
| 631 | if (chdat->dmareq >= 0) | ||
| 632 | tusb_omap_dma_free_dmareq(chdat); | ||
| 633 | |||
| 634 | channel = NULL; | ||
| 635 | } | ||
| 636 | |||
| 637 | void dma_controller_destroy(struct dma_controller *c) | ||
| 638 | { | ||
| 639 | struct tusb_omap_dma *tusb_dma; | ||
| 640 | int i; | ||
| 641 | |||
| 642 | tusb_dma = container_of(c, struct tusb_omap_dma, controller); | ||
| 643 | for (i = 0; i < MAX_DMAREQ; i++) { | ||
| 644 | struct dma_channel *ch = dma_channel_pool[i]; | ||
| 645 | if (ch) { | ||
| 646 | kfree(ch->private_data); | ||
| 647 | kfree(ch); | ||
| 648 | } | ||
| 649 | } | ||
| 650 | |||
| 651 | if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0) | ||
| 652 | omap_free_dma(tusb_dma->ch); | ||
| 653 | |||
| 654 | kfree(tusb_dma); | ||
| 655 | } | ||
| 656 | |||
| 657 | struct dma_controller *__init | ||
| 658 | dma_controller_create(struct musb *musb, void __iomem *base) | ||
| 659 | { | ||
| 660 | void __iomem *tbase = musb->ctrl_base; | ||
| 661 | struct tusb_omap_dma *tusb_dma; | ||
| 662 | int i; | ||
| 663 | |||
| 664 | /* REVISIT: Get dmareq lines used from board-*.c */ | ||
| 665 | |||
| 666 | musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff); | ||
| 667 | musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0); | ||
| 668 | |||
| 669 | musb_writel(tbase, TUSB_DMA_REQ_CONF, | ||
| 670 | TUSB_DMA_REQ_CONF_BURST_SIZE(2) | ||
| 671 | | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) | ||
| 672 | | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2)); | ||
| 673 | |||
| 674 | tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL); | ||
| 675 | if (!tusb_dma) | ||
| 676 | goto cleanup; | ||
| 677 | |||
| 678 | tusb_dma->musb = musb; | ||
| 679 | tusb_dma->tbase = musb->ctrl_base; | ||
| 680 | |||
| 681 | tusb_dma->ch = -1; | ||
| 682 | tusb_dma->dmareq = -1; | ||
| 683 | tusb_dma->sync_dev = -1; | ||
| 684 | |||
| 685 | tusb_dma->controller.start = tusb_omap_dma_start; | ||
| 686 | tusb_dma->controller.stop = tusb_omap_dma_stop; | ||
| 687 | tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate; | ||
| 688 | tusb_dma->controller.channel_release = tusb_omap_dma_release; | ||
| 689 | tusb_dma->controller.channel_program = tusb_omap_dma_program; | ||
| 690 | tusb_dma->controller.channel_abort = tusb_omap_dma_abort; | ||
| 691 | |||
| 692 | if (tusb_get_revision(musb) >= TUSB_REV_30) | ||
| 693 | tusb_dma->multichannel = 1; | ||
| 694 | |||
| 695 | for (i = 0; i < MAX_DMAREQ; i++) { | ||
| 696 | struct dma_channel *ch; | ||
| 697 | struct tusb_omap_dma_ch *chdat; | ||
| 698 | |||
| 699 | ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL); | ||
| 700 | if (!ch) | ||
| 701 | goto cleanup; | ||
| 702 | |||
| 703 | dma_channel_pool[i] = ch; | ||
| 704 | |||
| 705 | chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL); | ||
| 706 | if (!chdat) | ||
| 707 | goto cleanup; | ||
| 708 | |||
| 709 | ch->status = MUSB_DMA_STATUS_UNKNOWN; | ||
| 710 | ch->private_data = chdat; | ||
| 711 | } | ||
| 712 | |||
| 713 | return &tusb_dma->controller; | ||
| 714 | |||
| 715 | cleanup: | ||
| 716 | dma_controller_destroy(&tusb_dma->controller); | ||
| 717 | |||
| 718 | return NULL; | ||
| 719 | } | ||
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index 8878c1767fc8..70338f4ec918 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig | |||
| @@ -499,9 +499,10 @@ config USB_SERIAL_SAFE_PADDED | |||
| 499 | config USB_SERIAL_SIERRAWIRELESS | 499 | config USB_SERIAL_SIERRAWIRELESS |
| 500 | tristate "USB Sierra Wireless Driver" | 500 | tristate "USB Sierra Wireless Driver" |
| 501 | help | 501 | help |
| 502 | Say M here if you want to use a Sierra Wireless device (if | 502 | Say M here if you want to use Sierra Wireless devices. |
| 503 | using an PC 5220 or AC580 please use the Airprime driver | 503 | |
| 504 | instead). | 504 | Many devices have a feature known as TRU-Install; for those devices |
| 505 | to work properly, the USB Storage Sierra feature must be enabled. | ||
| 505 | 506 | ||
| 506 | To compile this driver as a module, choose M here: the | 507 | To compile this driver as a module, choose M here: the |
| 507 | module will be called sierra. | 508 | module will be called sierra. |
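As a rough sketch of what the help text above implies (the SIERRAWIRELESS and STORAGE_SIERRA option names come from this patch; CONFIG_USB_SERIAL and CONFIG_USB_STORAGE are their usual parent options, and building both drivers as modules is only an assumption), a .config fragment enabling the serial driver together with TRU-Install support might look like:

    CONFIG_USB_SERIAL=m
    CONFIG_USB_SERIAL_SIERRAWIRELESS=m
    CONFIG_USB_STORAGE=m
    CONFIG_USB_STORAGE_SIERRA=y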
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 838717250145..984f6eff4c47 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
| @@ -563,6 +563,7 @@ static struct usb_device_id id_table_combined [] = { | |||
| 563 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, | 563 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, |
| 564 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, | 564 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, |
| 565 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, | 565 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, |
| 566 | { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, | ||
| 566 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, | 567 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, |
| 567 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, | 568 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, |
| 568 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, | 569 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, |
| @@ -637,6 +638,7 @@ static struct usb_device_id id_table_combined [] = { | |||
| 637 | { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) }, | 638 | { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) }, |
| 638 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, | 639 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, |
| 639 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, | 640 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, |
| 641 | { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, | ||
| 640 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, | 642 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, |
| 641 | { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, | 643 | { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, |
| 642 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, | 644 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, |
| @@ -646,6 +648,10 @@ static struct usb_device_id id_table_combined [] = { | |||
| 646 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 648 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
| 647 | { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), | 649 | { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), |
| 648 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 650 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
| 651 | { USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID), | ||
| 652 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
| 653 | { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID), | ||
| 654 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
| 649 | { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, | 655 | { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, |
| 650 | { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, | 656 | { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, |
| 651 | { }, /* Optional parameter entry */ | 657 | { }, /* Optional parameter entry */ |
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index a577ea44dcf9..382265bba969 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
| @@ -524,7 +524,9 @@ | |||
| 524 | #define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ | 524 | #define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ |
| 525 | #define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ | 525 | #define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ |
| 526 | #define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ | 526 | #define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ |
| 527 | #define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */ | ||
| 527 | #define FTDI_ELV_EM1010PC_PID 0xE0EF /* Energy monitor EM 1010 PC */ | 528 | #define FTDI_ELV_EM1010PC_PID 0xE0EF /* Energy monitor EM 1010 PC */ |
| 529 | #define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */ | ||
| 528 | 530 | ||
| 529 | /* | 531 | /* |
| 530 | * Definitions for ID TECH (www.idt-net.com) devices | 532 | * Definitions for ID TECH (www.idt-net.com) devices |
| @@ -815,6 +817,11 @@ | |||
| 815 | #define OLIMEX_VID 0x15BA | 817 | #define OLIMEX_VID 0x15BA |
| 816 | #define OLIMEX_ARM_USB_OCD_PID 0x0003 | 818 | #define OLIMEX_ARM_USB_OCD_PID 0x0003 |
| 817 | 819 | ||
| 820 | /* Luminary Micro Stellaris Boards, VID = FTDI_VID */ | ||
| 821 | /* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */ | ||
| 822 | #define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8 | ||
| 823 | #define LMI_LM3S_EVAL_BOARD_PID 0xbcd9 | ||
| 824 | |||
| 818 | /* www.elsterelectricity.com Elster Unicom III Optical Probe */ | 825 | /* www.elsterelectricity.com Elster Unicom III Optical Probe */ |
| 819 | #define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */ | 826 | #define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */ |
| 820 | 827 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index e4eca95f2b0f..e143198aeb02 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -186,6 +186,23 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po | |||
| 186 | #define BANDRICH_VENDOR_ID 0x1A8D | 186 | #define BANDRICH_VENDOR_ID 0x1A8D |
| 187 | #define BANDRICH_PRODUCT_C100_1 0x1002 | 187 | #define BANDRICH_PRODUCT_C100_1 0x1002 |
| 188 | #define BANDRICH_PRODUCT_C100_2 0x1003 | 188 | #define BANDRICH_PRODUCT_C100_2 0x1003 |
| 189 | #define BANDRICH_PRODUCT_1004 0x1004 | ||
| 190 | #define BANDRICH_PRODUCT_1005 0x1005 | ||
| 191 | #define BANDRICH_PRODUCT_1006 0x1006 | ||
| 192 | #define BANDRICH_PRODUCT_1007 0x1007 | ||
| 193 | #define BANDRICH_PRODUCT_1008 0x1008 | ||
| 194 | #define BANDRICH_PRODUCT_1009 0x1009 | ||
| 195 | #define BANDRICH_PRODUCT_100A 0x100a | ||
| 196 | |||
| 197 | #define BANDRICH_PRODUCT_100B 0x100b | ||
| 198 | #define BANDRICH_PRODUCT_100C 0x100c | ||
| 199 | #define BANDRICH_PRODUCT_100D 0x100d | ||
| 200 | #define BANDRICH_PRODUCT_100E 0x100e | ||
| 201 | |||
| 202 | #define BANDRICH_PRODUCT_100F 0x100f | ||
| 203 | #define BANDRICH_PRODUCT_1010 0x1010 | ||
| 204 | #define BANDRICH_PRODUCT_1011 0x1011 | ||
| 205 | #define BANDRICH_PRODUCT_1012 0x1012 | ||
| 189 | 206 | ||
| 190 | #define AMOI_VENDOR_ID 0x1614 | 207 | #define AMOI_VENDOR_ID 0x1614 |
| 191 | #define AMOI_PRODUCT_9508 0x0800 | 208 | #define AMOI_PRODUCT_9508 0x0800 |
| @@ -197,6 +214,10 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po | |||
| 197 | #define TELIT_VENDOR_ID 0x1bc7 | 214 | #define TELIT_VENDOR_ID 0x1bc7 |
| 198 | #define TELIT_PRODUCT_UC864E 0x1003 | 215 | #define TELIT_PRODUCT_UC864E 0x1003 |
| 199 | 216 | ||
| 217 | /* ZTE PRODUCTS */ | ||
| 218 | #define ZTE_VENDOR_ID 0x19d2 | ||
| 219 | #define ZTE_PRODUCT_MF628 0x0015 | ||
| 220 | |||
| 200 | static struct usb_device_id option_ids[] = { | 221 | static struct usb_device_id option_ids[] = { |
| 201 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 222 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
| 202 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, | 223 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, |
| @@ -302,12 +323,28 @@ static struct usb_device_id option_ids[] = { | |||
| 302 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) }, | 323 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) }, |
| 303 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, | 324 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, |
| 304 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, | 325 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, |
| 326 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) }, | ||
| 327 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005) }, | ||
| 328 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006) }, | ||
| 329 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007) }, | ||
| 330 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008) }, | ||
| 331 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009) }, | ||
| 332 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A) }, | ||
| 333 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B) }, | ||
| 334 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C) }, | ||
| 335 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D) }, | ||
| 336 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E) }, | ||
| 337 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F) }, | ||
| 338 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010) }, | ||
| 339 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011) }, | ||
| 340 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012) }, | ||
| 305 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, | 341 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, |
| 306 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, | 342 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, |
| 307 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ | 343 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ |
| 308 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ | 344 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ |
| 309 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ | 345 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ |
| 310 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, | 346 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, |
| 347 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, | ||
| 311 | { } /* Terminating entry */ | 348 | { } /* Terminating entry */ |
| 312 | }; | 349 | }; |
| 313 | MODULE_DEVICE_TABLE(usb, option_ids); | 350 | MODULE_DEVICE_TABLE(usb, option_ids); |
| @@ -346,11 +383,7 @@ static struct usb_serial_driver option_1port_device = { | |||
| 346 | .read_int_callback = option_instat_callback, | 383 | .read_int_callback = option_instat_callback, |
| 347 | }; | 384 | }; |
| 348 | 385 | ||
| 349 | #ifdef CONFIG_USB_DEBUG | ||
| 350 | static int debug; | 386 | static int debug; |
| 351 | #else | ||
| 352 | #define debug 0 | ||
| 353 | #endif | ||
| 354 | 387 | ||
| 355 | /* per port private data */ | 388 | /* per port private data */ |
| 356 | 389 | ||
| @@ -954,8 +987,5 @@ MODULE_DESCRIPTION(DRIVER_DESC); | |||
| 954 | MODULE_VERSION(DRIVER_VERSION); | 987 | MODULE_VERSION(DRIVER_VERSION); |
| 955 | MODULE_LICENSE("GPL"); | 988 | MODULE_LICENSE("GPL"); |
| 956 | 989 | ||
| 957 | #ifdef CONFIG_USB_DEBUG | ||
| 958 | module_param(debug, bool, S_IRUGO | S_IWUSR); | 990 | module_param(debug, bool, S_IRUGO | S_IWUSR); |
| 959 | MODULE_PARM_DESC(debug, "Debug messages"); | 991 | MODULE_PARM_DESC(debug, "Debug messages"); |
| 960 | #endif | ||
| 961 | |||
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 2c9c446ad625..1ede1441cb1b 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
| @@ -90,7 +90,6 @@ static struct usb_device_id id_table [] = { | |||
| 90 | { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) }, | 90 | { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) }, |
| 91 | { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) }, | 91 | { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) }, |
| 92 | { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) }, | 92 | { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) }, |
| 93 | { USB_DEVICE(HL340_VENDOR_ID, HL340_PRODUCT_ID) }, | ||
| 94 | { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, | 93 | { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, |
| 95 | { } /* Terminating entry */ | 94 | { } /* Terminating entry */ |
| 96 | }; | 95 | }; |
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 6ac3bbcf7a22..a3bd039c78e9 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
| @@ -107,10 +107,6 @@ | |||
| 107 | #define COREGA_VENDOR_ID 0x07aa | 107 | #define COREGA_VENDOR_ID 0x07aa |
| 108 | #define COREGA_PRODUCT_ID 0x002a | 108 | #define COREGA_PRODUCT_ID 0x002a |
| 109 | 109 | ||
| 110 | /* HL HL-340 (ID: 4348:5523) */ | ||
| 111 | #define HL340_VENDOR_ID 0x4348 | ||
| 112 | #define HL340_PRODUCT_ID 0x5523 | ||
| 113 | |||
| 114 | /* Y.C. Cable U.S.A., Inc - USB to RS-232 */ | 110 | /* Y.C. Cable U.S.A., Inc - USB to RS-232 */ |
| 115 | #define YCCABLE_VENDOR_ID 0x05ad | 111 | #define YCCABLE_VENDOR_ID 0x05ad |
| 116 | #define YCCABLE_PRODUCT_ID 0x0fba | 112 | #define YCCABLE_PRODUCT_ID 0x0fba |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 2f6f1523ec56..706033753adb 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> | 14 | Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> |
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #define DRIVER_VERSION "v.1.2.9c" | 17 | #define DRIVER_VERSION "v.1.2.13a" |
| 18 | #define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>" | 18 | #define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>" |
| 19 | #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" | 19 | #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" |
| 20 | 20 | ||
| @@ -31,6 +31,7 @@ | |||
| 31 | #define SWIMS_USB_REQUEST_SetPower 0x00 | 31 | #define SWIMS_USB_REQUEST_SetPower 0x00 |
| 32 | #define SWIMS_USB_REQUEST_SetNmea 0x07 | 32 | #define SWIMS_USB_REQUEST_SetNmea 0x07 |
| 33 | #define SWIMS_USB_REQUEST_SetMode 0x0B | 33 | #define SWIMS_USB_REQUEST_SetMode 0x0B |
| 34 | #define SWIMS_USB_REQUEST_GetSwocInfo 0x0A | ||
| 34 | #define SWIMS_SET_MODE_Modem 0x0001 | 35 | #define SWIMS_SET_MODE_Modem 0x0001 |
| 35 | 36 | ||
| 36 | /* per port private data */ | 37 | /* per port private data */ |
| @@ -40,18 +41,11 @@ | |||
| 40 | 41 | ||
| 41 | static int debug; | 42 | static int debug; |
| 42 | static int nmea; | 43 | static int nmea; |
| 43 | static int truinstall = 1; | ||
| 44 | |||
| 45 | enum devicetype { | ||
| 46 | DEVICE_3_PORT = 0, | ||
| 47 | DEVICE_1_PORT = 1, | ||
| 48 | DEVICE_INSTALLER = 2, | ||
| 49 | }; | ||
| 50 | 44 | ||
| 51 | static int sierra_set_power_state(struct usb_device *udev, __u16 swiState) | 45 | static int sierra_set_power_state(struct usb_device *udev, __u16 swiState) |
| 52 | { | 46 | { |
| 53 | int result; | 47 | int result; |
| 54 | dev_dbg(&udev->dev, "%s", "SET POWER STATE\n"); | 48 | dev_dbg(&udev->dev, "%s", __func__); |
| 55 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 49 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
| 56 | SWIMS_USB_REQUEST_SetPower, /* __u8 request */ | 50 | SWIMS_USB_REQUEST_SetPower, /* __u8 request */ |
| 57 | USB_TYPE_VENDOR, /* __u8 request type */ | 51 | USB_TYPE_VENDOR, /* __u8 request type */ |
| @@ -63,25 +57,10 @@ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState) | |||
| 63 | return result; | 57 | return result; |
| 64 | } | 58 | } |
| 65 | 59 | ||
| 66 | static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode) | ||
| 67 | { | ||
| 68 | int result; | ||
| 69 | dev_dbg(&udev->dev, "%s", "DEVICE MODE SWITCH\n"); | ||
| 70 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | ||
| 71 | SWIMS_USB_REQUEST_SetMode, /* __u8 request */ | ||
| 72 | USB_TYPE_VENDOR, /* __u8 request type */ | ||
| 73 | eSWocMode, /* __u16 value */ | ||
| 74 | 0x0000, /* __u16 index */ | ||
| 75 | NULL, /* void *data */ | ||
| 76 | 0, /* __u16 size */ | ||
| 77 | USB_CTRL_SET_TIMEOUT); /* int timeout */ | ||
| 78 | return result; | ||
| 79 | } | ||
| 80 | |||
| 81 | static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable) | 60 | static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable) |
| 82 | { | 61 | { |
| 83 | int result; | 62 | int result; |
| 84 | dev_dbg(&udev->dev, "%s", "NMEA Enable sent\n"); | 63 | dev_dbg(&udev->dev, "%s", __func__); |
| 85 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 64 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
| 86 | SWIMS_USB_REQUEST_SetNmea, /* __u8 request */ | 65 | SWIMS_USB_REQUEST_SetNmea, /* __u8 request */ |
| 87 | USB_TYPE_VENDOR, /* __u8 request type */ | 66 | USB_TYPE_VENDOR, /* __u8 request type */ |
| @@ -97,6 +76,7 @@ static int sierra_calc_num_ports(struct usb_serial *serial) | |||
| 97 | { | 76 | { |
| 98 | int result; | 77 | int result; |
| 99 | int *num_ports = usb_get_serial_data(serial); | 78 | int *num_ports = usb_get_serial_data(serial); |
| 79 | dev_dbg(&serial->dev->dev, "%s", __func__); | ||
| 100 | 80 | ||
| 101 | result = *num_ports; | 81 | result = *num_ports; |
| 102 | 82 | ||
| @@ -110,22 +90,23 @@ static int sierra_calc_num_ports(struct usb_serial *serial) | |||
| 110 | 90 | ||
| 111 | static int sierra_calc_interface(struct usb_serial *serial) | 91 | static int sierra_calc_interface(struct usb_serial *serial) |
| 112 | { | 92 | { |
| 113 | int interface; | 93 | int interface; |
| 114 | struct usb_interface *p_interface; | 94 | struct usb_interface *p_interface; |
| 115 | struct usb_host_interface *p_host_interface; | 95 | struct usb_host_interface *p_host_interface; |
| 96 | dev_dbg(&serial->dev->dev, "%s", __func__); | ||
| 116 | 97 | ||
| 117 | /* Get the interface structure pointer from the serial struct */ | 98 | /* Get the interface structure pointer from the serial struct */ |
| 118 | p_interface = serial->interface; | 99 | p_interface = serial->interface; |
| 119 | 100 | ||
| 120 | /* Get a pointer to the host interface structure */ | 101 | /* Get a pointer to the host interface structure */ |
| 121 | p_host_interface = p_interface->cur_altsetting; | 102 | p_host_interface = p_interface->cur_altsetting; |
| 122 | 103 | ||
| 123 | /* read the interface descriptor for this active altsetting | 104 | /* read the interface descriptor for this active altsetting |
| 124 | * to find out the interface number we are on | 105 | * to find out the interface number we are on |
| 125 | */ | 106 | */ |
| 126 | interface = p_host_interface->desc.bInterfaceNumber; | 107 | interface = p_host_interface->desc.bInterfaceNumber; |
| 127 | 108 | ||
| 128 | return interface; | 109 | return interface; |
| 129 | } | 110 | } |
| 130 | 111 | ||
| 131 | static int sierra_probe(struct usb_serial *serial, | 112 | static int sierra_probe(struct usb_serial *serial, |
| @@ -135,43 +116,40 @@ static int sierra_probe(struct usb_serial *serial, | |||
| 135 | struct usb_device *udev; | 116 | struct usb_device *udev; |
| 136 | int *num_ports; | 117 | int *num_ports; |
| 137 | u8 ifnum; | 118 | u8 ifnum; |
| 119 | u8 numendpoints; | ||
| 120 | |||
| 121 | dev_dbg(&serial->dev->dev, "%s", __func__); | ||
| 138 | 122 | ||
| 139 | num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL); | 123 | num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL); |
| 140 | if (!num_ports) | 124 | if (!num_ports) |
| 141 | return -ENOMEM; | 125 | return -ENOMEM; |
| 142 | 126 | ||
| 143 | ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; | 127 | ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; |
| 128 | numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints; | ||
| 144 | udev = serial->dev; | 129 | udev = serial->dev; |
| 145 | 130 | ||
| 146 | /* Figure out the interface number from the serial structure */ | 131 | /* Figure out the interface number from the serial structure */ |
| 147 | ifnum = sierra_calc_interface(serial); | 132 | ifnum = sierra_calc_interface(serial); |
| 148 | |||
| 149 | /* | ||
| 150 | * If this interface supports more than 1 alternate | ||
| 151 | * select the 2nd one | ||
| 152 | */ | ||
| 153 | if (serial->interface->num_altsetting == 2) { | ||
| 154 | dev_dbg(&udev->dev, | ||
| 155 | "Selecting alt setting for interface %d\n", | ||
| 156 | ifnum); | ||
| 157 | 133 | ||
| 158 | /* We know the alternate setting is 1 for the MC8785 */ | 134 | /* |
| 159 | usb_set_interface(udev, ifnum, 1); | 135 | * If this interface supports more than 1 alternate |
| 160 | } | 136 | * select the 2nd one |
| 137 | */ | ||
| 138 | if (serial->interface->num_altsetting == 2) { | ||
| 139 | dev_dbg(&udev->dev, "Selecting alt setting for interface %d\n", | ||
| 140 | ifnum); | ||
| 141 | /* We know the alternate setting is 1 for the MC8785 */ | ||
| 142 | usb_set_interface(udev, ifnum, 1); | ||
| 143 | } | ||
| 161 | 144 | ||
| 162 | /* Check if in installer mode */ | 145 | /* Dummy interface present on some SKUs should be ignored */ |
| 163 | if (truinstall && id->driver_info == DEVICE_INSTALLER) { | 146 | if (ifnum == 0x99) |
| 164 | dev_dbg(&udev->dev, "%s", "FOUND TRU-INSTALL DEVICE(SW)\n"); | ||
| 165 | result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem); | ||
| 166 | /* Don't bind to the device when in installer mode */ | ||
| 167 | kfree(num_ports); | ||
| 168 | return -EIO; | ||
| 169 | } else if (id->driver_info == DEVICE_1_PORT) | ||
| 170 | *num_ports = 1; | ||
| 171 | else if (ifnum == 0x99) | ||
| 172 | *num_ports = 0; | 147 | *num_ports = 0; |
| 148 | else if (numendpoints <= 3) | ||
| 149 | *num_ports = 1; | ||
| 173 | else | 150 | else |
| 174 | *num_ports = 3; | 151 | *num_ports = (numendpoints-1)/2; |
| 152 | |||
| 175 | /* | 153 | /* |
| 176 | * save off our num_ports info so that we can use it in the | 154 | * save off our num_ports info so that we can use it in the |
| 177 | * calc_num_ports callback | 155 | * calc_num_ports callback |
| @@ -187,40 +165,50 @@ static struct usb_device_id id_table [] = { | |||
| 187 | { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ | 165 | { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ |
| 188 | { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ | 166 | { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ |
| 189 | { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ | 167 | { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ |
| 168 | { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */ | ||
| 190 | { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ | 169 | { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ |
| 191 | { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ | 170 | { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ |
| 192 | { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ | 171 | { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ |
| 193 | { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ | 172 | { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ |
| 194 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, /* Sierra Wireless C597 */ | 173 | /* Sierra Wireless C597 */ |
| 174 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, | ||
| 175 | /* Sierra Wireless Device */ | ||
| 176 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) }, | ||
| 177 | { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */ | ||
| 195 | 178 | ||
| 196 | { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ | 179 | { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ |
| 197 | { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ | 180 | { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ |
| 198 | { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ | 181 | { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ |
| 199 | { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ | 182 | { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ |
| 200 | { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Thinkpad internal) */ | 183 | { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Lenovo) */ |
| 201 | { USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */ | 184 | { USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */ |
| 202 | { USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */ | 185 | { USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */ |
| 203 | { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ | 186 | { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ |
| 204 | { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */ | 187 | { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */ |
| 205 | { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/ | 188 | { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */ |
| 206 | { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/ | 189 | { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */ |
| 207 | { USB_DEVICE(0x1199, 0x683B), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless MC8785 Composite*/ | 190 | { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */ |
| 191 | { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */ | ||
| 192 | { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */ | ||
| 193 | { USB_DEVICE(0x1199, 0x683E) }, /* Sierra Wireless MC8790 */ | ||
| 208 | { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */ | 194 | { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */ |
| 209 | { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */ | 195 | { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */ |
| 210 | { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */ | 196 | { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */ |
| 211 | { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */ | 197 | { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */ |
| 212 | { USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */ | 198 | { USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */ |
| 213 | { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */ | 199 | { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */ |
| 214 | { USB_DEVICE(0x1199, 0x6859), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */ | 200 | { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */ |
| 215 | { USB_DEVICE(0x1199, 0x685A), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */ | 201 | { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */ |
| 216 | 202 | /* Sierra Wireless C885 */ | |
| 217 | { USB_DEVICE(0x1199, 0x6468) }, /* Sierra Wireless MP3G - EVDO */ | 203 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)}, |
| 218 | { USB_DEVICE(0x1199, 0x6469) }, /* Sierra Wireless MP3G - UMTS/HSPA */ | 204 | /* Sierra Wireless Device */ |
| 219 | 205 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)}, | |
| 220 | { USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */ | 206 | /* Sierra Wireless Device */ |
| 221 | { USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */ | 207 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, |
| 208 | |||
| 209 | { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ | ||
| 210 | { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ | ||
| 222 | 211 | ||
| 223 | { USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER}, | ||
| 224 | { } | 212 | { } |
| 225 | }; | 213 | }; |
| 226 | MODULE_DEVICE_TABLE(usb, id_table); | 214 | MODULE_DEVICE_TABLE(usb, id_table); |
| @@ -268,13 +256,19 @@ static int sierra_send_setup(struct tty_struct *tty, | |||
| 268 | if (portdata->rts_state) | 256 | if (portdata->rts_state) |
| 269 | val |= 0x02; | 257 | val |= 0x02; |
| 270 | 258 | ||
| 271 | /* Determine which port is targeted */ | 259 | /* If composite device then properly report interface */ |
| 272 | if (port->bulk_out_endpointAddress == 2) | 260 | if (serial->num_ports == 1) |
| 273 | interface = 0; | 261 | interface = sierra_calc_interface(serial); |
| 274 | else if (port->bulk_out_endpointAddress == 4) | 262 | |
| 275 | interface = 1; | 263 | /* Otherwise we need to do non-composite mapping */ |
| 276 | else if (port->bulk_out_endpointAddress == 5) | 264 | else { |
| 277 | interface = 2; | 265 | if (port->bulk_out_endpointAddress == 2) |
| 266 | interface = 0; | ||
| 267 | else if (port->bulk_out_endpointAddress == 4) | ||
| 268 | interface = 1; | ||
| 269 | else if (port->bulk_out_endpointAddress == 5) | ||
| 270 | interface = 2; | ||
| 271 | } | ||
| 278 | 272 | ||
| 279 | return usb_control_msg(serial->dev, | 273 | return usb_control_msg(serial->dev, |
| 280 | usb_rcvctrlpipe(serial->dev, 0), | 274 | usb_rcvctrlpipe(serial->dev, 0), |
| @@ -713,7 +707,7 @@ static void sierra_shutdown(struct usb_serial *serial) | |||
| 713 | static struct usb_serial_driver sierra_device = { | 707 | static struct usb_serial_driver sierra_device = { |
| 714 | .driver = { | 708 | .driver = { |
| 715 | .owner = THIS_MODULE, | 709 | .owner = THIS_MODULE, |
| 716 | .name = "sierra1", | 710 | .name = "sierra", |
| 717 | }, | 711 | }, |
| 718 | .description = "Sierra USB modem", | 712 | .description = "Sierra USB modem", |
| 719 | .id_table = id_table, | 713 | .id_table = id_table, |
| @@ -769,14 +763,8 @@ MODULE_DESCRIPTION(DRIVER_DESC); | |||
| 769 | MODULE_VERSION(DRIVER_VERSION); | 763 | MODULE_VERSION(DRIVER_VERSION); |
| 770 | MODULE_LICENSE("GPL"); | 764 | MODULE_LICENSE("GPL"); |
| 771 | 765 | ||
| 772 | module_param(truinstall, bool, 0); | 766 | module_param(nmea, bool, S_IRUGO | S_IWUSR); |
| 773 | MODULE_PARM_DESC(truinstall, "TRU-Install support"); | ||
| 774 | |||
| 775 | module_param(nmea, bool, 0); | ||
| 776 | MODULE_PARM_DESC(nmea, "NMEA streaming"); | 767 | MODULE_PARM_DESC(nmea, "NMEA streaming"); |
| 777 | 768 | ||
| 778 | #ifdef CONFIG_USB_DEBUG | ||
| 779 | module_param(debug, bool, S_IRUGO | S_IWUSR); | 769 | module_param(debug, bool, S_IRUGO | S_IWUSR); |
| 780 | MODULE_PARM_DESC(debug, "Debug messages"); | 770 | MODULE_PARM_DESC(debug, "Debug messages"); |
| 781 | #endif | ||
| 782 | |||
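To make the revised port-count rule in sierra_probe() above concrete: the driver now infers the number of ports from the endpoint count of the active interface instead of per-device driver_info flags. For example (the endpoint counts here are illustrative, not taken from any specific device), an interface exposing 7 endpoints — one interrupt endpoint plus three bulk IN/OUT pairs — yields (7 - 1) / 2 = 3 ports, an interface with 3 or fewer endpoints is treated as a single port, and the dummy interface numbered 0x99 is given 0 ports so it is ignored.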
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 8c2d531eedea..b157c48e8b78 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
| @@ -122,9 +122,6 @@ static void return_serial(struct usb_serial *serial) | |||
| 122 | 122 | ||
| 123 | dbg("%s", __func__); | 123 | dbg("%s", __func__); |
| 124 | 124 | ||
| 125 | if (serial == NULL) | ||
| 126 | return; | ||
| 127 | |||
| 128 | for (i = 0; i < serial->num_ports; ++i) | 125 | for (i = 0; i < serial->num_ports; ++i) |
| 129 | serial_table[serial->minor + i] = NULL; | 126 | serial_table[serial->minor + i] = NULL; |
| 130 | } | 127 | } |
| @@ -142,7 +139,8 @@ static void destroy_serial(struct kref *kref) | |||
| 142 | serial->type->shutdown(serial); | 139 | serial->type->shutdown(serial); |
| 143 | 140 | ||
| 144 | /* return the minor range that this device had */ | 141 | /* return the minor range that this device had */ |
| 145 | return_serial(serial); | 142 | if (serial->minor != SERIAL_TTY_NO_MINOR) |
| 143 | return_serial(serial); | ||
| 146 | 144 | ||
| 147 | for (i = 0; i < serial->num_ports; ++i) | 145 | for (i = 0; i < serial->num_ports; ++i) |
| 148 | serial->port[i]->port.count = 0; | 146 | serial->port[i]->port.count = 0; |
| @@ -575,6 +573,7 @@ static struct usb_serial *create_serial(struct usb_device *dev, | |||
| 575 | serial->interface = interface; | 573 | serial->interface = interface; |
| 576 | kref_init(&serial->kref); | 574 | kref_init(&serial->kref); |
| 577 | mutex_init(&serial->disc_mutex); | 575 | mutex_init(&serial->disc_mutex); |
| 576 | serial->minor = SERIAL_TTY_NO_MINOR; | ||
| 578 | 577 | ||
| 579 | return serial; | 578 | return serial; |
| 580 | } | 579 | } |
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig index 3d9249632ae1..c76034672c18 100644 --- a/drivers/usb/storage/Kconfig +++ b/drivers/usb/storage/Kconfig | |||
| @@ -146,6 +146,18 @@ config USB_STORAGE_KARMA | |||
| 146 | on the resulting scsi device node returns the Karma to normal | 146 | on the resulting scsi device node returns the Karma to normal |
| 147 | operation. | 147 | operation. |
| 148 | 148 | ||
| 149 | config USB_STORAGE_SIERRA | ||
| 150 | bool "Sierra Wireless TRU-Install Feature Support" | ||
| 151 | depends on USB_STORAGE | ||
| 152 | help | ||
| 153 | Say Y here to include additional code to support Sierra Wireless | ||
| 154 | products with the TRU-Install feature (e.g., AC597E, AC881U). | ||
| 155 | |||
| 156 | This code switches the Sierra Wireless device from being in | ||
| 157 | Mass Storage mode to Modem mode. It also has the ability to | ||
| 158 | support host software upgrades should full Linux support be added | ||
| 159 | to TRU-Install. | ||
| 160 | |||
| 149 | config USB_STORAGE_CYPRESS_ATACB | 161 | config USB_STORAGE_CYPRESS_ATACB |
| 150 | bool "SAT emulation on Cypress USB/ATA Bridge with ATACB" | 162 | bool "SAT emulation on Cypress USB/ATA Bridge with ATACB" |
| 151 | depends on USB_STORAGE | 163 | depends on USB_STORAGE |
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile index 4c596c766c53..bc3415b475c9 100644 --- a/drivers/usb/storage/Makefile +++ b/drivers/usb/storage/Makefile | |||
| @@ -21,6 +21,7 @@ usb-storage-obj-$(CONFIG_USB_STORAGE_JUMPSHOT) += jumpshot.o | |||
| 21 | usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o | 21 | usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o |
| 22 | usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o | 22 | usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o |
| 23 | usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o | 23 | usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o |
| 24 | usb-storage-obj-$(CONFIG_USB_STORAGE_SIERRA) += sierra_ms.o | ||
| 24 | usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o | 25 | usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o |
| 25 | 26 | ||
| 26 | usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \ | 27 | usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \ |
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c new file mode 100644 index 000000000000..4359a2cb42df --- /dev/null +++ b/drivers/usb/storage/sierra_ms.c | |||
| @@ -0,0 +1,207 @@ | |||
| 1 | #include <scsi/scsi.h> | ||
| 2 | #include <scsi/scsi_host.h> | ||
| 3 | #include <scsi/scsi_cmnd.h> | ||
| 4 | #include <scsi/scsi_device.h> | ||
| 5 | #include <linux/usb.h> | ||
| 6 | |||
| 7 | #include "usb.h" | ||
| 8 | #include "transport.h" | ||
| 9 | #include "protocol.h" | ||
| 10 | #include "scsiglue.h" | ||
| 11 | #include "sierra_ms.h" | ||
| 12 | #include "debug.h" | ||
| 13 | |||
| 14 | #define SWIMS_USB_REQUEST_SetSwocMode 0x0B | ||
| 15 | #define SWIMS_USB_REQUEST_GetSwocInfo 0x0A | ||
| 16 | #define SWIMS_USB_INDEX_SetMode 0x0000 | ||
| 17 | #define SWIMS_SET_MODE_Modem 0x0001 | ||
| 18 | |||
| 19 | #define TRU_NORMAL 0x01 | ||
| 20 | #define TRU_FORCE_MS 0x02 | ||
| 21 | #define TRU_FORCE_MODEM 0x03 | ||
| 22 | |||
| 23 | static unsigned int swi_tru_install = 1; | ||
| 24 | module_param(swi_tru_install, uint, S_IRUGO | S_IWUSR); | ||
| 25 | MODULE_PARM_DESC(swi_tru_install, "TRU-Install mode (1=Full Logic (def)," | ||
| 26 | " 2=Force CD-Rom, 3=Force Modem)"); | ||
| 27 | |||
| 28 | struct swoc_info { | ||
| 29 | __u8 rev; | ||
| 30 | __u8 reserved[8]; | ||
| 31 | __u16 LinuxSKU; | ||
| 32 | __u16 LinuxVer; | ||
| 33 | __u8 reserved2[47]; | ||
| 34 | } __attribute__((__packed__)); | ||
| 35 | |||
| 36 | static bool containsFullLinuxPackage(struct swoc_info *swocInfo) | ||
| 37 | { | ||
| 38 | if ((swocInfo->LinuxSKU >= 0x2100 && swocInfo->LinuxSKU <= 0x2FFF) || | ||
| 39 | (swocInfo->LinuxSKU >= 0x7100 && swocInfo->LinuxSKU <= 0x7FFF)) | ||
| 40 | return true; | ||
| 41 | else | ||
| 42 | return false; | ||
| 43 | } | ||
| 44 | |||
| 45 | static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode) | ||
| 46 | { | ||
| 47 | int result; | ||
| 48 | US_DEBUGP("SWIMS: %s", "DEVICE MODE SWITCH\n"); | ||
| 49 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | ||
| 50 | SWIMS_USB_REQUEST_SetSwocMode, /* __u8 request */ | ||
| 51 | USB_TYPE_VENDOR | USB_DIR_OUT, /* __u8 request type */ | ||
| 52 | eSWocMode, /* __u16 value */ | ||
| 53 | 0x0000, /* __u16 index */ | ||
| 54 | NULL, /* void *data */ | ||
| 55 | 0, /* __u16 size */ | ||
| 56 | USB_CTRL_SET_TIMEOUT); /* int timeout */ | ||
| 57 | return result; | ||
| 58 | } | ||
| 59 | |||
| 60 | |||
| 61 | static int sierra_get_swoc_info(struct usb_device *udev, | ||
| 62 | struct swoc_info *swocInfo) | ||
| 63 | { | ||
| 64 | int result; | ||
| 65 | |||
| 66 | US_DEBUGP("SWIMS: Attempting to get TRU-Install info.\n"); | ||
| 67 | |||
| 68 | result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), | ||
| 69 | SWIMS_USB_REQUEST_GetSwocInfo, /* __u8 request */ | ||
| 70 | USB_TYPE_VENDOR | USB_DIR_IN, /* __u8 request type */ | ||
| 71 | 0, /* __u16 value */ | ||
| 72 | 0, /* __u16 index */ | ||
| 73 | (void *) swocInfo, /* void *data */ | ||
| 74 | sizeof(struct swoc_info), /* __u16 size */ | ||
| 75 | USB_CTRL_SET_TIMEOUT); /* int timeout */ | ||
| 76 | |||
| 77 | swocInfo->LinuxSKU = le16_to_cpu(swocInfo->LinuxSKU); | ||
| 78 | swocInfo->LinuxVer = le16_to_cpu(swocInfo->LinuxVer); | ||
| 79 | return result; | ||
| 80 | } | ||
| 81 | |||
| 82 | static void debug_swoc(struct swoc_info *swocInfo) | ||
| 83 | { | ||
| 84 | US_DEBUGP("SWIMS: SWoC Rev: %02d \n", swocInfo->rev); | ||
| 85 | US_DEBUGP("SWIMS: Linux SKU: %04X \n", swocInfo->LinuxSKU); | ||
| 86 | US_DEBUGP("SWIMS: Linux Version: %04X \n", swocInfo->LinuxVer); | ||
| 87 | } | ||
| 88 | |||
| 89 | |||
| 90 | static ssize_t show_truinst(struct device *dev, struct device_attribute *attr, | ||
| 91 | char *buf) | ||
| 92 | { | ||
| 93 | struct swoc_info *swocInfo; | ||
| 94 | struct usb_interface *intf = to_usb_interface(dev); | ||
| 95 | struct usb_device *udev = interface_to_usbdev(intf); | ||
| 96 | int result; | ||
| 97 | if (swi_tru_install == TRU_FORCE_MS) { | ||
| 98 | result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n"); | ||
| 99 | } else { | ||
| 100 | swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL); | ||
| 101 | if (!swocInfo) { | ||
| 102 | US_DEBUGP("SWIMS: Allocation failure\n"); | ||
| 103 | snprintf(buf, PAGE_SIZE, "Error\n"); | ||
| 104 | return -ENOMEM; | ||
| 105 | } | ||
| 106 | result = sierra_get_swoc_info(udev, swocInfo); | ||
| 107 | if (result < 0) { | ||
| 108 | US_DEBUGP("SWIMS: failed SWoC query\n"); | ||
| 109 | kfree(swocInfo); | ||
| 110 | snprintf(buf, PAGE_SIZE, "Error\n"); | ||
| 111 | return -EIO; | ||
| 112 | } | ||
| 113 | debug_swoc(swocInfo); | ||
| 114 | result = snprintf(buf, PAGE_SIZE, | ||
| 115 | "REV=%02d SKU=%04X VER=%04X\n", | ||
| 116 | swocInfo->rev, | ||
| 117 | swocInfo->LinuxSKU, | ||
| 118 | swocInfo->LinuxVer); | ||
| 119 | kfree(swocInfo); | ||
| 120 | } | ||
| 121 | return result; | ||
| 122 | } | ||
| 123 | static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL); | ||
| 124 | |||
| 125 | int sierra_ms_init(struct us_data *us) | ||
| 126 | { | ||
| 127 | int result, retries; | ||
| 128 | signed long delay_t; | ||
| 129 | struct swoc_info *swocInfo; | ||
| 130 | struct usb_device *udev; | ||
| 131 | struct Scsi_Host *sh; | ||
| 132 | struct scsi_device *sd; | ||
| 133 | |||
| 134 | delay_t = 2; | ||
| 135 | retries = 3; | ||
| 136 | result = 0; | ||
| 137 | udev = us->pusb_dev; | ||
| 138 | |||
| 139 | sh = us_to_host(us); | ||
| 140 | sd = scsi_get_host_dev(sh); | ||
| 141 | |||
| 142 | US_DEBUGP("SWIMS: sierra_ms_init called\n"); | ||
| 143 | |||
| 144 | /* Force Modem mode */ | ||
| 145 | if (swi_tru_install == TRU_FORCE_MODEM) { | ||
| 146 | US_DEBUGP("SWIMS: %s", "Forcing Modem Mode\n"); | ||
| 147 | result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem); | ||
| 148 | if (result < 0) | ||
| 149 | US_DEBUGP("SWIMS: Failed to switch to modem mode.\n"); | ||
| 150 | return -EIO; | ||
| 151 | } | ||
| 152 | /* Force Mass Storage mode (keep CD-Rom) */ | ||
| 153 | else if (swi_tru_install == TRU_FORCE_MS) { | ||
| 154 | US_DEBUGP("SWIMS: %s", "Forcing Mass Storage Mode\n"); | ||
| 155 | goto complete; | ||
| 156 | } | ||
| 157 | /* Normal TRU-Install Logic */ | ||
| 158 | else { | ||
| 159 | US_DEBUGP("SWIMS: %s", "Normal SWoC Logic\n"); | ||
| 160 | |||
| 161 | swocInfo = kmalloc(sizeof(struct swoc_info), | ||
| 162 | GFP_KERNEL); | ||
| 163 | if (!swocInfo) { | ||
| 164 | US_DEBUGP("SWIMS: %s", "Allocation failure\n"); | ||
| 165 | return -ENOMEM; | ||
| 166 | } | ||
| 167 | |||
| 168 | retries = 3; | ||
| 169 | do { | ||
| 170 | retries--; | ||
| 171 | result = sierra_get_swoc_info(udev, swocInfo); | ||
| 172 | if (result < 0) { | ||
| 173 | US_DEBUGP("SWIMS: %s", "Failed SWoC query\n"); | ||
| 174 | schedule_timeout_uninterruptible(2*HZ); | ||
| 175 | } | ||
| 176 | } while (retries && result < 0); | ||
| 177 | |||
| 178 | if (result < 0) { | ||
| 179 | US_DEBUGP("SWIMS: %s", | ||
| 180 | "Completely failed SWoC query\n"); | ||
| 181 | kfree(swocInfo); | ||
| 182 | return -EIO; | ||
| 183 | } | ||
| 184 | |||
| 185 | debug_swoc(swocInfo); | ||
| 186 | |||
| 187 | /* If there is no Linux software on the TRU-Install device | ||
| 188 | * then switch to modem mode | ||
| 189 | */ | ||
| 190 | if (!containsFullLinuxPackage(swocInfo)) { | ||
| 191 | US_DEBUGP("SWIMS: %s", | ||
| 192 | "Switching to Modem Mode\n"); | ||
| 193 | result = sierra_set_ms_mode(udev, | ||
| 194 | SWIMS_SET_MODE_Modem); | ||
| 195 | if (result < 0) | ||
| 196 | US_DEBUGP("SWIMS: Failed to switch modem\n"); | ||
| 197 | kfree(swocInfo); | ||
| 198 | return -EIO; | ||
| 199 | } | ||
| 200 | kfree(swocInfo); | ||
| 201 | } | ||
| 202 | complete: | ||
| 203 | result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst); | ||
| 204 | |||
| 205 | return USB_STOR_TRANSPORT_GOOD; | ||
| 206 | } | ||
| 207 | |||
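A brief usage note on the new file above, under the assumption that sierra_ms.o is linked into the usb-storage module as the Makefile change arranges: loading usb-storage with swi_tru_install=1 keeps the full SWoC logic (the default), 2 forces the device to stay in Mass Storage/CD-ROM mode, and 3 forces an immediate switch to modem mode. Reading the truinst attribute that sierra_ms_init() creates under the interface's sysfs directory reports the SWoC revision, SKU and version; the exact sysfs path depends on where the device enumerates.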
diff --git a/drivers/usb/storage/sierra_ms.h b/drivers/usb/storage/sierra_ms.h new file mode 100644 index 000000000000..bb48634ac1fc --- /dev/null +++ b/drivers/usb/storage/sierra_ms.h | |||
| @@ -0,0 +1,4 @@ | |||
| 1 | #ifndef _SIERRA_MS_H_ | ||
| 2 | #define _SIERRA_MS_H_ | ||
| 3 | extern int sierra_ms_init(struct us_data *us); | ||
| 4 | #endif | ||
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index fcbbfdb7b2b0..3523a0bfa0ff 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c | |||
| @@ -1032,8 +1032,21 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us) | |||
| 1032 | 1032 | ||
| 1033 | /* try to compute the actual residue, based on how much data | 1033 | /* try to compute the actual residue, based on how much data |
| 1034 | * was really transferred and what the device tells us */ | 1034 | * was really transferred and what the device tells us */ |
| 1035 | if (residue) { | 1035 | if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) { |
| 1036 | if (!(us->fflags & US_FL_IGNORE_RESIDUE)) { | 1036 | |
| 1037 | /* Heuristically detect devices that generate bogus residues | ||
| 1038 | * by seeing what happens with INQUIRY and READ CAPACITY | ||
| 1039 | * commands. | ||
| 1040 | */ | ||
| 1041 | if (bcs->Status == US_BULK_STAT_OK && | ||
| 1042 | scsi_get_resid(srb) == 0 && | ||
| 1043 | ((srb->cmnd[0] == INQUIRY && | ||
| 1044 | transfer_length == 36) || | ||
| 1045 | (srb->cmnd[0] == READ_CAPACITY && | ||
| 1046 | transfer_length == 8))) { | ||
| 1047 | us->fflags |= US_FL_IGNORE_RESIDUE; | ||
| 1048 | |||
| 1049 | } else { | ||
| 1037 | residue = min(residue, transfer_length); | 1050 | residue = min(residue, transfer_length); |
| 1038 | scsi_set_resid(srb, max(scsi_get_resid(srb), | 1051 | scsi_set_resid(srb, max(scsi_get_resid(srb), |
| 1039 | (int) residue)); | 1052 | (int) residue)); |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 7ae69f55aa96..ba412e68d474 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
| @@ -225,6 +225,13 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370, | |||
| 225 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 225 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
| 226 | US_FL_MAX_SECTORS_64 ), | 226 | US_FL_MAX_SECTORS_64 ), |
| 227 | 227 | ||
| 228 | /* Reported by Cedric Godin <cedric@belbone.be> */ | ||
| 229 | UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551, | ||
| 230 | "Nokia", | ||
| 231 | "5300", | ||
| 232 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
| 233 | US_FL_FIX_CAPACITY ), | ||
| 234 | |||
| 228 | /* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */ | 235 | /* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */ |
| 229 | UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210, | 236 | UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210, |
| 230 | "SMSC", | 237 | "SMSC", |
| @@ -356,14 +363,14 @@ UNUSUAL_DEV( 0x04b0, 0x040f, 0x0100, 0x0200, | |||
| 356 | US_FL_FIX_CAPACITY), | 363 | US_FL_FIX_CAPACITY), |
| 357 | 364 | ||
| 358 | /* Reported by Emil Larsson <emil@swip.net> */ | 365 | /* Reported by Emil Larsson <emil@swip.net> */ |
| 359 | UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0110, | 366 | UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0111, |
| 360 | "NIKON", | 367 | "NIKON", |
| 361 | "NIKON DSC D80", | 368 | "NIKON DSC D80", |
| 362 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 369 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
| 363 | US_FL_FIX_CAPACITY), | 370 | US_FL_FIX_CAPACITY), |
| 364 | 371 | ||
| 365 | /* Reported by Ortwin Glueck <odi@odi.ch> */ | 372 | /* Reported by Ortwin Glueck <odi@odi.ch> */ |
| 366 | UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0110, | 373 | UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0111, |
| 367 | "NIKON", | 374 | "NIKON", |
| 368 | "NIKON DSC D40", | 375 | "NIKON DSC D40", |
| 369 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 376 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
| @@ -1185,6 +1192,13 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff, | |||
| 1185 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1192 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
| 1186 | US_FL_FIX_INQUIRY ), | 1193 | US_FL_FIX_INQUIRY ), |
| 1187 | 1194 | ||
| 1195 | /* Reported by Rauch Wolke <rauchwolke@gmx.net> */ | ||
| 1196 | UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff, | ||
| 1197 | "Simple Tech/Datafab", | ||
| 1198 | "CF+SM Reader", | ||
| 1199 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
| 1200 | US_FL_IGNORE_RESIDUE ), | ||
| 1201 | |||
| 1188 | /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant | 1202 | /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant |
| 1189 | * to the USB storage specification in two ways: | 1203 | * to the USB storage specification in two ways: |
| 1190 | * - They tell us they are using transport protocol CBI. In reality they | 1204 | * - They tell us they are using transport protocol CBI. In reality they |
| @@ -1562,6 +1576,7 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100, | |||
| 1562 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1576 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
| 1563 | 0), | 1577 | 0), |
| 1564 | 1578 | ||
| 1579 | #ifdef CONFIG_USB_STORAGE_SIERRA | ||
| 1565 | /* Reported by Kevin Lloyd <linux@sierrawireless.com> | 1580 | /* Reported by Kevin Lloyd <linux@sierrawireless.com> |
| 1566 | * Entry is needed for the initializer function override, | 1581 | * Entry is needed for the initializer function override, |
| 1567 | * which instructs the device to load as a modem | 1582 | * which instructs the device to load as a modem |
| @@ -1570,8 +1585,9 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100, | |||
| 1570 | UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999, | 1585 | UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999, |
| 1571 | "Sierra Wireless", | 1586 | "Sierra Wireless", |
| 1572 | "USB MMC Storage", | 1587 | "USB MMC Storage", |
| 1573 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1588 | US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init, |
| 1574 | US_FL_IGNORE_DEVICE), | 1589 | 0), |
| 1590 | #endif | ||
| 1575 | 1591 | ||
| 1576 | /* Reported by Jaco Kroon <jaco@kroon.co.za> | 1592 | /* Reported by Jaco Kroon <jaco@kroon.co.za> |
| 1577 | * The usb-storage module found on the Digitech GNX4 (and supposedly other | 1593 | * The usb-storage module found on the Digitech GNX4 (and supposedly other |
| @@ -1743,6 +1759,15 @@ UNUSUAL_DEV( 0x22b8, 0x4810, 0x0001, 0x0002, | |||
| 1743 | US_FL_FIX_CAPACITY), | 1759 | US_FL_FIX_CAPACITY), |
| 1744 | 1760 | ||
| 1745 | /* | 1761 | /* |
| 1762 | * Patch by Jost Diederichs <jost@qdusa.com> | ||
| 1763 | */ | ||
| 1764 | UNUSUAL_DEV(0x22b8, 0x6410, 0x0001, 0x9999, | ||
| 1765 | "Motorola Inc.", | ||
| 1766 | "Motorola Phone (RAZRV3xx)", | ||
| 1767 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
| 1768 | US_FL_FIX_CAPACITY), | ||
| 1769 | |||
| 1770 | /* | ||
| 1746 | * Patch by Constantin Baranov <const@tltsu.ru> | 1771 | * Patch by Constantin Baranov <const@tltsu.ru> |
| 1747 | * Report by Andreas Koenecke. | 1772 | * Report by Andreas Koenecke. |
| 1748 | * Motorola ROKR Z6. | 1773 | * Motorola ROKR Z6. |
| @@ -1767,6 +1792,13 @@ UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010, | |||
| 1767 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1792 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
| 1768 | US_FL_FIX_CAPACITY ), | 1793 | US_FL_FIX_CAPACITY ), |
| 1769 | 1794 | ||
| 1795 | /* Reported by Andrey Rahmatullin <wrar@altlinux.org> */ | ||
| 1796 | UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, | ||
| 1797 | "iRiver", | ||
| 1798 | "MP3 T10", | ||
| 1799 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
| 1800 | US_FL_IGNORE_RESIDUE ), | ||
| 1801 | |||
| 1770 | /* | 1802 | /* |
| 1771 | * David Härdeman <david@2gen.com> | 1803 | * David Härdeman <david@2gen.com> |
| 1772 | * The key makes the SCSI stack print confusing (but harmless) messages | 1804 | * The key makes the SCSI stack print confusing (but harmless) messages |
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index bfea851be985..73679aa506de 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
| @@ -102,6 +102,9 @@ | |||
| 102 | #ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB | 102 | #ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB |
| 103 | #include "cypress_atacb.h" | 103 | #include "cypress_atacb.h" |
| 104 | #endif | 104 | #endif |
| 105 | #ifdef CONFIG_USB_STORAGE_SIERRA | ||
| 106 | #include "sierra_ms.h" | ||
| 107 | #endif | ||
| 105 | 108 | ||
| 106 | /* Some informational data */ | 109 | /* Some informational data */ |
| 107 | MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>"); | 110 | MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>"); |
diff --git a/include/linux/usb.h b/include/linux/usb.h index 5811c5da69f9..0924cd9c30f6 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
| @@ -110,6 +110,8 @@ enum usb_interface_condition { | |||
| 110 | * @sysfs_files_created: sysfs attributes exist | 110 | * @sysfs_files_created: sysfs attributes exist |
| 111 | * @needs_remote_wakeup: flag set when the driver requires remote-wakeup | 111 | * @needs_remote_wakeup: flag set when the driver requires remote-wakeup |
| 112 | * capability during autosuspend. | 112 | * capability during autosuspend. |
| 113 | * @needs_binding: flag set when the driver should be re-probed or unbound | ||
| 114 | * following a reset or suspend operation it doesn't support. | ||
| 113 | * @dev: driver model's view of this device | 115 | * @dev: driver model's view of this device |
| 114 | * @usb_dev: if an interface is bound to the USB major, this will point | 116 | * @usb_dev: if an interface is bound to the USB major, this will point |
| 115 | * to the sysfs representation for that device. | 117 | * to the sysfs representation for that device. |
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h new file mode 100644 index 000000000000..630962c04ca4 --- /dev/null +++ b/include/linux/usb/musb.h | |||
| @@ -0,0 +1,98 @@ | |||
| 1 | /* | ||
| 2 | * This is used for the host and peripheral modes of the driver for | ||
| 3 | * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC. | ||
| 4 | * | ||
| 5 | * Board initialization should put one of these into dev->platform_data, | ||
| 6 | * probably on some platform_device named "musb_hdrc". It encapsulates | ||
| 7 | * key configuration differences between boards. | ||
| 8 | */ | ||
| 9 | |||
| 10 | /* The USB role is defined by the connector used on the board, so long as | ||
| 11 | * standards are being followed. (Developer boards sometimes won't.) | ||
| 12 | */ | ||
| 13 | enum musb_mode { | ||
| 14 | MUSB_UNDEFINED = 0, | ||
| 15 | MUSB_HOST, /* A or Mini-A connector */ | ||
| 16 | MUSB_PERIPHERAL, /* B or Mini-B connector */ | ||
| 17 | MUSB_OTG /* Mini-AB connector */ | ||
| 18 | }; | ||
| 19 | |||
| 20 | struct clk; | ||
| 21 | |||
| 22 | struct musb_hdrc_eps_bits { | ||
| 23 | const char name[16]; | ||
| 24 | u8 bits; | ||
| 25 | }; | ||
| 26 | |||
| 27 | struct musb_hdrc_config { | ||
| 28 | /* MUSB configuration-specific details */ | ||
| 29 | unsigned multipoint:1; /* multipoint device */ | ||
| 30 | unsigned dyn_fifo:1; /* supports dynamic fifo sizing */ | ||
| 31 | unsigned soft_con:1; /* soft connect required */ | ||
| 32 | unsigned utm_16:1; /* utm data width is 16 bits */ | ||
| 33 | unsigned big_endian:1; /* true if CPU uses big-endian */ | ||
| 34 | unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */ | ||
| 35 | unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */ | ||
| 36 | unsigned high_iso_tx:1; /* Tx ep required for HB iso */ | ||
| 37 | unsigned high_iso_rx:1; /* Rx ep required for HB iso */ | ||
| 38 | unsigned dma:1; /* supports DMA */ | ||
| 39 | unsigned vendor_req:1; /* vendor registers required */ | ||
| 40 | |||
| 41 | u8 num_eps; /* number of endpoints _with_ ep0 */ | ||
| 42 | u8 dma_channels; /* number of dma channels */ | ||
| 43 | u8 dyn_fifo_size; /* dynamic size in bytes */ | ||
| 44 | u8 vendor_ctrl; /* vendor control reg width */ | ||
| 45 | u8 vendor_stat; /* vendor status reg width */ | ||
| 46 | u8 dma_req_chan; /* bitmask for required dma channels */ | ||
| 47 | u8 ram_bits; /* ram address size */ | ||
| 48 | |||
| 49 | struct musb_hdrc_eps_bits *eps_bits; | ||
| 50 | }; | ||
| 51 | |||
| 52 | struct musb_hdrc_platform_data { | ||
| 53 | /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ | ||
| 54 | u8 mode; | ||
| 55 | |||
| 56 | /* for clk_get() */ | ||
| 57 | const char *clock; | ||
| 58 | |||
| 59 | /* (HOST or OTG) switch VBUS on/off */ | ||
| 60 | int (*set_vbus)(struct device *dev, int is_on); | ||
| 61 | |||
| 62 | /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */ | ||
| 63 | u8 power; | ||
| 64 | |||
| 65 | /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */ | ||
| 66 | u8 min_power; | ||
| 67 | |||
| 68 | /* (HOST or OTG) msec/2 after VBUS on till power good */ | ||
| 69 | u8 potpgt; | ||
| 70 | |||
| 71 | /* Power the device on or off */ | ||
| 72 | int (*set_power)(int state); | ||
| 73 | |||
| 74 | /* Turn device clock on or off */ | ||
| 75 | int (*set_clock)(struct clk *clock, int is_on); | ||
| 76 | |||
| 77 | /* MUSB configuration-specific details */ | ||
| 78 | struct musb_hdrc_config *config; | ||
| 79 | }; | ||
| 80 | |||
| 81 | |||
| 82 | /* TUSB 6010 support */ | ||
| 83 | |||
| 84 | #define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */ | ||
| 85 | #define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */ | ||
| 86 | #define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */ | ||
| 87 | |||
| 88 | #ifdef CONFIG_ARCH_OMAP2 | ||
| 89 | |||
| 90 | extern int __init tusb6010_setup_interface( | ||
| 91 | struct musb_hdrc_platform_data *data, | ||
| 92 | unsigned ps_refclk, unsigned waitpin, | ||
| 93 | unsigned async_cs, unsigned sync_cs, | ||
| 94 | unsigned irq, unsigned dmachan); | ||
| 95 | |||
| 96 | extern int tusb6010_platform_retime(unsigned is_refclk); | ||
| 97 | |||
| 98 | #endif /* OMAP2 */ | ||
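A side note on the new header above: its file comment says board code should hand a struct musb_hdrc_platform_data to the driver via dev->platform_data on a platform_device named "musb_hdrc". A minimal board-file sketch of that hand-off might look like the following; only the "musb_hdrc" device name and the structure layouts come from the header itself, while the variable names, clock name, and chosen field values are illustrative assumptions.

/*
 * Hypothetical board-file sketch (not part of the patch above): registers
 * a "musb_hdrc" platform_device whose platform_data points at a
 * struct musb_hdrc_platform_data.  All values below are placeholders.
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/usb/musb.h>

static struct musb_hdrc_config example_musb_config = {
        .multipoint     = 1,            /* controller supports multipoint */
        .dyn_fifo       = 1,            /* dynamic FIFO sizing */
        .num_eps        = 16,           /* endpoints, including ep0 */
        .ram_bits       = 12,           /* FIFO RAM address width */
};

static struct musb_hdrc_platform_data example_musb_plat = {
        .mode           = MUSB_OTG,     /* Mini-AB connector */
        .clock          = "usb_clk",    /* name handed to clk_get() */
        .power          = 250,          /* VBUS supply: 250 * 2 mA = 500 mA */
        .config         = &example_musb_config,
};

static struct platform_device example_musb_device = {
        .name   = "musb_hdrc",
        .id     = -1,
        .dev    = {
                .platform_data  = &example_musb_plat,
        },
};

static int __init example_board_musb_init(void)
{
        return platform_device_register(&example_musb_device);
}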
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 09a3e6a7518f..655341d0f534 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h | |||
| @@ -17,7 +17,8 @@ | |||
| 17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
| 18 | 18 | ||
| 19 | #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ | 19 | #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ |
| 20 | #define SERIAL_TTY_MINORS 255 /* loads of devices :) */ | 20 | #define SERIAL_TTY_MINORS 254 /* loads of devices :) */ |
| 21 | #define SERIAL_TTY_NO_MINOR 255 /* No minor was assigned */ | ||
| 21 | 22 | ||
| 22 | /* The maximum number of ports one device can grab at once */ | 23 | /* The maximum number of ports one device can grab at once */ |
| 23 | #define MAX_NUM_PORTS 8 | 24 | #define MAX_NUM_PORTS 8 |
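The new SERIAL_TTY_NO_MINOR sentinel reserves minor 255 to mean "no minor was assigned", so teardown code can skip releasing minors that were never handed out. A hedged sketch of that kind of guard follows; the helper name and its body are assumptions for illustration, not code from this patch.

/*
 * Illustrative sketch only: how a teardown path might use the new
 * SERIAL_TTY_NO_MINOR sentinel.  example_release_minors() is a made-up
 * helper standing in for whatever frees the minor range.
 */
#include <linux/usb/serial.h>

static void example_release_minors(struct usb_serial *serial)
{
        /* Minors were never assigned (e.g. probe failed early): nothing to free */
        if (serial->minor == SERIAL_TTY_NO_MINOR)
                return;

        /* ...release serial->minor .. serial->minor + serial->num_ports - 1... */
}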
