Diffstat (limited to 'drivers/usb/mtu3/mtu3_qmu.c')
-rw-r--r--  drivers/usb/mtu3/mtu3_qmu.c | 118
1 file changed, 76 insertions(+), 42 deletions(-)
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 09f19f70fe8f..3f414f91b589 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -22,17 +22,49 @@
 #include <linux/iopoll.h>
 
 #include "mtu3.h"
+#include "mtu3_trace.h"
 
 #define QMU_CHECKSUM_LEN 16
 
 #define GPD_FLAGS_HWO BIT(0)
 #define GPD_FLAGS_BDP BIT(1)
 #define GPD_FLAGS_BPS BIT(2)
+#define GPD_FLAGS_ZLP BIT(6)
 #define GPD_FLAGS_IOC BIT(7)
+#define GET_GPD_HWO(gpd) (le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)
 
-#define GPD_EXT_FLAG_ZLP BIT(5)
-#define GPD_EXT_NGP(x) (((x) & 0xf) << 4)
-#define GPD_EXT_BUF(x) (((x) & 0xf) << 0)
+#define GPD_RX_BUF_LEN_OG(x) (((x) & 0xffff) << 16)
+#define GPD_RX_BUF_LEN_EL(x) (((x) & 0xfffff) << 12)
+#define GPD_RX_BUF_LEN(mtu, x) \
+({ \
+	typeof(x) x_ = (x); \
+	((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
+})
+
+#define GPD_DATA_LEN_OG(x) ((x) & 0xffff)
+#define GPD_DATA_LEN_EL(x) ((x) & 0xfffff)
+#define GPD_DATA_LEN(mtu, x) \
+({ \
+	typeof(x) x_ = (x); \
+	((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
+})
+
+#define GPD_EXT_FLAG_ZLP BIT(29)
+#define GPD_EXT_NGP_OG(x) (((x) & 0xf) << 20)
+#define GPD_EXT_BUF_OG(x) (((x) & 0xf) << 16)
+#define GPD_EXT_NGP_EL(x) (((x) & 0xf) << 28)
+#define GPD_EXT_BUF_EL(x) (((x) & 0xf) << 24)
+#define GPD_EXT_NGP(mtu, x) \
+({ \
+	typeof(x) x_ = (x); \
+	((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
+})
+
+#define GPD_EXT_BUF(mtu, x) \
+({ \
+	typeof(x) x_ = (x); \
+	((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
+})
 
 #define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
 #define HILO_DMA(hi, lo) \
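A quick aside, not part of the patch: the new _OG/_EL macro pairs encode the same fields for the original and the gen2 ("enhanced length") GPD layouts, and the wrapper macros pick one at run time from mtu->gen2cp. A minimal user-space sketch of that selection pattern follows; struct fake_mtu3 is a stand-in for the real struct mtu3 from mtu3.h, and typeof plus the ({ }) statement expression are GCC/Clang extensions, as in the kernel source.

/* Illustrative only: the gen2cp selection pattern used by the new macros.
 * struct fake_mtu3 is a stand-in for the real struct mtu3 from mtu3.h.
 */
#include <stdio.h>
#include <stdint.h>

struct fake_mtu3 {
	int gen2cp;   /* the only field the wrapper macros look at */
};

#define GPD_DATA_LEN_OG(x) ((x) & 0xffff)    /* 16-bit length field */
#define GPD_DATA_LEN_EL(x) ((x) & 0xfffff)   /* 20-bit length field */
#define GPD_DATA_LEN(mtu, x) \
({ \
	typeof(x) x_ = (x); \
	((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
})

int main(void)
{
	struct fake_mtu3 og = { .gen2cp = 0 }, el = { .gen2cp = 1 };
	uint32_t len = 0x12345;   /* larger than 64 KiB */

	/* prints "og: 0x2345 el: 0x12345": the old layout truncates to
	 * 16 bits, the gen2 layout keeps the full 20-bit value */
	printf("og: 0x%x el: 0x%x\n",
	       (unsigned int)GPD_DATA_LEN(&og, len),
	       (unsigned int)GPD_DATA_LEN(&el, len));
	return 0;
}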
@@ -125,7 +157,7 @@ static void reset_gpd_list(struct mtu3_ep *mep)
 	struct qmu_gpd *gpd = ring->start;
 
 	if (gpd) {
-		gpd->flag &= ~GPD_FLAGS_HWO;
+		gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
 		gpd_ring_init(ring, gpd);
 	}
 }
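Another aside, not part of the patch: dw0_info is now a little-endian descriptor word, so the HWO bit is cleared by AND-ing with a converted mask, cpu_to_le32(~GPD_FLAGS_HWO), instead of converting the whole word back and forth. A small stand-alone check of why that is equivalent; bswap32() stands in for the byte swap a big-endian CPU would perform, while on little-endian hosts cpu_to_le32() is a no-op and the question does not arise.

/* Stand-alone check: AND-ing the stored little-endian word with a converted
 * mask equals converting, clearing the bit, and converting back. */
#include <assert.h>
#include <stdint.h>

#define GPD_FLAGS_HWO (1u << 0)

static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

int main(void)
{
	uint32_t dw0_le = bswap32(0x80000041u);  /* flags incl. HWO, stored LE */

	uint32_t in_place   = dw0_le & bswap32(~GPD_FLAGS_HWO);
	uint32_t round_trip = bswap32(bswap32(dw0_le) & ~GPD_FLAGS_HWO);

	assert(in_place == round_trip);
	return 0;
}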
@@ -214,16 +246,14 @@ static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
 	struct qmu_gpd *gpd = ring->enqueue;
 	struct usb_request *req = &mreq->request;
+	struct mtu3 *mtu = mep->mtu;
 	dma_addr_t enq_dma;
-	u16 ext_addr;
-
-	/* set all fields to zero as default value */
-	memset(gpd, 0, sizeof(*gpd));
+	u32 ext_addr;
 
+	gpd->dw0_info = 0;	/* SW own it */
 	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
-	ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
-	gpd->buf_len = cpu_to_le16(req->length);
-	gpd->flag |= GPD_FLAGS_IOC;
+	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
+	gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));
 
 	/* get the next GPD */
 	enq = advance_enq_gpd(ring);
@@ -231,17 +261,22 @@ static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
 	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
 		mep->epnum, gpd, enq, &enq_dma);
 
-	enq->flag &= ~GPD_FLAGS_HWO;
+	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
 	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
-	ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
-	gpd->tx_ext_addr = cpu_to_le16(ext_addr);
+	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
+	gpd->dw0_info = cpu_to_le32(ext_addr);
 
-	if (req->zero)
-		gpd->ext_flag |= GPD_EXT_FLAG_ZLP;
+	if (req->zero) {
+		if (mtu->gen2cp)
+			gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);
+		else
+			gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
+	}
 
-	gpd->flag |= GPD_FLAGS_HWO;
+	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
 
 	mreq->gpd = gpd;
+	trace_mtu3_prepare_gpd(mep, gpd);
 
 	return 0;
 }
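Aside, not part of the patch: with the memset() gone, the prepare path relies on explicit ordering. It clears dw0_info so software owns the GPD while it is being filled in, writes the buffer, length and next-GPD words, and only then sets IOC and HWO together as the final store. A compact sketch of that ordering; struct fake_gpd is only a guess at the four 32-bit words the code touches (the real struct qmu_gpd lives in the driver headers, not in this patch), and endianness conversions are omitted for brevity.

/* Illustrative sketch only: stand-in GPD layout, ordering as in the patch. */
#include <stdint.h>

#define GPD_FLAGS_HWO (1u << 0)   /* hardware owns the GPD */
#define GPD_FLAGS_IOC (1u << 7)   /* interrupt on completion */

struct fake_gpd {
	uint32_t dw0_info;   /* flags plus layout-dependent extension bits */
	uint32_t next_gpd;   /* lower 32 bits of the next descriptor address */
	uint32_t buffer;     /* lower 32 bits of the data buffer address */
	uint32_t dw3_info;   /* layout-dependent length/extension bits */
};

/* Mirrors the ordering kept by mtu3_prepare_tx_gpd(): build everything
 * while software owns the descriptor, hand it to hardware last. */
void prepare_tx_like(struct fake_gpd *gpd, uint32_t buf_lo,
		     uint32_t next_lo, uint32_t data_len)
{
	gpd->dw0_info = 0;                 /* SW owns it while we fill it in */
	gpd->buffer = buf_lo;
	gpd->dw3_info = data_len;
	gpd->next_gpd = next_lo;
	gpd->dw0_info |= GPD_FLAGS_IOC | GPD_FLAGS_HWO;  /* hand over last */
}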
@@ -252,16 +287,14 @@ static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
 	struct qmu_gpd *gpd = ring->enqueue;
 	struct usb_request *req = &mreq->request;
+	struct mtu3 *mtu = mep->mtu;
 	dma_addr_t enq_dma;
-	u16 ext_addr;
-
-	/* set all fields to zero as default value */
-	memset(gpd, 0, sizeof(*gpd));
+	u32 ext_addr;
 
+	gpd->dw0_info = 0;	/* SW own it */
 	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
-	ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
-	gpd->data_buf_len = cpu_to_le16(req->length);
-	gpd->flag |= GPD_FLAGS_IOC;
+	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
+	gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));
 
 	/* get the next GPD */
 	enq = advance_enq_gpd(ring);
@@ -269,13 +302,14 @@ static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
 	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
 		mep->epnum, gpd, enq, &enq_dma);
 
-	enq->flag &= ~GPD_FLAGS_HWO;
+	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
 	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
-	ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
-	gpd->rx_ext_addr = cpu_to_le16(ext_addr);
-	gpd->flag |= GPD_FLAGS_HWO;
+	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
+	gpd->dw3_info = cpu_to_le32(ext_addr);
+	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
 
 	mreq->gpd = gpd;
+	trace_mtu3_prepare_gpd(mep, gpd);
 
 	return 0;
 }
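Aside, not part of the patch: note that RX and TX keep their lengths in different words, the RX buffer length in dw0_info via GPD_RX_BUF_LEN() and the TX data length in dw3_info via GPD_DATA_LEN(). The bit positions of the two RX encodings can be sanity-checked in isolation:

/* Illustrative check only: where the RX buffer length lands in dw0_info for
 * the two encodings behind GPD_RX_BUF_LEN(), 16 bits at [31:16] on the
 * original layout and 20 bits at [31:12] on gen2. */
#include <assert.h>
#include <stdint.h>

#define GPD_RX_BUF_LEN_OG(x) (((x) & 0xffffu) << 16)
#define GPD_RX_BUF_LEN_EL(x) (((x) & 0xfffffu) << 12)

int main(void)
{
	uint32_t len = 0x1234;

	assert((GPD_RX_BUF_LEN_OG(len) >> 16) == len);  /* original layout */
	assert((GPD_RX_BUF_LEN_EL(len) >> 12) == len);  /* gen2 layout */
	return 0;
}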
@@ -382,27 +416,25 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
 	void __iomem *mbase = mtu->mac_base;
 	struct qmu_gpd *gpd_current = NULL;
-	struct usb_request *req = NULL;
 	struct mtu3_request *mreq;
 	dma_addr_t cur_gpd_dma;
 	u32 txcsr = 0;
 	int ret;
 
 	mreq = next_request(mep);
-	if (mreq && mreq->request.length == 0)
-		req = &mreq->request;
-	else
+	if (mreq && mreq->request.length != 0)
 		return;
 
 	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
 	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
 
-	if (le16_to_cpu(gpd_current->buf_len) != 0) {
+	if (GPD_DATA_LEN(mtu, le32_to_cpu(gpd_current->dw3_info)) != 0) {
 		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
 		return;
 	}
 
-	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, req);
+	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
+	trace_mtu3_zlp_exp_gpd(mep, gpd_current);
 
 	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
 
@@ -415,8 +447,7 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
 	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
 
 	/* by pass the current GDP */
-	gpd_current->flag |= GPD_FLAGS_BPS;
-	gpd_current->flag |= GPD_FLAGS_HWO;
+	gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
 
 	/*enable DMAREQEN, switch back to QMU mode */
 	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
@@ -448,7 +479,7 @@ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
 	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
 		__func__, epnum, gpd, gpd_current, ring->enqueue);
 
-	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
 
 		mreq = next_request(mep);
 
@@ -458,7 +489,8 @@ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
 		}
 
 		request = &mreq->request;
-		request->actual = le16_to_cpu(gpd->buf_len);
+		request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
+		trace_mtu3_complete_gpd(mep, gpd);
 		mtu3_req_complete(mep, request, 0);
 
 		gpd = advance_deq_gpd(ring);
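Aside, not part of the patch: the completion walk now uses GET_GPD_HWO() to stop at the first descriptor the hardware still owns, and decodes the completed length with GPD_DATA_LEN() instead of reading a 16-bit buf_len field. The overall shape of that walk is sketched below with stand-in types and a plain array-backed ring instead of the driver's mtu3_gpd_ring helpers.

/* Stand-in types, just to show the shape of the walk in qmu_done_tx() and
 * qmu_done_rx(): complete finished requests until we reach the descriptor
 * the hardware is processing or one it still owns. */
#include <stdbool.h>
#include <stdint.h>

#define GPD_FLAGS_HWO (1u << 0)

struct fake_gpd {
	uint32_t dw0_info;
};

static bool hw_owns(const struct fake_gpd *gpd)
{
	return gpd->dw0_info & GPD_FLAGS_HWO;   /* same test as GET_GPD_HWO() */
}

/* Returns the new dequeue position. */
struct fake_gpd *complete_done_gpds(struct fake_gpd *deq,
				    const struct fake_gpd *current,
				    struct fake_gpd *ring_start,
				    struct fake_gpd *ring_end)
{
	while (deq != current && !hw_owns(deq)) {
		/* the driver reads the length and completes the request here */
		deq = (deq == ring_end) ? ring_start : deq + 1;  /* wrap */
	}
	return deq;
}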
@@ -486,7 +518,7 @@ static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
 	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
 		__func__, epnum, gpd, gpd_current, ring->enqueue);
 
-	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
 
 		mreq = next_request(mep);
 
@@ -496,7 +528,8 @@ static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
 		}
 		req = &mreq->request;
 
-		req->actual = le16_to_cpu(gpd->buf_len);
+		req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
+		trace_mtu3_complete_gpd(mep, gpd);
 		mtu3_req_complete(mep, req, 0);
 
 		gpd = advance_deq_gpd(ring);
@@ -574,6 +607,7 @@ irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
 	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
 		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
 		qmu_status);
+	trace_mtu3_qmu_isr(qmu_done_status, qmu_status);
 
 	if (qmu_done_status)
 		qmu_done_isr(mtu, qmu_done_status);