author     Felipe Contreras <felipe.contreras@gmail.com>  2010-11-10 13:12:19 -0500
committer  Omar Ramirez Luna <omar.ramirez@ti.com>  2010-11-10 19:34:44 -0500
commit     1cf3fb2d359a87880a6a6e0cb25b2ec2d493b119 (patch)
tree       c2dd70ce07ab0b80230c9c83cb0a614574cfedb3 /drivers/staging
parent     d0b345f3ee03a7e8823dd3f8dfbb48aaeeac7c89 (diff)

Revert "staging: tidspbridge - move shared memory iommu maps to tiomap3430.c"

This reverts commit 0c10e91b6cc9d1c6a23e9eed3e0653f30b6eb3d3.

Signed-off-by: Felipe Contreras <felipe.contreras@gmail.com>
Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>
Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/tidspbridge/core/_tiomap.h      13
-rw-r--r--  drivers/staging/tidspbridge/core/io_sm.c       125
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430.c  135
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap_io.c    12
4 files changed, 146 insertions(+), 139 deletions(-)
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index c1bf95d756b3..a42c3931fc7e 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -308,18 +308,6 @@ static const struct bpwr_clk_t bpwr_clks[] = {
 
 #define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index)))
 
-struct shm_segs {
-	u32 seg0_da;
-	u32 seg0_pa;
-	u32 seg0_va;
-	u32 seg0_size;
-	u32 seg1_da;
-	u32 seg1_pa;
-	u32 seg1_va;
-	u32 seg1_size;
-};
-
-
 /* This Bridge driver's device context: */
 struct bridge_dev_context {
 	struct dev_object *hdev_obj;	/* Handle to Bridge device object. */
@@ -343,7 +331,6 @@ struct bridge_dev_context {
 
 	struct omap_mbox *mbox;		/* Mail box handle */
 	struct iommu *dsp_mmu;		/* iommu for iva2 handler */
-	struct shm_segs sh_s;
 	struct cfg_hostres *resources;	/* Host Resources */
 
 	/*
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 56856ad54fec..842b8dbc441a 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -291,7 +291,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	struct cod_manager *cod_man;
 	struct chnl_mgr *hchnl_mgr;
 	struct msg_mgr *hmsg_mgr;
-	struct shm_segs *sm_sg;
+	struct iommu *mmu;
 	u32 ul_shm_base;
 	u32 ul_shm_base_offset;
 	u32 ul_shm_limit;
@@ -317,6 +317,14 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	u32 shm0_end;
 	u32 ul_dyn_ext_base;
 	u32 ul_seg1_size = 0;
+	u32 pa_curr = 0;
+	u32 va_curr = 0;
+	u32 gpp_va_curr = 0;
+	u32 num_bytes = 0;
+	u32 all_bits = 0;
+	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
+	};
 
 	status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
 	if (!pbridge_context) {
@@ -329,7 +337,19 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 		status = -EFAULT;
 		goto func_end;
 	}
-	sm_sg = &pbridge_context->sh_s;
+	mmu = pbridge_context->dsp_mmu;
+
+	if (mmu)
+		iommu_put(mmu);
+	mmu = iommu_get("iva2");
+
+	if (IS_ERR_OR_NULL(mmu)) {
+		dev_err(bridge, "iommu_get failed!\n");
+		pbridge_context->dsp_mmu = NULL;
+		status = -EFAULT;
+		goto func_end;
+	}
+	pbridge_context->dsp_mmu = mmu;
 
 	status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
 	if (!cod_man) {
@@ -465,14 +485,74 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	if (status)
 		goto func_end;
 
-	sm_sg->seg1_pa = ul_gpp_pa;
-	sm_sg->seg1_da = ul_dyn_ext_base;
-	sm_sg->seg1_va = ul_gpp_va;
-	sm_sg->seg1_size = ul_seg1_size;
-	sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size;
-	sm_sg->seg0_da = ul_dsp_va;
-	sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size;
-	sm_sg->seg0_size = ul_seg_size;
+	pa_curr = ul_gpp_pa;
+	va_curr = ul_dyn_ext_base * hio_mgr->word_size;
+	gpp_va_curr = ul_gpp_va;
+	num_bytes = ul_seg1_size;
+
+	va_curr = iommu_kmap(mmu, va_curr, pa_curr, num_bytes,
+			IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
+	if (IS_ERR_VALUE(va_curr)) {
+		status = (int)va_curr;
+		goto func_end;
+	}
+
+	pa_curr += ul_pad_size + num_bytes;
+	va_curr += ul_pad_size + num_bytes;
+	gpp_va_curr += ul_pad_size + num_bytes;
+
+	/* Configure the TLB entries for the next cacheable segment */
+	num_bytes = ul_seg_size;
+	va_curr = ul_dsp_va * hio_mgr->word_size;
+	while (num_bytes) {
+		/*
+		 * To find the max. page size with which both PA & VA are
+		 * aligned.
+		 */
+		all_bits = pa_curr | va_curr;
+		dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
+			"va_curr %x, num_bytes %x\n", all_bits, pa_curr,
+			va_curr, num_bytes);
+		for (i = 0; i < 4; i++) {
+			if (!(num_bytes >= page_size[i]) ||
+			    !((all_bits & (page_size[i] - 1)) == 0))
+				continue;
+			if (ndx < MAX_LOCK_TLB_ENTRIES) {
+				/*
+				 * This is the physical address written to
+				 * DSP MMU.
+				 */
+				ae_proc[ndx].ul_gpp_pa = pa_curr;
+				/*
+				 * This is the virtual uncached ioremapped
+				 * address!!!
+				 */
+				ae_proc[ndx].ul_gpp_va = gpp_va_curr;
+				ae_proc[ndx].ul_dsp_va =
+					va_curr / hio_mgr->word_size;
+				ae_proc[ndx].ul_size = page_size[i];
+				ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
+				ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
+				ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
+				dev_dbg(bridge, "shm MMU TLB entry PA %x"
+					" VA %x DSP_VA %x Size %x\n",
+					ae_proc[ndx].ul_gpp_pa,
+					ae_proc[ndx].ul_gpp_va,
+					ae_proc[ndx].ul_dsp_va *
+					hio_mgr->word_size, page_size[i]);
+				ndx++;
+			}
+			pa_curr += page_size[i];
+			va_curr += page_size[i];
+			gpp_va_curr += page_size[i];
+			num_bytes -= page_size[i];
+			/*
+			 * Don't try smaller sizes. Hopefully we have reached
+			 * an address aligned to a bigger page size.
+			 */
+			break;
+		}
+	}
 
 	/*
 	 * Copy remaining entries from CDB. All entries are 1 MB and
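The loop restored above picks, at each step, the largest DSP-MMU page size to which both the current physical address and DSP virtual address are aligned and that still fits in the remaining byte count, then advances by that page and retries from the largest size. A minimal standalone sketch of that selection rule, with concrete sizes standing in for the HW_PAGE_SIZE* macros; all names and sample addresses here are illustrative, not from the driver:

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-ins for HW_PAGE_SIZE16MB .. HW_PAGE_SIZE4KB. */
static const uint32_t page_size[] = {
	16u << 20, 1u << 20, 64u << 10, 4u << 10
};

/*
 * Largest page usable at (pa, va) with num_bytes remaining: both
 * addresses must be aligned to the page and the page must still fit.
 */
static uint32_t pick_page(uint32_t pa, uint32_t va, uint32_t num_bytes)
{
	uint32_t all_bits = pa | va;	/* common alignment of PA and VA */
	int i;

	for (i = 0; i < 4; i++)
		if (num_bytes >= page_size[i] &&
		    (all_bits & (page_size[i] - 1)) == 0)
			return page_size[i];
	return 0;	/* nothing fits (e.g. fewer than 4 KB left) */
}

int main(void)
{
	uint32_t pa = 0x87000000, va = 0x11000000, left = 0x01200000;

	/* 16 MB + 2 MB of aligned space: maps one 16 MB then two 1 MB pages. */
	while (left) {
		uint32_t sz = pick_page(pa, va, left);
		if (!sz)
			break;
		printf("map PA %#x -> DSP VA %#x, size %#x\n", pa, va, sz);
		pa += sz;
		va += sz;
		left -= sz;
	}
	return 0;
}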
@@ -519,6 +599,24 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 		goto func_end;
 	}
 
+	/* Map the L4 peripherals */
+	i = 0;
+	while (l4_peripheral_table[i].phys_addr) {
+		status = iommu_kmap(mmu, l4_peripheral_table[i].
+			dsp_virt_addr, l4_peripheral_table[i].phys_addr,
+			PAGE_SIZE, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
+		if (IS_ERR_VALUE(status))
+			break;
+		i++;
+	}
+	if (IS_ERR_VALUE(status)) {
+		while (i--)
+			iommu_kunmap(mmu, l4_peripheral_table[i].
+				dsp_virt_addr);
+		goto func_end;
+	}
+	status = 0;
+
 	for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
 		ae_proc[i].ul_dsp_va = 0;
 		ae_proc[i].ul_gpp_pa = 0;
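The restored block walks the sentinel-terminated l4_peripheral_table, maps each entry, and on failure unwinds only the entries already mapped before giving up. A compilable sketch of that map-or-unwind pattern, with hypothetical map_one()/unmap_one() primitives and sample addresses in place of iommu_kmap()/iommu_kunmap() and the real table:

#include <stdint.h>
#include <stdio.h>

/* Sentinel-terminated, like l4_peripheral_table (phys_addr == 0 ends it). */
struct periph {
	uint32_t phys_addr;
	uint32_t dsp_virt_addr;
};

static int map_one(const struct periph *p)
{
	printf("map   %#x -> %#x\n", p->phys_addr, p->dsp_virt_addr);
	return 0;	/* return nonzero here to exercise the unwind path */
}

static void unmap_one(const struct periph *p)
{
	printf("unmap %#x\n", p->dsp_virt_addr);
}

static int map_table(const struct periph *tbl)
{
	int i = 0, err = 0;

	while (tbl[i].phys_addr) {
		err = map_one(&tbl[i]);
		if (err)
			break;
		i++;
	}
	if (err) {
		while (i--)	/* undo only what was mapped, in reverse */
			unmap_one(&tbl[i]);
		return err;
	}
	return 0;
}

int main(void)
{
	static const struct periph tbl[] = {
		{ 0x48002000, 0x11802000 },	/* hypothetical entries */
		{ 0x48004000, 0x11804000 },
		{ 0 }				/* sentinel */
	};

	return map_table(tbl);
}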
@@ -541,12 +639,12 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 		status = -EFAULT;
 		goto func_end;
 	} else {
-		if (sm_sg->seg0_da > ul_shm_base) {
+		if (ae_proc[0].ul_dsp_va > ul_shm_base) {
 			status = -EPERM;
 			goto func_end;
 		}
 		/* ul_shm_base may not be at ul_dsp_va address */
-		ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) *
+		ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
 		    hio_mgr->word_size;
 		/*
 		 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
@@ -570,7 +668,8 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 			goto func_end;
 		}
 		/* Register SM */
-		status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa);
+		status =
+		    register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
 	}
 
 	hio_mgr->shared_mem = (struct shm *)ul_shm_base;
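With this revert, bridge_io_on_loaded() again owns the IOMMU handle lifecycle: any handle cached from a previous image load is released with iommu_put() before a fresh one is taken with iommu_get("iva2") and stored back in the device context, and the cache is left NULL on failure. A sketch of that drop-then-reacquire idiom, with hypothetical handle_get()/handle_put() stand-ins for the OMAP iommu calls:

#include <stdio.h>
#include <stdlib.h>

struct handle { const char *name; };

/* Hypothetical stand-ins for iommu_get()/iommu_put(). */
static struct handle *handle_get(const char *name)
{
	struct handle *h = malloc(sizeof(*h));

	if (h)
		h->name = name;
	return h;
}

static void handle_put(struct handle *h)
{
	free(h);	/* free(NULL) is a no-op, like the guarded put */
}

/*
 * Refresh a cached handle: drop any stale one first so reloading the
 * DSP image never leaks the previous acquisition, and leave the cache
 * NULL on failure, mirroring the error path in bridge_io_on_loaded().
 */
static int refresh(struct handle **cached)
{
	if (*cached)
		handle_put(*cached);

	*cached = handle_get("iva2");
	if (!*cached)
		return -1;
	return 0;
}

int main(void)
{
	struct handle *mmu = NULL;

	if (refresh(&mmu) == 0)
		printf("got %s\n", mmu->name);
	handle_put(mmu);
	return 0;
}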
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index 7ab272ca643d..ec85529efee0 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -301,7 +301,8 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
 		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
 					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
 	}
-
+	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+				OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 	dsp_clk_enable(DSP_CLK_IVA2);
 
 	/* set the device state to IDLE */
@@ -372,17 +373,15 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 {
 	int status = 0;
 	struct bridge_dev_context *dev_context = dev_ctxt;
-	struct iommu *mmu = NULL;
-	struct shm_segs *sm_sg;
-	int l4_i = 0, tlb_i = 0;
-	u32 sg0_da = 0, sg1_da = 0;
-	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
+	struct iommu *mmu;
 	u32 dw_sync_addr = 0;
 	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
 	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
 	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
 	/* Offset of shm_base_virt from tlb_base_virt */
 	u32 ul_shm_offset_virt;
+	s32 entry_ndx;
+	s32 itmp_entry_ndx = 0;	/* DSP-MMU TLB entry base address */
 	struct cfg_hostres *resources = NULL;
 	u32 temp;
 	u32 ul_dsp_clk_rate;
@@ -394,6 +393,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 	struct omap_dsp_platform_data *pdata =
 				omap_dspbridge_dev->dev.platform_data;
 
+	mmu = dev_context->dsp_mmu;
 	/* The device context contains all the mmu setup info from when the
 	 * last dsp base image was loaded. The first entry is always
 	 * SHMMEM base. */
@@ -403,12 +403,12 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 	ul_shm_base_virt *= DSPWORDSIZE;
 	DBC_ASSERT(ul_shm_base_virt != 0);
 	/* DSP Virtual address */
-	ul_tlb_base_virt = dev_context->sh_s.seg0_da;
+	ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
 	DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
 	ul_shm_offset_virt =
 	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
 	/* Kernel logical address */
-	ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt;
+	ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
 
 	DBC_ASSERT(ul_shm_base != 0);
 	/* 2nd wd is used as sync field */
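This hunk recomputes the kernel address of shared memory from the first TLB entry instead of the removed shm_segs fields. The arithmetic converts between DSP word addresses and GPP byte addresses: the shared-memory symbol and the entry's ul_dsp_va are both word addresses, and the byte offset between them is added to the entry's kernel virtual base. A worked sketch of that translation, with the DSPWORDSIZE value assumed for illustration:

#include <stdint.h>
#include <stdio.h>

#define DSPWORDSIZE 4u	/* assumed word size for this sketch */

/*
 * Given the first TLB entry (DSP VA in words, GPP VA in bytes) and a
 * shared-memory base symbol in DSP words, compute the GPP address,
 * mirroring the ul_shm_offset_virt arithmetic in bridge_brd_start().
 */
static uint32_t shm_gpp_base(uint32_t tlb_dsp_va_words,
			     uint32_t tlb_gpp_va,
			     uint32_t shm_base_words)
{
	uint32_t shm_base_bytes = shm_base_words * DSPWORDSIZE;
	uint32_t offset = shm_base_bytes - tlb_dsp_va_words * DSPWORDSIZE;

	return tlb_gpp_va + offset;
}

int main(void)
{
	/* Hypothetical values: TLB entry at DSP word 0x4000000 mapped at
	 * kernel VA 0xd0000000, SHM symbol 0x100 words further in:
	 * prints 0xd0000400. */
	printf("%#x\n", shm_gpp_base(0x4000000, 0xd0000000, 0x4000100));
	return 0;
}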
@@ -443,70 +443,25 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 					OMAP343X_CONTROL_IVA2_BOOTMOD));
 		}
 	}
-
-	if (!status) {
-		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
-				OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
-		mmu = dev_context->dsp_mmu;
-		if (mmu)
-			iommu_put(mmu);
-		mmu = iommu_get("iva2");
-		if (IS_ERR(mmu)) {
-			dev_err(bridge, "iommu_get failed!\n");
-			dev_context->dsp_mmu = NULL;
-			status = (int)mmu;
-		}
-	}
 	if (!status) {
-		dev_context->dsp_mmu = mmu;
-		sm_sg = &dev_context->sh_s;
-		sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa,
-			sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-		if (IS_ERR_VALUE(sg0_da)) {
-			status = (int)sg0_da;
-			sg0_da = 0;
-		}
-	}
-	if (!status) {
-		sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa,
-			sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-		if (IS_ERR_VALUE(sg1_da)) {
-			status = (int)sg1_da;
-			sg1_da = 0;
-		}
-	}
-	if (!status) {
-		u32 da;
-		for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) {
-			if (!tlb[tlb_i].ul_gpp_pa)
+		/* Only make TLB entry if both addresses are non-zero */
+		for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
+		     entry_ndx++) {
+			struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
+
+			if (!e->ul_gpp_pa || !e->ul_dsp_va)
 				continue;
 
-			dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size"
-				" 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa,
-				tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size);
+			dev_dbg(bridge,
+				"MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
+				itmp_entry_ndx,
+				e->ul_gpp_pa,
+				e->ul_dsp_va,
+				e->ul_size);
 
-			da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va,
-					tlb[tlb_i].ul_gpp_pa, PAGE_SIZE,
+			iommu_kmap(mmu, e->ul_dsp_va, e->ul_gpp_pa, e->ul_size,
 					IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-			if (IS_ERR_VALUE(da)) {
-				status = (int)da;
-				break;
-			}
-		}
-	}
-	if (!status) {
-		u32 da;
-		l4_i = 0;
-		while (l4_peripheral_table[l4_i].phys_addr) {
-			da = iommu_kmap(mmu, l4_peripheral_table[l4_i].
-				dsp_virt_addr, l4_peripheral_table[l4_i].
-				phys_addr, PAGE_SIZE,
-				IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-			if (IS_ERR_VALUE(da)) {
-				status = (int)da;
-				break;
-			}
-			l4_i++;
+			itmp_entry_ndx++;
 		}
 	}
 
@@ -619,23 +574,11 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 
 		/* update board state */
 		dev_context->dw_brd_state = BRD_RUNNING;
-		return 0;
+		/* (void)chnlsm_enable_interrupt(dev_context); */
 	} else {
 		dev_context->dw_brd_state = BRD_UNKNOWN;
 	}
 	}
-
-	while (tlb_i--) {
-		if (!tlb[tlb_i].ul_gpp_pa)
-			continue;
-		iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va);
-	}
-	while (l4_i--)
-		iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr);
-	if (sg0_da)
-		iommu_kunmap(mmu, sg0_da);
-	if (sg1_da)
-		iommu_kunmap(mmu, sg1_da);
 	return status;
 }
 
@@ -653,8 +596,6 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
 	struct bridge_dev_context *dev_context = dev_ctxt;
 	struct pg_table_attrs *pt_attrs;
 	u32 dsp_pwr_state;
-	int i;
-	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
 	struct omap_dsp_platform_data *pdata =
 				omap_dspbridge_dev->dev.platform_data;
 
@@ -698,37 +639,17 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
 		memset((u8 *) pt_attrs->pg_info, 0x00,
 		       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
 	}
-	/* Reset DSP */
-	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
-		OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
-
 	/* Disable the mailbox interrupts */
 	if (dev_context->mbox) {
 		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
 		omap_mbox_put(dev_context->mbox);
 		dev_context->mbox = NULL;
 	}
-	if (dev_context->dsp_mmu) {
-		pr_err("Proc stop mmu if statement\n");
-		for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) {
-			if (!tlb[i].ul_gpp_pa)
-				continue;
-			iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va);
-		}
-		i = 0;
-		while (l4_peripheral_table[i].phys_addr) {
-			iommu_kunmap(dev_context->dsp_mmu,
-				l4_peripheral_table[i].dsp_virt_addr);
-			i++;
-		}
-		iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da);
-		iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da);
-		iommu_put(dev_context->dsp_mmu);
-		dev_context->dsp_mmu = NULL;
-	}
-	/* Reset IVA IOMMU*/
-	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
-		OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+	if (dev_context->dsp_mmu)
+		dev_context->dsp_mmu = (iommu_put(dev_context->dsp_mmu), NULL);
+	/* Reset IVA2 clocks*/
+	(*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
+			OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 
 	dsp_clock_disable_all(dev_context->dsp_per_clks);
 	dsp_clk_disable(DSP_CLK_IVA2);
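The restored bridge_brd_stop() releases the MMU handle with a comma expression, dev_context->dsp_mmu = (iommu_put(dev_context->dsp_mmu), NULL);, which is put-then-clear folded into one statement: the left operand runs only for its side effect, and NULL is the value assigned. A tiny runnable demonstration of that equivalence, using a hypothetical put_ref() in place of iommu_put():

#include <stdio.h>
#include <stdlib.h>

static void put_ref(void *p)
{
	free(p);	/* stands in for iommu_put() */
}

int main(void)
{
	char *h = malloc(16);

	/* Comma expression: put_ref(h) runs first, then NULL is assigned. */
	h = (put_ref(h), NULL);

	/* Equivalent two-statement form:
	 *	put_ref(h);
	 *	h = NULL;
	 */
	printf("h = %p\n", (void *)h);
	return 0;
}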
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index 5ba679577354..ba2961049dad 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -134,9 +134,10 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
 
 	if (!status) {
 		ul_tlb_base_virt =
-		    dev_context->sh_s.seg0_da * DSPWORDSIZE;
+		    dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
 		DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
-		dw_ext_prog_virt_mem = dev_context->sh_s.seg0_va;
+		dw_ext_prog_virt_mem =
+			dev_context->atlb_entry[0].ul_gpp_va;
 
 		if (!trace_read) {
 			ul_shm_offset_virt =
@@ -317,9 +318,8 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
 			ret = -EPERM;
 
 	if (!ret) {
-		ul_tlb_base_virt = dev_context->sh_s.seg0_da *
-		    DSPWORDSIZE;
-
+		ul_tlb_base_virt =
+		    dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
 		DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
 
 		if (symbols_reloaded) {
@@ -337,7 +337,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
 			    ul_shm_base_virt - ul_tlb_base_virt;
 			if (trace_load) {
 				dw_ext_prog_virt_mem =
-				    dev_context->sh_s.seg0_va;
+				    dev_context->atlb_entry[0].ul_gpp_va;
 			} else {
 				dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
 				dw_ext_prog_virt_mem +=