author     Felipe Contreras <felipe.contreras@gmail.com>   2010-11-10 13:12:45 -0500
committer  Omar Ramirez Luna <omar.ramirez@ti.com>         2010-11-10 19:34:44 -0500
commit     50ad26f4c9710a64c3728f08c3fa6f4b6a869376 (patch)
tree       95a64de3ee25de2be2f9c04cac743da0f29c03e5 /drivers
parent     1cf3fb2d359a87880a6a6e0cb25b2ec2d493b119 (diff)
Revert "staging: tidspbridge: replace iommu custom for opensource implementation"
This reverts commit d95ec7e2fd5cebf2f1caf3f572fa5e0a820ac5b1.

Signed-off-by: Felipe Contreras <felipe.contreras@gmail.com>
Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/staging/tidspbridge/core/_tiomap.h                   4
-rw-r--r--   drivers/staging/tidspbridge/core/io_sm.c                   121
-rw-r--r--   drivers/staging/tidspbridge/core/tiomap3430.c              516
-rw-r--r--   drivers/staging/tidspbridge/core/ue_deh.c                    8
-rw-r--r--   drivers/staging/tidspbridge/include/dspbridge/dspdefs.h     2
-rw-r--r--   drivers/staging/tidspbridge/rmgr/proc.c                      2
6 files changed, 518 insertions(+), 135 deletions(-)
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index a42c3931fc7e..1c1f157e167a 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -23,8 +23,6 @@
 #include <plat/clockdomain.h>
 #include <mach-omap2/prm-regbits-34xx.h>
 #include <mach-omap2/cm-regbits-34xx.h>
-#include <plat/iommu.h>
-#include <plat/iovmm.h>
 #include <dspbridge/devdefs.h>
 #include <hw_defs.h>
 #include <dspbridge/dspioctl.h>	/* for bridge_ioctl_extproc defn */
@@ -330,7 +328,7 @@ struct bridge_dev_context {
         u32 dw_internal_size;   /* Internal memory size */
 
         struct omap_mbox *mbox;         /* Mail box handle */
-        struct iommu *dsp_mmu;          /* iommu for iva2 handler */
+
         struct cfg_hostres *resources;  /* Host Resources */
 
         /*
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 842b8dbc441a..571864555ddd 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -291,7 +291,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
         struct cod_manager *cod_man;
         struct chnl_mgr *hchnl_mgr;
         struct msg_mgr *hmsg_mgr;
-        struct iommu *mmu;
         u32 ul_shm_base;
         u32 ul_shm_base_offset;
         u32 ul_shm_limit;
@@ -314,6 +313,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
         struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
         struct cfg_hostres *host_res;
         struct bridge_dev_context *pbridge_context;
+        u32 map_attrs;
         u32 shm0_end;
         u32 ul_dyn_ext_base;
         u32 ul_seg1_size = 0;
@@ -337,20 +337,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
                 status = -EFAULT;
                 goto func_end;
         }
-        mmu = pbridge_context->dsp_mmu;
-
-        if (mmu)
-                iommu_put(mmu);
-        mmu = iommu_get("iva2");
-
-        if (IS_ERR_OR_NULL(mmu)) {
-                dev_err(bridge, "iommu_get failed!\n");
-                pbridge_context->dsp_mmu = NULL;
-                status = -EFAULT;
-                goto func_end;
-        }
-        pbridge_context->dsp_mmu = mmu;
-
         status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
         if (!cod_man) {
                 status = -EFAULT;
@@ -490,16 +476,55 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
         gpp_va_curr = ul_gpp_va;
         num_bytes = ul_seg1_size;
 
-        va_curr = iommu_kmap(mmu, va_curr, pa_curr, num_bytes,
-                             IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-        if (IS_ERR_VALUE(va_curr)) {
-                status = (int)va_curr;
-                goto func_end;
-        }
+        /*
+         * Try to fit into TLB entries. If not possible, push them to page
+         * tables. It is quite possible that if sections are not on
+         * bigger page boundary, we may end up making several small pages.
+         * So, push them onto page tables, if that is the case.
+         */
+        map_attrs = 0x00000000;
+        map_attrs = DSP_MAPLITTLEENDIAN;
+        map_attrs |= DSP_MAPPHYSICALADDR;
+        map_attrs |= DSP_MAPELEMSIZE32;
+        map_attrs |= DSP_MAPDONOTLOCK;
 
-        pa_curr += ul_pad_size + num_bytes;
-        va_curr += ul_pad_size + num_bytes;
-        gpp_va_curr += ul_pad_size + num_bytes;
+        while (num_bytes) {
+                /*
+                 * To find the max. page size with which both PA & VA are
+                 * aligned.
+                 */
+                all_bits = pa_curr | va_curr;
+                dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
+                        "num_bytes %x\n", all_bits, pa_curr, va_curr,
+                        num_bytes);
+                for (i = 0; i < 4; i++) {
+                        if ((num_bytes >= page_size[i]) && ((all_bits &
+                            (page_size[i] -
+                             1)) == 0)) {
+                                status =
+                                    hio_mgr->intf_fxns->
+                                    pfn_brd_mem_map(hio_mgr->hbridge_context,
+                                                    pa_curr, va_curr,
+                                                    page_size[i], map_attrs,
+                                                    NULL);
+                                if (status)
+                                        goto func_end;
+                                pa_curr += page_size[i];
+                                va_curr += page_size[i];
+                                gpp_va_curr += page_size[i];
+                                num_bytes -= page_size[i];
+                                /*
+                                 * Don't try smaller sizes. Hopefully we have
+                                 * reached an address aligned to a bigger page
+                                 * size.
+                                 */
+                                break;
+                        }
+                }
+        }
+        pa_curr += ul_pad_size;
+        va_curr += ul_pad_size;
+        gpp_va_curr += ul_pad_size;
 
         /* Configure the TLB entries for the next cacheable segment */
         num_bytes = ul_seg_size;
@@ -541,6 +566,22 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
                                         ae_proc[ndx].ul_dsp_va *
                                         hio_mgr->word_size, page_size[i]);
                                 ndx++;
+                        } else {
+                                status =
+                                    hio_mgr->intf_fxns->
+                                    pfn_brd_mem_map(hio_mgr->hbridge_context,
+                                                    pa_curr, va_curr,
+                                                    page_size[i], map_attrs,
+                                                    NULL);
+                                dev_dbg(bridge,
+                                        "shm MMU PTE entry PA %x"
+                                        " VA %x DSP_VA %x Size %x\n",
+                                        ae_proc[ndx].ul_gpp_pa,
+                                        ae_proc[ndx].ul_gpp_va,
+                                        ae_proc[ndx].ul_dsp_va *
+                                        hio_mgr->word_size, page_size[i]);
+                                if (status)
+                                        goto func_end;
                         }
                         pa_curr += page_size[i];
                         va_curr += page_size[i];
@@ -593,29 +634,37 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
593 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa, 634 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
594 ae_proc[ndx].ul_dsp_va); 635 ae_proc[ndx].ul_dsp_va);
595 ndx++; 636 ndx++;
637 } else {
638 status = hio_mgr->intf_fxns->pfn_brd_mem_map
639 (hio_mgr->hbridge_context,
640 hio_mgr->ext_proc_info.ty_tlb[i].
641 ul_gpp_phys,
642 hio_mgr->ext_proc_info.ty_tlb[i].
643 ul_dsp_virt, 0x100000, map_attrs,
644 NULL);
596 } 645 }
597 } 646 }
598 if (status) 647 if (status)
599 goto func_end; 648 goto func_end;
600 } 649 }
601 650
651 map_attrs = 0x00000000;
652 map_attrs = DSP_MAPLITTLEENDIAN;
653 map_attrs |= DSP_MAPPHYSICALADDR;
654 map_attrs |= DSP_MAPELEMSIZE32;
655 map_attrs |= DSP_MAPDONOTLOCK;
656
602 /* Map the L4 peripherals */ 657 /* Map the L4 peripherals */
603 i = 0; 658 i = 0;
604 while (l4_peripheral_table[i].phys_addr) { 659 while (l4_peripheral_table[i].phys_addr) {
605 status = iommu_kmap(mmu, l4_peripheral_table[i]. 660 status = hio_mgr->intf_fxns->pfn_brd_mem_map
606 dsp_virt_addr, l4_peripheral_table[i].phys_addr, 661 (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
607 PAGE_SIZE, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); 662 l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
608 if (IS_ERR_VALUE(status)) 663 map_attrs, NULL);
609 break; 664 if (status)
665 goto func_end;
610 i++; 666 i++;
611 } 667 }
612 if (IS_ERR_VALUE(status)) {
613 while (i--)
614 iommu_kunmap(mmu, l4_peripheral_table[i].
615 dsp_virt_addr);
616 goto func_end;
617 }
618 status = 0;
619 668
620 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { 669 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
621 ae_proc[i].ul_dsp_va = 0; 670 ae_proc[i].ul_dsp_va = 0;
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index ec85529efee0..1be081f917a7 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -114,7 +114,7 @@ static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
                                  u32 ul_num_bytes, u32 ul_map_attr,
                                  struct page **mapped_pages);
 static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
-                                 u32 da);
+                                 u32 virt_addr, u32 ul_num_bytes);
 static int bridge_dev_create(struct bridge_dev_context
                              **dev_cntxt,
                              struct dev_object *hdev_obj,
@@ -122,8 +122,6 @@ static int bridge_dev_create(struct bridge_dev_context
 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
                            u32 dw_cmd, void *pargs);
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
-static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
-                        struct page **usr_pgs);
 static u32 user_va2_pa(struct mm_struct *mm, u32 address);
 static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
                       u32 va, u32 size,
@@ -373,7 +371,6 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 {
         int status = 0;
         struct bridge_dev_context *dev_context = dev_ctxt;
-        struct iommu *mmu;
         u32 dw_sync_addr = 0;
         u32 ul_shm_base;        /* Gpp Phys SM base addr(byte) */
         u32 ul_shm_base_virt;   /* Dsp Virt SM base addr */
@@ -393,7 +390,6 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
         struct omap_dsp_platform_data *pdata =
             omap_dspbridge_dev->dev.platform_data;
 
-        mmu = dev_context->dsp_mmu;
         /* The device context contains all the mmu setup info from when the
          * last dsp base image was loaded. The first entry is always
          * SHMMEM base. */
@@ -444,10 +440,29 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
                 }
         }
         if (!status) {
+                /* Reset and Unreset the RST2, so that BOOTADDR is copied to
+                 * IVA2 SYSC register */
+                (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
+                        OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+                udelay(100);
+                (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+                        OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+                udelay(100);
+
+                /* Disbale the DSP MMU */
+                hw_mmu_disable(resources->dw_dmmu_base);
+                /* Disable TWL */
+                hw_mmu_twl_disable(resources->dw_dmmu_base);
+
                 /* Only make TLB entry if both addresses are non-zero */
                 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
                      entry_ndx++) {
                         struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
+                        struct hw_mmu_map_attrs_t map_attrs = {
+                                .endianism = e->endianism,
+                                .element_size = e->elem_size,
+                                .mixed_size = e->mixed_mode,
+                        };
 
                         if (!e->ul_gpp_pa || !e->ul_dsp_va)
                                 continue;
@@ -459,8 +474,13 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
                                         e->ul_dsp_va,
                                         e->ul_size);
 
-                        iommu_kmap(mmu, e->ul_dsp_va, e->ul_gpp_pa, e->ul_size,
-                                   IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
+                        hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
+                                       e->ul_gpp_pa,
+                                       e->ul_dsp_va,
+                                       e->ul_size,
+                                       itmp_entry_ndx,
+                                       &map_attrs, 1, 1);
+
                         itmp_entry_ndx++;
                 }
         }
@@ -468,13 +488,29 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
         /* Lock the above TLB entries and get the BIOS and load monitor timer
          * information */
         if (!status) {
+                hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
+                hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
+                hw_mmu_ttb_set(resources->dw_dmmu_base,
+                               dev_context->pt_attrs->l1_base_pa);
+                hw_mmu_twl_enable(resources->dw_dmmu_base);
+                /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
+
+                temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
+                temp = (temp & 0xFFFFFFEF) | 0x11;
+                __raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
+
+                /* Let the DSP MMU run */
+                hw_mmu_enable(resources->dw_dmmu_base);
+
                 /* Enable the BIOS clock */
                 (void)dev_get_symbol(dev_context->hdev_obj,
                                      BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
                 (void)dev_get_symbol(dev_context->hdev_obj,
                                      BRIDGEINIT_LOADMON_GPTIMER,
                                      &ul_load_monitor_timer);
+        }
 
+        if (!status) {
                 if (ul_load_monitor_timer != 0xFFFF) {
                         clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
                             ul_load_monitor_timer;
@@ -483,7 +519,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
                         dev_dbg(bridge, "Not able to get the symbol for Load "
                                 "Monitor Timer\n");
                 }
+        }
 
+        if (!status) {
                 if (ul_bios_gp_timer != 0xFFFF) {
                         clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
                             ul_bios_gp_timer;
@@ -492,7 +530,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
                         dev_dbg(bridge,
                                 "Not able to get the symbol for BIOS Timer\n");
                 }
+        }
 
+        if (!status) {
                 /* Set the DSP clock rate */
                 (void)dev_get_symbol(dev_context->hdev_obj,
                                      "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
@@ -545,6 +585,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 
         /* Let DSP go */
         dev_dbg(bridge, "%s Unreset\n", __func__);
+        /* Enable DSP MMU Interrupts */
+        hw_mmu_event_enable(resources->dw_dmmu_base,
+                            HW_MMU_ALL_INTERRUPTS);
         /* release the RST1, DSP starts executing now .. */
         (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
                         OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -645,8 +688,6 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
                 omap_mbox_put(dev_context->mbox);
                 dev_context->mbox = NULL;
         }
-        if (dev_context->dsp_mmu)
-                dev_context->dsp_mmu = (iommu_put(dev_context->dsp_mmu), NULL);
         /* Reset IVA2 clocks*/
         (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
                         OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -1095,81 +1136,217 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
  *
  * TODO: Disable MMU while updating the page tables (but that'll stall DSP)
  */
-static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctx,
-                u32 uva, u32 da, u32 size, u32 attr,
-                struct page **usr_pgs)
-
+static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
+                              u32 ul_mpu_addr, u32 virt_addr,
+                              u32 ul_num_bytes, u32 ul_map_attr,
+                              struct page **mapped_pages)
 {
-        int res, w;
-        unsigned pages, i;
-        struct iommu *mmu = dev_ctx->dsp_mmu;
+        u32 attrs;
+        int status = 0;
+        struct bridge_dev_context *dev_context = dev_ctxt;
+        struct hw_mmu_map_attrs_t hw_attrs;
         struct vm_area_struct *vma;
         struct mm_struct *mm = current->mm;
-        struct sg_table *sgt;
-        struct scatterlist *sg;
-
-        if (!size || !usr_pgs)
+        u32 write = 0;
+        u32 num_usr_pgs = 0;
+        struct page *mapped_page, *pg;
+        s32 pg_num;
+        u32 va = virt_addr;
+        struct task_struct *curr_task = current;
+        u32 pg_i = 0;
+        u32 mpu_addr, pa;
+
+        dev_dbg(bridge,
+                "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
+                __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
+                ul_map_attr);
+        if (ul_num_bytes == 0)
                 return -EINVAL;
 
-        pages = size / PG_SIZE4K;
+        if (ul_map_attr & DSP_MAP_DIR_MASK) {
+                attrs = ul_map_attr;
+        } else {
+                /* Assign default attributes */
+                attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
+        }
+        /* Take mapping properties */
+        if (attrs & DSP_MAPBIGENDIAN)
+                hw_attrs.endianism = HW_BIG_ENDIAN;
+        else
+                hw_attrs.endianism = HW_LITTLE_ENDIAN;
+
+        hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
+            ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
+        /* Ignore element_size if mixed_size is enabled */
+        if (hw_attrs.mixed_size == 0) {
+                if (attrs & DSP_MAPELEMSIZE8) {
+                        /* Size is 8 bit */
+                        hw_attrs.element_size = HW_ELEM_SIZE8BIT;
+                } else if (attrs & DSP_MAPELEMSIZE16) {
+                        /* Size is 16 bit */
+                        hw_attrs.element_size = HW_ELEM_SIZE16BIT;
+                } else if (attrs & DSP_MAPELEMSIZE32) {
+                        /* Size is 32 bit */
+                        hw_attrs.element_size = HW_ELEM_SIZE32BIT;
+                } else if (attrs & DSP_MAPELEMSIZE64) {
+                        /* Size is 64 bit */
+                        hw_attrs.element_size = HW_ELEM_SIZE64BIT;
+                } else {
+                        /*
+                         * Mixedsize isn't enabled, so size can't be
+                         * zero here
+                         */
+                        return -EINVAL;
+                }
+        }
+        if (attrs & DSP_MAPDONOTLOCK)
+                hw_attrs.donotlockmpupage = 1;
+        else
+                hw_attrs.donotlockmpupage = 0;
 
+        if (attrs & DSP_MAPVMALLOCADDR) {
+                return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
+                                       ul_num_bytes, &hw_attrs);
+        }
+        /*
+         * Do OS-specific user-va to pa translation.
+         * Combine physically contiguous regions to reduce TLBs.
+         * Pass the translated pa to pte_update.
+         */
+        if ((attrs & DSP_MAPPHYSICALADDR)) {
+                status = pte_update(dev_context, ul_mpu_addr, virt_addr,
+                                    ul_num_bytes, &hw_attrs);
+                goto func_cont;
+        }
+
+        /*
+         * Important Note: ul_mpu_addr is mapped from user application process
+         * to current process - it must lie completely within the current
+         * virtual memory address space in order to be of use to us here!
+         */
         down_read(&mm->mmap_sem);
-        vma = find_vma(mm, uva);
-        while (vma && (uva + size > vma->vm_end))
-                vma = find_vma(mm, vma->vm_end + 1);
+        vma = find_vma(mm, ul_mpu_addr);
+        if (vma)
+                dev_dbg(bridge,
+                        "VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
+                        "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
+                        ul_num_bytes, vma->vm_start, vma->vm_end,
+                        vma->vm_flags);
 
+        /*
+         * It is observed that under some circumstances, the user buffer is
+         * spread across several VMAs. So loop through and check if the entire
+         * user buffer is covered
+         */
+        while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
+                /* jump to the next VMA region */
+                vma = find_vma(mm, vma->vm_end + 1);
+                dev_dbg(bridge,
+                        "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
+                        "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
+                        ul_num_bytes, vma->vm_start, vma->vm_end,
+                        vma->vm_flags);
+        }
         if (!vma) {
                 pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
-                       __func__, uva, size);
+                       __func__, ul_mpu_addr, ul_num_bytes);
+                status = -EINVAL;
                 up_read(&mm->mmap_sem);
-                return -EINVAL;
+                goto func_cont;
         }
-        if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
-                w = 1;
-
-        if (vma->vm_flags & VM_IO)
-                i = get_io_pages(mm, uva, pages, usr_pgs);
-        else
-                i = get_user_pages(current, mm, uva, pages, w, 1,
-                                   usr_pgs, NULL);
-        up_read(&mm->mmap_sem);
 
-        if (i < 0)
-                return i;
+        if (vma->vm_flags & VM_IO) {
+                num_usr_pgs = ul_num_bytes / PG_SIZE4K;
+                mpu_addr = ul_mpu_addr;
+
+                /* Get the physical addresses for user buffer */
+                for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
+                        pa = user_va2_pa(mm, mpu_addr);
+                        if (!pa) {
+                                status = -EPERM;
+                                pr_err("DSPBRIDGE: VM_IO mapping physical"
+                                       "address is invalid\n");
+                                break;
+                        }
+                        if (pfn_valid(__phys_to_pfn(pa))) {
+                                pg = PHYS_TO_PAGE(pa);
+                                get_page(pg);
+                                if (page_count(pg) < 1) {
+                                        pr_err("Bad page in VM_IO buffer\n");
+                                        bad_page_dump(pa, pg);
+                                }
+                        }
+                        status = pte_set(dev_context->pt_attrs, pa,
+                                         va, HW_PAGE_SIZE4KB, &hw_attrs);
+                        if (status)
+                                break;
 
-        if (i < pages) {
-                res = -EFAULT;
-                goto err_pages;
+                        va += HW_PAGE_SIZE4KB;
+                        mpu_addr += HW_PAGE_SIZE4KB;
+                        pa += HW_PAGE_SIZE4KB;
+                }
+        } else {
+                num_usr_pgs = ul_num_bytes / PG_SIZE4K;
+                if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+                        write = 1;
+
+                for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
+                        pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
+                                                write, 1, &mapped_page, NULL);
+                        if (pg_num > 0) {
+                                if (page_count(mapped_page) < 1) {
+                                        pr_err("Bad page count after doing"
+                                               "get_user_pages on"
+                                               "user buffer\n");
+                                        bad_page_dump(page_to_phys(mapped_page),
+                                                      mapped_page);
+                                }
+                                status = pte_set(dev_context->pt_attrs,
+                                                 page_to_phys(mapped_page), va,
+                                                 HW_PAGE_SIZE4KB, &hw_attrs);
+                                if (status)
+                                        break;
+
+                                if (mapped_pages)
+                                        mapped_pages[pg_i] = mapped_page;
+
+                                va += HW_PAGE_SIZE4KB;
+                                ul_mpu_addr += HW_PAGE_SIZE4KB;
+                        } else {
+                                pr_err("DSPBRIDGE: get_user_pages FAILED,"
+                                       "MPU addr = 0x%x,"
+                                       "vma->vm_flags = 0x%lx,"
+                                       "get_user_pages Err"
+                                       "Value = %d, Buffer"
+                                       "size=0x%x\n", ul_mpu_addr,
+                                       vma->vm_flags, pg_num, ul_num_bytes);
+                                status = -EPERM;
+                                break;
+                        }
+                }
         }
-
-        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-        if (!sgt) {
-                res = -ENOMEM;
-                goto err_pages;
+        up_read(&mm->mmap_sem);
+func_cont:
+        if (status) {
+                /*
+                 * Roll out the mapped pages incase it failed in middle of
+                 * mapping
+                 */
+                if (pg_i) {
+                        bridge_brd_mem_un_map(dev_context, virt_addr,
+                                              (pg_i * PG_SIZE4K));
+                }
+                status = -EPERM;
         }
-
-        res = sg_alloc_table(sgt, pages, GFP_KERNEL);
-
-        if (res < 0)
-                goto err_sg;
-
-        for_each_sg(sgt->sgl, sg, sgt->nents, i)
-                sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);
-
-        da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-
-        if (!IS_ERR_VALUE(da))
-                return 0;
-        res = (int)da;
-
-        sg_free_table(sgt);
-err_sg:
-        kfree(sgt);
-        i = pages;
-err_pages:
-        while (i--)
-                put_page(usr_pgs[i]);
-        return res;
+        /*
+         * In any case, flush the TLB
+         * This is called from here instead from pte_update to avoid unnecessary
+         * repetition while mapping non-contiguous physical regions of a virtual
+         * region
+         */
+        flush_all(dev_context);
+        dev_dbg(bridge, "%s status %x\n", __func__, status);
+        return status;
 }
 
 /*
@@ -1180,43 +1357,194 @@ err_pages:
  * So, instead of looking up the PTE address for every 4K block,
  * we clear consecutive PTEs until we unmap all the bytes
  */
-static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctx, u32 da)
+static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
+                                 u32 virt_addr, u32 ul_num_bytes)
 {
-        unsigned i;
-        struct sg_table *sgt;
-        struct scatterlist *sg;
+        u32 l1_base_va;
+        u32 l2_base_va;
+        u32 l2_base_pa;
+        u32 l2_page_num;
+        u32 pte_val;
+        u32 pte_size;
+        u32 pte_count;
+        u32 pte_addr_l1;
+        u32 pte_addr_l2 = 0;
+        u32 rem_bytes;
+        u32 rem_bytes_l2;
+        u32 va_curr;
+        struct page *pg = NULL;
+        int status = 0;
+        struct bridge_dev_context *dev_context = dev_ctxt;
+        struct pg_table_attrs *pt = dev_context->pt_attrs;
+        u32 temp;
+        u32 paddr;
+        u32 numof4k_pages = 0;
 
-        sgt = iommu_vunmap(dev_ctx->dsp_mmu, da);
-        if (!sgt)
-                return -EFAULT;
+        va_curr = virt_addr;
+        rem_bytes = ul_num_bytes;
+        rem_bytes_l2 = 0;
+        l1_base_va = pt->l1_base_va;
+        pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
+        dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
+                "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
+                ul_num_bytes, l1_base_va, pte_addr_l1);
+
+        while (rem_bytes && !status) {
+                u32 va_curr_orig = va_curr;
+                /* Find whether the L1 PTE points to a valid L2 PT */
+                pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
+                pte_val = *(u32 *) pte_addr_l1;
+                pte_size = hw_mmu_pte_size_l1(pte_val);
 
-        for_each_sg(sgt->sgl, sg, sgt->nents, i)
-                put_page(sg_page(sg));
-        sg_free_table(sgt);
-        kfree(sgt);
+                if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
+                        goto skip_coarse_page;
 
-        return 0;
-}
+                /*
+                 * Get the L2 PA from the L1 PTE, and find
+                 * corresponding L2 VA
+                 */
+                l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
+                l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
+                l2_page_num =
+                    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
+                /*
+                 * Find the L2 PTE address from which we will start
+                 * clearing, the number of PTEs to be cleared on this
+                 * page, and the size of VA space that needs to be
+                 * cleared on this L2 page
+                 */
+                pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
+                pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
+                pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
+                if (rem_bytes < (pte_count * PG_SIZE4K))
+                        pte_count = rem_bytes / PG_SIZE4K;
+                rem_bytes_l2 = pte_count * PG_SIZE4K;
 
+                /*
+                 * Unmap the VA space on this L2 PT. A quicker way
+                 * would be to clear pte_count entries starting from
+                 * pte_addr_l2. However, below code checks that we don't
+                 * clear invalid entries or less than 64KB for a 64KB
+                 * entry. Similar checking is done for L1 PTEs too
+                 * below
+                 */
+                while (rem_bytes_l2 && !status) {
+                        pte_val = *(u32 *) pte_addr_l2;
+                        pte_size = hw_mmu_pte_size_l2(pte_val);
+                        /* va_curr aligned to pte_size? */
+                        if (pte_size == 0 || rem_bytes_l2 < pte_size ||
+                            va_curr & (pte_size - 1)) {
+                                status = -EPERM;
+                                break;
+                        }
 
-static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
-                        struct page **usr_pgs)
-{
-        u32 pa;
-        int i;
-        struct page *pg;
+                        /* Collect Physical addresses from VA */
+                        paddr = (pte_val & ~(pte_size - 1));
+                        if (pte_size == HW_PAGE_SIZE64KB)
+                                numof4k_pages = 16;
+                        else
+                                numof4k_pages = 1;
+                        temp = 0;
+                        while (temp++ < numof4k_pages) {
+                                if (!pfn_valid(__phys_to_pfn(paddr))) {
+                                        paddr += HW_PAGE_SIZE4KB;
+                                        continue;
+                                }
+                                pg = PHYS_TO_PAGE(paddr);
+                                if (page_count(pg) < 1) {
+                                        pr_info("DSPBRIDGE: UNMAP function: "
+                                                "COUNT 0 FOR PA 0x%x, size = "
+                                                "0x%x\n", paddr, ul_num_bytes);
+                                        bad_page_dump(paddr, pg);
+                                } else {
+                                        set_page_dirty(pg);
+                                        page_cache_release(pg);
+                                }
+                                paddr += HW_PAGE_SIZE4KB;
+                        }
+                        if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
+                                status = -EPERM;
+                                goto EXIT_LOOP;
+                        }
 
-        for (i = 0; i < pages; i++) {
-                pa = user_va2_pa(mm, uva);
+                        status = 0;
+                        rem_bytes_l2 -= pte_size;
+                        va_curr += pte_size;
+                        pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
+                }
+                spin_lock(&pt->pg_lock);
+                if (rem_bytes_l2 == 0) {
+                        pt->pg_info[l2_page_num].num_entries -= pte_count;
+                        if (pt->pg_info[l2_page_num].num_entries == 0) {
+                                /*
+                                 * Clear the L1 PTE pointing to the L2 PT
+                                 */
+                                if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
+                                                      HW_MMU_COARSE_PAGE_SIZE))
+                                        status = 0;
+                                else {
+                                        status = -EPERM;
+                                        spin_unlock(&pt->pg_lock);
+                                        goto EXIT_LOOP;
+                                }
+                        }
+                        rem_bytes -= pte_count * PG_SIZE4K;
+                } else
+                        status = -EPERM;
 
-                if (!pfn_valid(__phys_to_pfn(pa)))
+                spin_unlock(&pt->pg_lock);
+                continue;
+skip_coarse_page:
+                /* va_curr aligned to pte_size? */
+                /* pte_size = 1 MB or 16 MB */
+                if (pte_size == 0 || rem_bytes < pte_size ||
+                    va_curr & (pte_size - 1)) {
+                        status = -EPERM;
                         break;
+                }
 
-                pg = PHYS_TO_PAGE(pa);
-                usr_pgs[i] = pg;
-                get_page(pg);
+                if (pte_size == HW_PAGE_SIZE1MB)
+                        numof4k_pages = 256;
+                else
+                        numof4k_pages = 4096;
+                temp = 0;
+                /* Collect Physical addresses from VA */
+                paddr = (pte_val & ~(pte_size - 1));
+                while (temp++ < numof4k_pages) {
+                        if (pfn_valid(__phys_to_pfn(paddr))) {
+                                pg = PHYS_TO_PAGE(paddr);
+                                if (page_count(pg) < 1) {
+                                        pr_info("DSPBRIDGE: UNMAP function: "
+                                                "COUNT 0 FOR PA 0x%x, size = "
+                                                "0x%x\n", paddr, ul_num_bytes);
+                                        bad_page_dump(paddr, pg);
+                                } else {
+                                        set_page_dirty(pg);
+                                        page_cache_release(pg);
+                                }
+                        }
+                        paddr += HW_PAGE_SIZE4KB;
+                }
+                if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
+                        status = 0;
+                        rem_bytes -= pte_size;
+                        va_curr += pte_size;
+                } else {
+                        status = -EPERM;
+                        goto EXIT_LOOP;
+                }
         }
-        return i;
+        /*
+         * It is better to flush the TLB here, so that any stale old entries
+         * get flushed
+         */
+EXIT_LOOP:
+        flush_all(dev_context);
+        dev_dbg(bridge,
+                "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
+                " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
+                pte_addr_l2, rem_bytes, rem_bytes_l2, status);
+        return status;
 }
 
 /*
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
index 14f319134357..3430418190da 100644
--- a/drivers/staging/tidspbridge/core/ue_deh.c
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -115,6 +115,12 @@ int bridge_deh_create(struct deh_mgr **ret_deh,
         /* Fill in context structure */
         deh->hbridge_context = hbridge_context;
 
+        /* Install ISR function for DSP MMU fault */
+        status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
+                        "DspBridge\tiommu fault", deh);
+        if (status < 0)
+                goto err;
+
         *ret_deh = deh;
         return 0;
 
@@ -134,6 +140,8 @@ int bridge_deh_destroy(struct deh_mgr *deh)
                 ntfy_delete(deh->ntfy_obj);
                 kfree(deh->ntfy_obj);
         }
+        /* Disable DSP MMU fault */
+        free_irq(INT_DSP_MMU_IRQ, deh);
 
         /* Free DPC object */
         tasklet_kill(&deh->dpc_tasklet);
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
index 173dfbb42019..0ae7d1646a1b 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
@@ -201,7 +201,7 @@ typedef int(*fxn_brd_memmap) (struct bridge_dev_context
  */
 typedef int(*fxn_brd_memunmap) (struct bridge_dev_context
                                 * dev_ctxt,
-                                u32 da);
+                                u32 virt_addr, u32 ul_num_bytes);
 
 /*
  * ======== bridge_brd_stop ========
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index 97c5b61f1014..b47d7aa747b1 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -1723,7 +1723,7 @@ int proc_un_map(void *hprocessor, void *map_addr,
         /* Remove mapping from the page tables. */
         if (!status) {
                 status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
-                    (p_proc_object->hbridge_context, va_align);
+                    (p_proc_object->hbridge_context, va_align, size_align);
         }
 
         mutex_unlock(&proc_lock);