author	Fernando Guzman Lugo <x0095840@ti.com>	2010-09-30 22:12:48 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-10-05 11:54:40 -0400
commit	d95ec7e2fd5cebf2f1caf3f572fa5e0a820ac5b1 (patch)
tree	97f8fab31c0377de0699b81c8f92ff5287c695d9
parent	0ee5ab30d19c8e82fdf725c1f67cf71bb67c8373 (diff)
staging: tidspbridge: replace iommu custom for opensource implementation
Now the tidspbridge uses the APIs from the iovmm module.

Signed-off-by: Fernando Guzman Lugo <x0095840@ti.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
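For context, the shape of the change is: instead of programming the DSP MMU registers and page tables by hand (hw_mmu_* calls plus pte_set/pte_update), the bridge now obtains the "iva2" IOMMU object and maps memory through the iovmm kernel API. The sketch below is a condensed illustration of that usage pattern, assembled only from the calls visible in the hunks of this patch (iommu_get, iommu_kmap, iommu_vmap, iommu_kunmap, iommu_vunmap, iommu_put and the IOVMF_* flags); the helper name and its arguments are hypothetical, and it is not a drop-in excerpt of the driver.

```c
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <plat/iommu.h>
#include <plat/iovmm.h>

/* Hypothetical helper showing the iovmm call sequence this patch adopts. */
static int example_dsp_map(u32 da_fixed, u32 pa, size_t bytes,
			   u32 da_user, struct sg_table *sgt)
{
	struct iommu *mmu;
	u32 da;

	/* Attach to the IVA2 MMU instead of poking hw_mmu_* registers. */
	mmu = iommu_get("iva2");
	if (IS_ERR_OR_NULL(mmu))
		return -EFAULT;

	/* Kernel/physical segments: mapped at a fixed DSP virtual address. */
	da = iommu_kmap(mmu, da_fixed, pa, bytes,
			IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
	if (IS_ERR_VALUE(da)) {
		iommu_put(mmu);
		return (int)da;
	}

	/* User buffers: described by a scatter-gather table and mapped as
	 * one DSP-virtual region. */
	da = iommu_vmap(mmu, da_user, sgt,
			IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
	if (IS_ERR_VALUE(da)) {
		iommu_kunmap(mmu, da_fixed);
		iommu_put(mmu);
		return (int)da;
	}

	/* Teardown mirrors the map calls. */
	iommu_vunmap(mmu, da);
	iommu_kunmap(mmu, da_fixed);
	iommu_put(mmu);
	return 0;
}
```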
-rw-r--r--	drivers/staging/tidspbridge/core/_tiomap.h	4
-rw-r--r--	drivers/staging/tidspbridge/core/io_sm.c	121
-rw-r--r--	drivers/staging/tidspbridge/core/tiomap3430.c	516
-rw-r--r--	drivers/staging/tidspbridge/core/ue_deh.c	8
-rw-r--r--	drivers/staging/tidspbridge/include/dspbridge/dspdefs.h	2
-rw-r--r--	drivers/staging/tidspbridge/rmgr/proc.c	2
6 files changed, 135 insertions(+), 518 deletions(-)
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index 1c1f157e167a..a42c3931fc7e 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -23,6 +23,8 @@
 #include <plat/clockdomain.h>
 #include <mach-omap2/prm-regbits-34xx.h>
 #include <mach-omap2/cm-regbits-34xx.h>
+#include <plat/iommu.h>
+#include <plat/iovmm.h>
 #include <dspbridge/devdefs.h>
 #include <hw_defs.h>
 #include <dspbridge/dspioctl.h>	/* for bridge_ioctl_extproc defn */
@@ -328,7 +330,7 @@ struct bridge_dev_context {
 	u32 dw_internal_size;	/* Internal memory size */
 
 	struct omap_mbox *mbox;		/* Mail box handle */
-
+	struct iommu *dsp_mmu;		/* iommu for iva2 handler */
 	struct cfg_hostres *resources;	/* Host Resources */
 
 	/*
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 571864555ddd..842b8dbc441a 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -291,6 +291,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	struct cod_manager *cod_man;
 	struct chnl_mgr *hchnl_mgr;
 	struct msg_mgr *hmsg_mgr;
+	struct iommu *mmu;
 	u32 ul_shm_base;
 	u32 ul_shm_base_offset;
 	u32 ul_shm_limit;
@@ -313,7 +314,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
 	struct cfg_hostres *host_res;
 	struct bridge_dev_context *pbridge_context;
-	u32 map_attrs;
 	u32 shm0_end;
 	u32 ul_dyn_ext_base;
 	u32 ul_seg1_size = 0;
@@ -337,6 +337,20 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 		status = -EFAULT;
 		goto func_end;
 	}
+	mmu = pbridge_context->dsp_mmu;
+
+	if (mmu)
+		iommu_put(mmu);
+	mmu = iommu_get("iva2");
+
+	if (IS_ERR_OR_NULL(mmu)) {
+		dev_err(bridge, "iommu_get failed!\n");
+		pbridge_context->dsp_mmu = NULL;
+		status = -EFAULT;
+		goto func_end;
+	}
+	pbridge_context->dsp_mmu = mmu;
+
 	status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
 	if (!cod_man) {
 		status = -EFAULT;
@@ -476,55 +490,16 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	gpp_va_curr = ul_gpp_va;
 	num_bytes = ul_seg1_size;
 
-	/*
-	 * Try to fit into TLB entries. If not possible, push them to page
-	 * tables. It is quite possible that if sections are not on
-	 * bigger page boundary, we may end up making several small pages.
-	 * So, push them onto page tables, if that is the case.
-	 */
-	map_attrs = 0x00000000;
-	map_attrs = DSP_MAPLITTLEENDIAN;
-	map_attrs |= DSP_MAPPHYSICALADDR;
-	map_attrs |= DSP_MAPELEMSIZE32;
-	map_attrs |= DSP_MAPDONOTLOCK;
-
-	while (num_bytes) {
-		/*
-		 * To find the max. page size with which both PA & VA are
-		 * aligned.
-		 */
-		all_bits = pa_curr | va_curr;
-		dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
-			"num_bytes %x\n", all_bits, pa_curr, va_curr,
-			num_bytes);
-		for (i = 0; i < 4; i++) {
-			if ((num_bytes >= page_size[i]) && ((all_bits &
-							     (page_size[i] -
-							      1)) == 0)) {
-				status =
-				    hio_mgr->intf_fxns->
-				    pfn_brd_mem_map(hio_mgr->hbridge_context,
-						    pa_curr, va_curr,
-						    page_size[i], map_attrs,
-						    NULL);
-				if (status)
-					goto func_end;
-				pa_curr += page_size[i];
-				va_curr += page_size[i];
-				gpp_va_curr += page_size[i];
-				num_bytes -= page_size[i];
-				/*
-				 * Don't try smaller sizes. Hopefully we have
-				 * reached an address aligned to a bigger page
-				 * size.
-				 */
-				break;
-			}
-		}
-	}
-	pa_curr += ul_pad_size;
-	va_curr += ul_pad_size;
-	gpp_va_curr += ul_pad_size;
+	va_curr = iommu_kmap(mmu, va_curr, pa_curr, num_bytes,
+					IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
+	if (IS_ERR_VALUE(va_curr)) {
+		status = (int)va_curr;
+		goto func_end;
+	}
+
+	pa_curr += ul_pad_size + num_bytes;
+	va_curr += ul_pad_size + num_bytes;
+	gpp_va_curr += ul_pad_size + num_bytes;
 
 	/* Configure the TLB entries for the next cacheable segment */
 	num_bytes = ul_seg_size;
@@ -566,22 +541,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 				ae_proc[ndx].ul_dsp_va *
 				hio_mgr->word_size, page_size[i]);
 			ndx++;
-		} else {
-			status =
-			    hio_mgr->intf_fxns->
-			    pfn_brd_mem_map(hio_mgr->hbridge_context,
-					    pa_curr, va_curr,
-					    page_size[i], map_attrs,
-					    NULL);
-			dev_dbg(bridge,
-				"shm MMU PTE entry PA %x"
-				" VA %x DSP_VA %x Size %x\n",
-				ae_proc[ndx].ul_gpp_pa,
-				ae_proc[ndx].ul_gpp_va,
-				ae_proc[ndx].ul_dsp_va *
-				hio_mgr->word_size, page_size[i]);
-			if (status)
-				goto func_end;
 		}
 		pa_curr += page_size[i];
 		va_curr += page_size[i];
@@ -634,37 +593,29 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
634 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa, 593 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
635 ae_proc[ndx].ul_dsp_va); 594 ae_proc[ndx].ul_dsp_va);
636 ndx++; 595 ndx++;
637 } else {
638 status = hio_mgr->intf_fxns->pfn_brd_mem_map
639 (hio_mgr->hbridge_context,
640 hio_mgr->ext_proc_info.ty_tlb[i].
641 ul_gpp_phys,
642 hio_mgr->ext_proc_info.ty_tlb[i].
643 ul_dsp_virt, 0x100000, map_attrs,
644 NULL);
645 } 596 }
646 } 597 }
647 if (status) 598 if (status)
648 goto func_end; 599 goto func_end;
649 } 600 }
650 601
651 map_attrs = 0x00000000;
652 map_attrs = DSP_MAPLITTLEENDIAN;
653 map_attrs |= DSP_MAPPHYSICALADDR;
654 map_attrs |= DSP_MAPELEMSIZE32;
655 map_attrs |= DSP_MAPDONOTLOCK;
656
657 /* Map the L4 peripherals */ 602 /* Map the L4 peripherals */
658 i = 0; 603 i = 0;
659 while (l4_peripheral_table[i].phys_addr) { 604 while (l4_peripheral_table[i].phys_addr) {
660 status = hio_mgr->intf_fxns->pfn_brd_mem_map 605 status = iommu_kmap(mmu, l4_peripheral_table[i].
661 (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr, 606 dsp_virt_addr, l4_peripheral_table[i].phys_addr,
662 l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB, 607 PAGE_SIZE, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
663 map_attrs, NULL); 608 if (IS_ERR_VALUE(status))
664 if (status) 609 break;
665 goto func_end;
666 i++; 610 i++;
667 } 611 }
612 if (IS_ERR_VALUE(status)) {
613 while (i--)
614 iommu_kunmap(mmu, l4_peripheral_table[i].
615 dsp_virt_addr);
616 goto func_end;
617 }
618 status = 0;
668 619
669 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { 620 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
670 ae_proc[i].ul_dsp_va = 0; 621 ae_proc[i].ul_dsp_va = 0;
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index e32e98a65a59..eb0fa2838d8f 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -100,7 +100,7 @@ static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
 				 u32 ul_num_bytes, u32 ul_map_attr,
 				 struct page **mapped_pages);
 static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
-				 u32 virt_addr, u32 ul_num_bytes);
+				 u32 da);
 static int bridge_dev_create(struct bridge_dev_context
 			     **dev_cntxt,
 			     struct dev_object *hdev_obj,
@@ -108,6 +108,8 @@ static int bridge_dev_create(struct bridge_dev_context
 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
 			   u32 dw_cmd, void *pargs);
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
+static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
+			struct page **usr_pgs);
 static u32 user_va2_pa(struct mm_struct *mm, u32 address);
 static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
 		      u32 va, u32 size,
@@ -357,6 +359,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 {
 	int status = 0;
 	struct bridge_dev_context *dev_context = dev_ctxt;
+	struct iommu *mmu;
 	u32 dw_sync_addr = 0;
 	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
 	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
@@ -376,6 +379,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 	struct dspbridge_platform_data *pdata =
 	    omap_dspbridge_dev->dev.platform_data;
 
+	mmu = dev_context->dsp_mmu;
 	/* The device context contains all the mmu setup info from when the
 	 * last dsp base image was loaded. The first entry is always
 	 * SHMMEM base. */
@@ -426,29 +430,10 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 		}
 	}
 	if (!status) {
-		/* Reset and Unreset the RST2, so that BOOTADDR is copied to
-		 * IVA2 SYSC register */
-		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
-			OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
-		udelay(100);
-		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
-			OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
-		udelay(100);
-
-		/* Disbale the DSP MMU */
-		hw_mmu_disable(resources->dw_dmmu_base);
-		/* Disable TWL */
-		hw_mmu_twl_disable(resources->dw_dmmu_base);
-
 		/* Only make TLB entry if both addresses are non-zero */
 		for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
 		     entry_ndx++) {
 			struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
-			struct hw_mmu_map_attrs_t map_attrs = {
-				.endianism = e->endianism,
-				.element_size = e->elem_size,
-				.mixed_size = e->mixed_mode,
-			};
 
 			if (!e->ul_gpp_pa || !e->ul_dsp_va)
 				continue;
@@ -460,13 +445,8 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 				e->ul_dsp_va,
 				e->ul_size);
 
-			hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
-					e->ul_gpp_pa,
-					e->ul_dsp_va,
-					e->ul_size,
-					itmp_entry_ndx,
-					&map_attrs, 1, 1);
-
+			iommu_kmap(mmu, e->ul_dsp_va, e->ul_gpp_pa, e->ul_size,
+				IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
 			itmp_entry_ndx++;
 		}
 	}
@@ -474,29 +454,13 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 	/* Lock the above TLB entries and get the BIOS and load monitor timer
 	 * information */
 	if (!status) {
-		hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
-		hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
-		hw_mmu_ttb_set(resources->dw_dmmu_base,
-			       dev_context->pt_attrs->l1_base_pa);
-		hw_mmu_twl_enable(resources->dw_dmmu_base);
-		/* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
-
-		temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
-		temp = (temp & 0xFFFFFFEF) | 0x11;
-		__raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
-
-		/* Let the DSP MMU run */
-		hw_mmu_enable(resources->dw_dmmu_base);
-
 		/* Enable the BIOS clock */
 		(void)dev_get_symbol(dev_context->hdev_obj,
 				     BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
 		(void)dev_get_symbol(dev_context->hdev_obj,
 				     BRIDGEINIT_LOADMON_GPTIMER,
 				     &ul_load_monitor_timer);
-	}
 
-	if (!status) {
 		if (ul_load_monitor_timer != 0xFFFF) {
 			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
 			    ul_load_monitor_timer;
@@ -505,9 +469,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
505 dev_dbg(bridge, "Not able to get the symbol for Load " 469 dev_dbg(bridge, "Not able to get the symbol for Load "
506 "Monitor Timer\n"); 470 "Monitor Timer\n");
507 } 471 }
508 }
509 472
510 if (!status) {
511 if (ul_bios_gp_timer != 0xFFFF) { 473 if (ul_bios_gp_timer != 0xFFFF) {
512 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | 474 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
513 ul_bios_gp_timer; 475 ul_bios_gp_timer;
@@ -516,9 +478,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 			dev_dbg(bridge,
 				"Not able to get the symbol for BIOS Timer\n");
 		}
-	}
 
-	if (!status) {
 		/* Set the DSP clock rate */
 		(void)dev_get_symbol(dev_context->hdev_obj,
 				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
@@ -571,9 +531,6 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 
 		/* Let DSP go */
 		dev_dbg(bridge, "%s Unreset\n", __func__);
-		/* Enable DSP MMU Interrupts */
-		hw_mmu_event_enable(resources->dw_dmmu_base,
-				    HW_MMU_ALL_INTERRUPTS);
 		/* release the RST1, DSP starts executing now .. */
 		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
 			OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -674,6 +631,8 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
 		omap_mbox_put(dev_context->mbox);
 		dev_context->mbox = NULL;
 	}
+	if (dev_context->dsp_mmu)
+		dev_context->dsp_mmu = (iommu_put(dev_context->dsp_mmu), NULL);
 	/* Reset IVA2 clocks*/
 	(*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
 			OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -1122,217 +1081,81 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
  *
  * TODO: Disable MMU while updating the page tables (but that'll stall DSP)
  */
-static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
-				  u32 ul_mpu_addr, u32 virt_addr,
-				  u32 ul_num_bytes, u32 ul_map_attr,
-				  struct page **mapped_pages)
-{
-	u32 attrs;
-	int status = 0;
-	struct bridge_dev_context *dev_context = dev_ctxt;
-	struct hw_mmu_map_attrs_t hw_attrs;
-	struct vm_area_struct *vma;
-	struct mm_struct *mm = current->mm;
-	u32 write = 0;
-	u32 num_usr_pgs = 0;
-	struct page *mapped_page, *pg;
-	s32 pg_num;
-	u32 va = virt_addr;
-	struct task_struct *curr_task = current;
-	u32 pg_i = 0;
-	u32 mpu_addr, pa;
-
-	dev_dbg(bridge,
-		"%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
-		__func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
-		ul_map_attr);
-	if (ul_num_bytes == 0)
-		return -EINVAL;
-
-	if (ul_map_attr & DSP_MAP_DIR_MASK) {
-		attrs = ul_map_attr;
-	} else {
-		/* Assign default attributes */
-		attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
-	}
-	/* Take mapping properties */
-	if (attrs & DSP_MAPBIGENDIAN)
-		hw_attrs.endianism = HW_BIG_ENDIAN;
-	else
-		hw_attrs.endianism = HW_LITTLE_ENDIAN;
-
-	hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
-	    ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
-	/* Ignore element_size if mixed_size is enabled */
-	if (hw_attrs.mixed_size == 0) {
-		if (attrs & DSP_MAPELEMSIZE8) {
-			/* Size is 8 bit */
-			hw_attrs.element_size = HW_ELEM_SIZE8BIT;
-		} else if (attrs & DSP_MAPELEMSIZE16) {
-			/* Size is 16 bit */
-			hw_attrs.element_size = HW_ELEM_SIZE16BIT;
-		} else if (attrs & DSP_MAPELEMSIZE32) {
-			/* Size is 32 bit */
-			hw_attrs.element_size = HW_ELEM_SIZE32BIT;
-		} else if (attrs & DSP_MAPELEMSIZE64) {
-			/* Size is 64 bit */
-			hw_attrs.element_size = HW_ELEM_SIZE64BIT;
-		} else {
-			/*
-			 * Mixedsize isn't enabled, so size can't be
-			 * zero here
-			 */
-			return -EINVAL;
-		}
-	}
-	if (attrs & DSP_MAPDONOTLOCK)
-		hw_attrs.donotlockmpupage = 1;
-	else
-		hw_attrs.donotlockmpupage = 0;
-
-	if (attrs & DSP_MAPVMALLOCADDR) {
-		return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
-				       ul_num_bytes, &hw_attrs);
-	}
-	/*
-	 * Do OS-specific user-va to pa translation.
-	 * Combine physically contiguous regions to reduce TLBs.
-	 * Pass the translated pa to pte_update.
-	 */
-	if ((attrs & DSP_MAPPHYSICALADDR)) {
-		status = pte_update(dev_context, ul_mpu_addr, virt_addr,
-				    ul_num_bytes, &hw_attrs);
-		goto func_cont;
-	}
-
-	/*
-	 * Important Note: ul_mpu_addr is mapped from user application process
-	 * to current process - it must lie completely within the current
-	 * virtual memory address space in order to be of use to us here!
-	 */
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, ul_mpu_addr);
-	if (vma)
-		dev_dbg(bridge,
-			"VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
-			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
-			ul_num_bytes, vma->vm_start, vma->vm_end,
-			vma->vm_flags);
-
-	/*
-	 * It is observed that under some circumstances, the user buffer is
-	 * spread across several VMAs. So loop through and check if the entire
-	 * user buffer is covered
-	 */
-	while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
-		/* jump to the next VMA region */
-		vma = find_vma(mm, vma->vm_end + 1);
-		dev_dbg(bridge,
-			"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
-			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
-			ul_num_bytes, vma->vm_start, vma->vm_end,
-			vma->vm_flags);
-	}
-	if (!vma) {
-		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
-		       __func__, ul_mpu_addr, ul_num_bytes);
-		status = -EINVAL;
-		up_read(&mm->mmap_sem);
-		goto func_cont;
-	}
-
-	if (vma->vm_flags & VM_IO) {
-		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
-		mpu_addr = ul_mpu_addr;
-
-		/* Get the physical addresses for user buffer */
-		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
-			pa = user_va2_pa(mm, mpu_addr);
-			if (!pa) {
-				status = -EPERM;
-				pr_err("DSPBRIDGE: VM_IO mapping physical"
-				       "address is invalid\n");
-				break;
-			}
-			if (pfn_valid(__phys_to_pfn(pa))) {
-				pg = PHYS_TO_PAGE(pa);
-				get_page(pg);
-				if (page_count(pg) < 1) {
-					pr_err("Bad page in VM_IO buffer\n");
-					bad_page_dump(pa, pg);
-				}
-			}
-			status = pte_set(dev_context->pt_attrs, pa,
-					 va, HW_PAGE_SIZE4KB, &hw_attrs);
-			if (status)
-				break;
-
-			va += HW_PAGE_SIZE4KB;
-			mpu_addr += HW_PAGE_SIZE4KB;
-			pa += HW_PAGE_SIZE4KB;
-		}
-	} else {
-		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
-		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
-			write = 1;
-
-		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
-			pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
-						write, 1, &mapped_page, NULL);
-			if (pg_num > 0) {
-				if (page_count(mapped_page) < 1) {
-					pr_err("Bad page count after doing"
-					       "get_user_pages on"
-					       "user buffer\n");
-					bad_page_dump(page_to_phys(mapped_page),
-						      mapped_page);
-				}
-				status = pte_set(dev_context->pt_attrs,
-						 page_to_phys(mapped_page), va,
-						 HW_PAGE_SIZE4KB, &hw_attrs);
-				if (status)
-					break;
-
-				if (mapped_pages)
-					mapped_pages[pg_i] = mapped_page;
-
-				va += HW_PAGE_SIZE4KB;
-				ul_mpu_addr += HW_PAGE_SIZE4KB;
-			} else {
-				pr_err("DSPBRIDGE: get_user_pages FAILED,"
-				       "MPU addr = 0x%x,"
-				       "vma->vm_flags = 0x%lx,"
-				       "get_user_pages Err"
-				       "Value = %d, Buffer"
-				       "size=0x%x\n", ul_mpu_addr,
-				       vma->vm_flags, pg_num, ul_num_bytes);
-				status = -EPERM;
-				break;
-			}
-		}
-	}
-	up_read(&mm->mmap_sem);
-func_cont:
-	if (status) {
-		/*
-		 * Roll out the mapped pages incase it failed in middle of
-		 * mapping
-		 */
-		if (pg_i) {
-			bridge_brd_mem_un_map(dev_context, virt_addr,
-					      (pg_i * PG_SIZE4K));
-		}
-		status = -EPERM;
-	}
-	/*
-	 * In any case, flush the TLB
-	 * This is called from here instead from pte_update to avoid unnecessary
-	 * repetition while mapping non-contiguous physical regions of a virtual
-	 * region
-	 */
-	flush_all(dev_context);
-	dev_dbg(bridge, "%s status %x\n", __func__, status);
-	return status;
+static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctx,
+			      u32 uva, u32 da, u32 size, u32 attr,
+			      struct page **usr_pgs)
+
+{
+	int res, w;
+	unsigned pages, i;
+	struct iommu *mmu = dev_ctx->dsp_mmu;
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	struct sg_table *sgt;
+	struct scatterlist *sg;
+
+	if (!size || !usr_pgs)
+		return -EINVAL;
+
+	pages = size / PG_SIZE4K;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, uva);
+	while (vma && (uva + size > vma->vm_end))
+		vma = find_vma(mm, vma->vm_end + 1);
+
+	if (!vma) {
+		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
+		       __func__, uva, size);
+		up_read(&mm->mmap_sem);
+		return -EINVAL;
+	}
+	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+		w = 1;
+
+	if (vma->vm_flags & VM_IO)
+		i = get_io_pages(mm, uva, pages, usr_pgs);
+	else
+		i = get_user_pages(current, mm, uva, pages, w, 1,
+				   usr_pgs, NULL);
+	up_read(&mm->mmap_sem);
+
+	if (i < 0)
+		return i;
+
+	if (i < pages) {
+		res = -EFAULT;
+		goto err_pages;
+	}
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		res = -ENOMEM;
+		goto err_pages;
+	}
+
+	res = sg_alloc_table(sgt, pages, GFP_KERNEL);
+
+	if (res < 0)
+		goto err_sg;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);
+
+	da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
+
+	if (!IS_ERR_VALUE(da))
+		return 0;
+	res = (int)da;
+
+	sg_free_table(sgt);
+err_sg:
+	kfree(sgt);
+	i = pages;
+err_pages:
+	while (i--)
+		put_page(usr_pgs[i]);
+	return res;
 }
 
 /*
@@ -1343,194 +1166,43 @@ func_cont:
  * So, instead of looking up the PTE address for every 4K block,
  * we clear consecutive PTEs until we unmap all the bytes
  */
-static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
-				 u32 virt_addr, u32 ul_num_bytes)
-{
-	u32 l1_base_va;
-	u32 l2_base_va;
-	u32 l2_base_pa;
-	u32 l2_page_num;
-	u32 pte_val;
-	u32 pte_size;
-	u32 pte_count;
-	u32 pte_addr_l1;
-	u32 pte_addr_l2 = 0;
-	u32 rem_bytes;
-	u32 rem_bytes_l2;
-	u32 va_curr;
-	struct page *pg = NULL;
-	int status = 0;
-	struct bridge_dev_context *dev_context = dev_ctxt;
-	struct pg_table_attrs *pt = dev_context->pt_attrs;
-	u32 temp;
-	u32 paddr;
-	u32 numof4k_pages = 0;
-
-	va_curr = virt_addr;
-	rem_bytes = ul_num_bytes;
-	rem_bytes_l2 = 0;
-	l1_base_va = pt->l1_base_va;
-	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
-	dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
-		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
-		ul_num_bytes, l1_base_va, pte_addr_l1);
-
-	while (rem_bytes && !status) {
-		u32 va_curr_orig = va_curr;
-		/* Find whether the L1 PTE points to a valid L2 PT */
-		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
-		pte_val = *(u32 *) pte_addr_l1;
-		pte_size = hw_mmu_pte_size_l1(pte_val);
-
-		if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
-			goto skip_coarse_page;
-
-		/*
-		 * Get the L2 PA from the L1 PTE, and find
-		 * corresponding L2 VA
-		 */
-		l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
-		l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
-		l2_page_num =
-		    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
-		/*
-		 * Find the L2 PTE address from which we will start
-		 * clearing, the number of PTEs to be cleared on this
-		 * page, and the size of VA space that needs to be
-		 * cleared on this L2 page
-		 */
-		pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
-		pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
-		pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
-		if (rem_bytes < (pte_count * PG_SIZE4K))
-			pte_count = rem_bytes / PG_SIZE4K;
-		rem_bytes_l2 = pte_count * PG_SIZE4K;
-
-		/*
-		 * Unmap the VA space on this L2 PT. A quicker way
-		 * would be to clear pte_count entries starting from
-		 * pte_addr_l2. However, below code checks that we don't
-		 * clear invalid entries or less than 64KB for a 64KB
-		 * entry. Similar checking is done for L1 PTEs too
-		 * below
-		 */
-		while (rem_bytes_l2 && !status) {
-			pte_val = *(u32 *) pte_addr_l2;
-			pte_size = hw_mmu_pte_size_l2(pte_val);
-			/* va_curr aligned to pte_size? */
-			if (pte_size == 0 || rem_bytes_l2 < pte_size ||
-			    va_curr & (pte_size - 1)) {
-				status = -EPERM;
-				break;
-			}
-
-			/* Collect Physical addresses from VA */
-			paddr = (pte_val & ~(pte_size - 1));
-			if (pte_size == HW_PAGE_SIZE64KB)
-				numof4k_pages = 16;
-			else
-				numof4k_pages = 1;
-			temp = 0;
-			while (temp++ < numof4k_pages) {
-				if (!pfn_valid(__phys_to_pfn(paddr))) {
-					paddr += HW_PAGE_SIZE4KB;
-					continue;
-				}
-				pg = PHYS_TO_PAGE(paddr);
-				if (page_count(pg) < 1) {
-					pr_info("DSPBRIDGE: UNMAP function: "
-						"COUNT 0 FOR PA 0x%x, size = "
-						"0x%x\n", paddr, ul_num_bytes);
-					bad_page_dump(paddr, pg);
-				} else {
-					set_page_dirty(pg);
-					page_cache_release(pg);
-				}
-				paddr += HW_PAGE_SIZE4KB;
-			}
-			if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
-				status = -EPERM;
-				goto EXIT_LOOP;
-			}
-
-			status = 0;
-			rem_bytes_l2 -= pte_size;
-			va_curr += pte_size;
-			pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
-		}
-		spin_lock(&pt->pg_lock);
-		if (rem_bytes_l2 == 0) {
-			pt->pg_info[l2_page_num].num_entries -= pte_count;
-			if (pt->pg_info[l2_page_num].num_entries == 0) {
-				/*
-				 * Clear the L1 PTE pointing to the L2 PT
-				 */
-				if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
-						      HW_MMU_COARSE_PAGE_SIZE))
-					status = 0;
-				else {
-					status = -EPERM;
-					spin_unlock(&pt->pg_lock);
-					goto EXIT_LOOP;
-				}
-			}
-			rem_bytes -= pte_count * PG_SIZE4K;
-		} else
-			status = -EPERM;
-
-		spin_unlock(&pt->pg_lock);
-		continue;
-skip_coarse_page:
-		/* va_curr aligned to pte_size? */
-		/* pte_size = 1 MB or 16 MB */
-		if (pte_size == 0 || rem_bytes < pte_size ||
-		    va_curr & (pte_size - 1)) {
-			status = -EPERM;
-			break;
-		}
-
-		if (pte_size == HW_PAGE_SIZE1MB)
-			numof4k_pages = 256;
-		else
-			numof4k_pages = 4096;
-		temp = 0;
-		/* Collect Physical addresses from VA */
-		paddr = (pte_val & ~(pte_size - 1));
-		while (temp++ < numof4k_pages) {
-			if (pfn_valid(__phys_to_pfn(paddr))) {
-				pg = PHYS_TO_PAGE(paddr);
-				if (page_count(pg) < 1) {
-					pr_info("DSPBRIDGE: UNMAP function: "
-						"COUNT 0 FOR PA 0x%x, size = "
-						"0x%x\n", paddr, ul_num_bytes);
-					bad_page_dump(paddr, pg);
-				} else {
-					set_page_dirty(pg);
-					page_cache_release(pg);
-				}
-			}
-			paddr += HW_PAGE_SIZE4KB;
-		}
-		if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
-			status = 0;
-			rem_bytes -= pte_size;
-			va_curr += pte_size;
-		} else {
-			status = -EPERM;
-			goto EXIT_LOOP;
-		}
-	}
-	/*
-	 * It is better to flush the TLB here, so that any stale old entries
-	 * get flushed
-	 */
-EXIT_LOOP:
-	flush_all(dev_context);
-	dev_dbg(bridge,
-		"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
-		" rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
-		pte_addr_l2, rem_bytes, rem_bytes_l2, status);
-	return status;
+static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctx, u32 da)
+{
+	unsigned i;
+	struct sg_table *sgt;
+	struct scatterlist *sg;
+
+	sgt = iommu_vunmap(dev_ctx->dsp_mmu, da);
+	if (!sgt)
+		return -EFAULT;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		put_page(sg_page(sg));
+	sg_free_table(sgt);
+	kfree(sgt);
+
+	return 0;
+}
+
+
+static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
+			struct page **usr_pgs)
+{
+	u32 pa;
+	int i;
+	struct page *pg;
+
+	for (i = 0; i < pages; i++) {
+		pa = user_va2_pa(mm, uva);
+
+		if (!pfn_valid(__phys_to_pfn(pa)))
+			break;
+
+		pg = PHYS_TO_PAGE(pa);
+		usr_pgs[i] = pg;
+		get_page(pg);
+	}
+	return i;
 }
 
 /*
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
index 3430418190da..14f319134357 100644
--- a/drivers/staging/tidspbridge/core/ue_deh.c
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -115,12 +115,6 @@ int bridge_deh_create(struct deh_mgr **ret_deh,
 	/* Fill in context structure */
 	deh->hbridge_context = hbridge_context;
 
-	/* Install ISR function for DSP MMU fault */
-	status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
-			"DspBridge\tiommu fault", deh);
-	if (status < 0)
-		goto err;
-
 	*ret_deh = deh;
 	return 0;
 
@@ -140,8 +134,6 @@ int bridge_deh_destroy(struct deh_mgr *deh)
 		ntfy_delete(deh->ntfy_obj);
 		kfree(deh->ntfy_obj);
 	}
-	/* Disable DSP MMU fault */
-	free_irq(INT_DSP_MMU_IRQ, deh);
 
 	/* Free DPC object */
 	tasklet_kill(&deh->dpc_tasklet);
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
index 0ae7d1646a1b..173dfbb42019 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
@@ -201,7 +201,7 @@ typedef int(*fxn_brd_memmap) (struct bridge_dev_context
  */
 typedef int(*fxn_brd_memunmap) (struct bridge_dev_context
 				* dev_ctxt,
-				u32 virt_addr, u32 ul_num_bytes);
+				u32 da);
 
 /*
  * ======== bridge_brd_stop ========
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index b47d7aa747b1..97c5b61f1014 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -1723,7 +1723,7 @@ int proc_un_map(void *hprocessor, void *map_addr,
 	/* Remove mapping from the page tables. */
 	if (!status) {
 		status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
-		    (p_proc_object->hbridge_context, va_align, size_align);
+		    (p_proc_object->hbridge_context, va_align);
 	}
 
 	mutex_unlock(&proc_lock);