author    Felipe Contreras <felipe.contreras@gmail.com>  2010-11-10 13:11:59 -0500
committer Omar Ramirez Luna <omar.ramirez@ti.com>        2010-11-10 19:34:43 -0500
commit    ac8a139a14db73f96f7b79765c3a9e34d16bdb95 (patch)
tree      7370ad63aac92b07d0d0bf139f8abd8fc3ffbe51 /drivers
parent    6c4c899ee27963357a7df1f5e15a5677978cd842 (diff)
Revert "staging: tidspbridge - remove custom mmu code from tiomap3430.c"
This reverts commit e7396e77d9e4230bf725b5807732cbca191d111f.

Signed-off-by: Felipe Contreras <felipe.contreras@gmail.com>
Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/staging/tidspbridge/core/_tiomap.h    |   2 +
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430.c | 425 +++++++++++++++
2 files changed, 427 insertions(+), 0 deletions(-)
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index 394a64a7ba21..877749258acb 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -330,6 +330,7 @@ struct bridge_dev_context {
 	 */
 	u32 dw_dsp_ext_base_addr;	/* See the comment above */
 	u32 dw_api_reg_base;	/* API mem map'd registers */
+	void __iomem *dw_dsp_mmu_base;	/* DSP MMU Mapped registers */
 	u32 dw_api_clk_base;	/* CLK Registers */
 	u32 dw_dsp_clk_m2_base;	/* DSP Clock Module m2 */
 	u32 dw_public_rhea;	/* Pub Rhea */
@@ -355,6 +356,7 @@ struct bridge_dev_context {
 
 	/* TC Settings */
 	bool tc_word_swap_on;	/* Traffic Controller Word Swap */
+	struct pg_table_attrs *pt_attrs;
 	u32 dsp_per_clks;
 };
 
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index d643c2ba9dc1..a342feed42f6 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -116,8 +116,56 @@ static int bridge_dev_create(struct bridge_dev_context
 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
 				  u32 dw_cmd, void *pargs);
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
+static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
+		      u32 va, u32 size,
+		      struct hw_mmu_map_attrs_t *map_attrs);
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+		   u32 size, struct hw_mmu_map_attrs_t *attrs);
+static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
+			   u32 ul_mpu_addr, u32 virt_addr,
+			   u32 ul_num_bytes,
+			   struct hw_mmu_map_attrs_t *hw_attrs);
+
 bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
 
+/* ----------------------------------- Globals */
+
+/* Attributes of L2 page tables for DSP MMU */
+struct page_info {
+	u32 num_entries;	/* Number of valid PTEs in the L2 PT */
+};
+
+/* Attributes used to manage the DSP MMU page tables */
+struct pg_table_attrs {
+	spinlock_t pg_lock;	/* Critical section object handle */
+
+	u32 l1_base_pa;		/* Physical address of the L1 PT */
+	u32 l1_base_va;		/* Virtual address of the L1 PT */
+	u32 l1_size;		/* Size of the L1 PT */
+	u32 l1_tbl_alloc_pa;
+	/* Physical address of Allocated mem for L1 table. May not be aligned */
+	u32 l1_tbl_alloc_va;
+	/* Virtual address of Allocated mem for L1 table. May not be aligned */
+	u32 l1_tbl_alloc_sz;
+	/* Size of consistent memory allocated for L1 table.
+	 * May not be aligned */
+
+	u32 l2_base_pa;		/* Physical address of the L2 PT */
+	u32 l2_base_va;		/* Virtual address of the L2 PT */
+	u32 l2_size;		/* Size of the L2 PT */
+	u32 l2_tbl_alloc_pa;
+	/* Physical address of Allocated mem for L2 table. May not be aligned */
+	u32 l2_tbl_alloc_va;
+	/* Virtual address of Allocated mem for L2 table. May not be aligned */
+	u32 l2_tbl_alloc_sz;
+	/* Size of consistent memory allocated for L2 table.
+	 * May not be aligned */
+
+	u32 l2_num_pages;	/* Number of allocated L2 PT */
+	/* Array [l2_num_pages] of L2 PT info structs */
+	struct page_info *pg_info;
+};
+
 /*
  * This Bridge driver's function interface table.
  */
@@ -166,6 +214,27 @@ static struct bridge_drv_interface drv_interface_fxns = {
 	bridge_msg_set_queue_id,
 };
 
+static inline void flush_all(struct bridge_dev_context *dev_context)
+{
+	if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
+	    dev_context->dw_brd_state == BRD_HIBERNATION)
+		wake_dsp(dev_context, NULL);
+
+	hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
+}
+
+static void bad_page_dump(u32 pa, struct page *pg)
+{
+	pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
+	pr_emerg("Bad page state in process '%s'\n"
+		 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+		 "Backtrace:\n",
+		 current->comm, pg, (int)(2 * sizeof(unsigned long)),
+		 (unsigned long)pg->flags, pg->mapping,
+		 page_mapcount(pg), page_count(pg));
+	dump_stack();
+}
+
 /*
  * ======== bridge_drv_entry ========
  * purpose:
@@ -571,6 +640,7 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
 {
 	int status = 0;
 	struct bridge_dev_context *dev_context = dev_ctxt;
+	struct pg_table_attrs *pt_attrs;
 	u32 dsp_pwr_state;
 	int i;
 	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
@@ -609,6 +679,14 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
 
 	dsp_wdt_enable(false);
 
+	/* This is a good place to clear the MMU page tables as well */
+	if (dev_context->pt_attrs) {
+		pt_attrs = dev_context->pt_attrs;
+		memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
+		memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
+		memset((u8 *) pt_attrs->pg_info, 0x00,
+		       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
+	}
 	/* Reset DSP */
 	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
 			OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -699,6 +777,10 @@ static int bridge_dev_create(struct bridge_dev_context
 	struct bridge_dev_context *dev_context = NULL;
 	s32 entry_ndx;
 	struct cfg_hostres *resources = config_param;
+	struct pg_table_attrs *pt_attrs;
+	u32 pg_tbl_pa;
+	u32 pg_tbl_va;
+	u32 align_size;
 	struct drv_data *drv_datap = dev_get_drvdata(bridge);
 
 	/* Allocate and initialize a data structure to contain the bridge driver
@@ -729,8 +811,97 @@ static int bridge_dev_create(struct bridge_dev_context
 	if (!dev_context->dw_dsp_base_addr)
 		status = -EPERM;
 
+	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
+	if (pt_attrs != NULL) {
+		/* Assuming that we use only DSP's memory map
+		 * until 0x4000:0000, we would need only 1024
+		 * L1 entries, i.e. L1 size = 4K */
+		pt_attrs->l1_size = 0x1000;
+		align_size = pt_attrs->l1_size;
+		/* Align sizes are expected to be power of 2 */
+		/* we like to get aligned on L1 table size */
+		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
+						     align_size, &pg_tbl_pa);
+
+		/* Check if the PA is aligned for us */
+		if ((pg_tbl_pa) & (align_size - 1)) {
+			/* PA not aligned to page table size,
+			 * try with more allocation and align */
+			mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
+					  pt_attrs->l1_size);
+			/* we like to get aligned on L1 table size */
+			pg_tbl_va =
+			    (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
+						     align_size, &pg_tbl_pa);
+			/* We should be able to get aligned table now */
+			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
+			/* Align the PA to the next 'align' boundary */
+			pt_attrs->l1_base_pa =
+			    ((pg_tbl_pa) +
+			     (align_size - 1)) & (~(align_size - 1));
+			pt_attrs->l1_base_va =
+			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
+		} else {
+			/* We got aligned PA, cool */
+			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
+			pt_attrs->l1_base_pa = pg_tbl_pa;
+			pt_attrs->l1_base_va = pg_tbl_va;
+		}
+		if (pt_attrs->l1_base_va)
+			memset((u8 *) pt_attrs->l1_base_va, 0x00,
+			       pt_attrs->l1_size);
+
+		/* number of L2 page tables = DMM pool used + SHMMEM + EXTMEM +
+		 * L4 pages */
+		pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
+		pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
+		    pt_attrs->l2_num_pages;
+		align_size = 4;	/* Make it u32 aligned */
+		/* we like to get aligned on L1 table size */
+		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
+						     align_size, &pg_tbl_pa);
+		pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
+		pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
+		pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
+		pt_attrs->l2_base_pa = pg_tbl_pa;
+		pt_attrs->l2_base_va = pg_tbl_va;
+
+		if (pt_attrs->l2_base_va)
+			memset((u8 *) pt_attrs->l2_base_va, 0x00,
+			       pt_attrs->l2_size);
+
+		pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
+					    sizeof(struct page_info), GFP_KERNEL);
+		dev_dbg(bridge,
+			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
+			"%x, size %x\n", pt_attrs->l1_base_pa,
+			pt_attrs->l1_base_va, pt_attrs->l1_size,
+			pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
+			pt_attrs->l2_size);
+		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
+			pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
+	}
+	if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
+	    (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
+		dev_context->pt_attrs = pt_attrs;
+	else
+		status = -ENOMEM;
+
 	if (!status) {
+		spin_lock_init(&pt_attrs->pg_lock);
 		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
+
+		/* Set the Clock Divisor for the DSP module */
+		udelay(5);
+		/* MMU address is obtained from the host
+		 * resources struct */
+		dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
+	}
+	if (!status) {
 		dev_context->hdev_obj = hdev_obj;
 		/* Store current board state. */
 		dev_context->dw_brd_state = BRD_UNKNOWN;
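
A note on the allocation above: mem_alloc_phys_mem() may return a physical
address that is not aligned to the 4 KiB L1 table size, so the code frees the
first attempt, over-allocates by 2x, and rounds the PA up to the next alignment
boundary. A standalone sketch of that round-up arithmetic (addresses and names
are illustrative, not from the driver):

#include <stdio.h>

/* Round pa up to the next 'align' boundary (align must be a power of two),
 * as bridge_dev_create() does for the L1 table base. */
static unsigned int align_up(unsigned int pa, unsigned int align)
{
	return (pa + (align - 1)) & ~(align - 1);
}

int main(void)
{
	unsigned int align = 0x1000;      /* L1 table size: 4 KiB */
	unsigned int raw_pa = 0x80001234; /* hypothetical unaligned PA */

	/* An allocation of 2 * align bytes always contains an aligned
	 * block of 'align' bytes, so the round-up cannot overrun it. */
	unsigned int base_pa = align_up(raw_pa, align);

	printf("raw 0x%08x -> aligned 0x%08x (skipped %u bytes)\n",
	       raw_pa, base_pa, base_pa - raw_pa);
	return 0;
}
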
@@ -740,6 +911,23 @@ static int bridge_dev_create(struct bridge_dev_context
 		/* Return ptr to our device state to the DSP API for storage */
 		*dev_cntxt = dev_context;
 	} else {
+		if (pt_attrs != NULL) {
+			kfree(pt_attrs->pg_info);
+
+			if (pt_attrs->l2_tbl_alloc_va) {
+				mem_free_phys_mem((void *)
+						  pt_attrs->l2_tbl_alloc_va,
+						  pt_attrs->l2_tbl_alloc_pa,
+						  pt_attrs->l2_tbl_alloc_sz);
+			}
+			if (pt_attrs->l1_tbl_alloc_va) {
+				mem_free_phys_mem((void *)
+						  pt_attrs->l1_tbl_alloc_va,
+						  pt_attrs->l1_tbl_alloc_pa,
+						  pt_attrs->l1_tbl_alloc_sz);
+			}
+		}
+		kfree(pt_attrs);
 		kfree(dev_context);
 	}
 func_end:
@@ -807,6 +995,7 @@ static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
  */
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 {
+	struct pg_table_attrs *pt_attrs;
 	int status = 0;
 	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
 	    dev_ctxt;
@@ -820,6 +1009,23 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 
 	/* first put the device to stop state */
 	bridge_brd_stop(dev_context);
+	if (dev_context->pt_attrs) {
+		pt_attrs = dev_context->pt_attrs;
+		kfree(pt_attrs->pg_info);
+
+		if (pt_attrs->l2_tbl_alloc_va) {
+			mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
+					  pt_attrs->l2_tbl_alloc_pa,
+					  pt_attrs->l2_tbl_alloc_sz);
+		}
+		if (pt_attrs->l1_tbl_alloc_va) {
+			mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
+					  pt_attrs->l1_tbl_alloc_pa,
+					  pt_attrs->l1_tbl_alloc_sz);
+		}
+		kfree(pt_attrs);
+
+	}
 
 	if (dev_context->resources) {
 		host_res = dev_context->resources;
@@ -1116,6 +1322,225 @@ int user_to_dsp_unmap(struct iommu *mmu, u32 da)
 }
 
 /*
+ * ======== pte_update ========
+ * This function calculates the optimum page-aligned addresses and sizes
+ * Caller must pass page-aligned values
+ */
+static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
+		      u32 va, u32 size,
+		      struct hw_mmu_map_attrs_t *map_attrs)
+{
+	u32 i;
+	u32 all_bits;
+	u32 pa_curr = pa;
+	u32 va_curr = va;
+	u32 num_bytes = size;
+	struct bridge_dev_context *dev_context = dev_ctxt;
+	int status = 0;
+	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
+	};
+
+	while (num_bytes && !status) {
+		/* To find the max. page size with which both PA & VA are
+		 * aligned */
+		all_bits = pa_curr | va_curr;
+
+		for (i = 0; i < 4; i++) {
+			if ((num_bytes >= page_size[i]) && ((all_bits &
+							     (page_size[i] -
+							      1)) == 0)) {
+				status =
+				    pte_set(dev_context->pt_attrs, pa_curr,
+					    va_curr, page_size[i], map_attrs);
+				pa_curr += page_size[i];
+				va_curr += page_size[i];
+				num_bytes -= page_size[i];
+				/* Don't try smaller sizes. Hopefully we have
+				 * reached an address aligned to a bigger page
+				 * size */
+				break;
+			}
+		}
+	}
+
+	return status;
+}
+
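
For reference: the loop above greedily picks the largest of the four DSP MMU
page sizes (16 MiB, 1 MiB, 64 KiB, 4 KiB) to which both PA and VA are aligned,
maps that much, and repeats until the region is consumed. A minimal user-space
sketch of the same selection, with printf() standing in for pte_set() (it
assumes 4 KiB-aligned, non-zero inputs, the same precondition the driver
states):

#include <stdio.h>

/* The four DSP MMU page sizes, largest first, as in pte_update(). */
static const unsigned int page_size[] = {
	0x1000000, 0x100000, 0x10000, 0x1000
};

static void show_split(unsigned int pa, unsigned int va, unsigned int num_bytes)
{
	while (num_bytes) {
		unsigned int all_bits = pa | va; /* common alignment bits */
		int i;

		for (i = 0; i < 4; i++) {
			if (num_bytes >= page_size[i] &&
			    (all_bits & (page_size[i] - 1)) == 0) {
				printf("map 0x%07x at pa 0x%08x va 0x%08x\n",
				       page_size[i], pa, va);
				pa += page_size[i];
				va += page_size[i];
				num_bytes -= page_size[i];
				break;	/* realigned; retry larger sizes */
			}
		}
	}
}

int main(void)
{
	/* 2 MiB + 64 KiB, 64 KiB-aligned: 64 KiB pages up to the 1 MiB
	 * boundary, then a 1 MiB section, then 64 KiB pages again. */
	show_split(0x80010000, 0x20010000, 0x210000);
	return 0;
}
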
+/*
+ * ======== pte_set ========
+ * This function calculates PTE address (MPU virtual) to be updated
+ * It also manages the L2 page tables
+ */
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+		   u32 size, struct hw_mmu_map_attrs_t *attrs)
+{
+	u32 i;
+	u32 pte_val;
+	u32 pte_addr_l1;
+	u32 pte_size;
+	/* Base address of the PT that will be updated */
+	u32 pg_tbl_va;
+	u32 l1_base_va;
+	/* Compiler warns that the next three variables might be used
+	 * uninitialized in this function. Doesn't seem so. Working around,
+	 * anyways. */
+	u32 l2_base_va = 0;
+	u32 l2_base_pa = 0;
+	u32 l2_page_num = 0;
+	int status = 0;
+
+	l1_base_va = pt->l1_base_va;
+	pg_tbl_va = l1_base_va;
+	if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
+		/* Find whether the L1 PTE points to a valid L2 PT */
+		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
+		if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
+			pte_val = *(u32 *) pte_addr_l1;
+			pte_size = hw_mmu_pte_size_l1(pte_val);
+		} else {
+			return -EPERM;
+		}
+		spin_lock(&pt->pg_lock);
+		if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
+			/* Get the L2 PA from the L1 PTE, and find
+			 * corresponding L2 VA */
+			l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
+			l2_base_va =
+			    l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
+			l2_page_num =
+			    (l2_base_pa -
+			     pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
+		} else if (pte_size == 0) {
+			/* L1 PTE is invalid. Allocate a L2 PT and
+			 * point the L1 PTE to it */
+			/* Find a free L2 PT. */
+			for (i = 0; (i < pt->l2_num_pages) &&
+			     (pt->pg_info[i].num_entries != 0); i++)
+				;
+			if (i < pt->l2_num_pages) {
+				l2_page_num = i;
+				l2_base_pa = pt->l2_base_pa + (l2_page_num *
+						HW_MMU_COARSE_PAGE_SIZE);
+				l2_base_va = pt->l2_base_va + (l2_page_num *
+						HW_MMU_COARSE_PAGE_SIZE);
+				/* Endianness attributes are ignored for
+				 * HW_MMU_COARSE_PAGE_SIZE */
+				status =
+				    hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
+						   HW_MMU_COARSE_PAGE_SIZE,
+						   attrs);
+			} else {
+				status = -ENOMEM;
+			}
+		} else {
+			/* Found valid L1 PTE of another size.
+			 * Should not overwrite it. */
+			status = -EPERM;
+		}
+		if (!status) {
+			pg_tbl_va = l2_base_va;
+			if (size == HW_PAGE_SIZE64KB)
+				pt->pg_info[l2_page_num].num_entries += 16;
+			else
+				pt->pg_info[l2_page_num].num_entries++;
+			dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
+				"%x, num_entries %x\n", l2_base_va,
+				l2_base_pa, l2_page_num,
+				pt->pg_info[l2_page_num].num_entries);
+		}
+		spin_unlock(&pt->pg_lock);
+	}
+	if (!status) {
+		dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
+			pg_tbl_va, pa, va, size);
+		dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
+			"mixed_size %x\n", attrs->endianism,
+			attrs->element_size, attrs->mixed_size);
+		status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
+	}
+
+	return status;
+}
+
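
One detail worth spelling out: each L2 (coarse) table maps 1 MiB, and a 64 KiB
page apparently occupies 16 consecutive 4 KiB entry slots in it, which is why
num_entries above grows by 16 for HW_PAGE_SIZE64KB but by 1 for a 4 KiB page.
A toy version of that bookkeeping (hypothetical, not driver code):

#include <stdio.h>

#define PTES_PER_L2 256	/* 1 MiB / 4 KiB slots per coarse table */

/* Mirrors the num_entries accounting in pte_set(). */
struct l2_info {
	unsigned int num_entries;
};

static void account(struct l2_info *l2, unsigned int page_size)
{
	/* A 64 KiB page is entered as 16 replicated 4 KiB slots. */
	l2->num_entries += (page_size == 0x10000) ? 16 : 1;
}

int main(void)
{
	struct l2_info l2 = { 0 };

	account(&l2, 0x10000);	/* one 64 KiB page -> 16 slots */
	account(&l2, 0x1000);	/* one 4 KiB page  ->  1 slot  */
	printf("slots used: %u of %u\n", l2.num_entries, PTES_PER_L2);
	return 0;
}
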
+/* Memory map kernel VA -- memory allocated with vmalloc */
+static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
+			   u32 ul_mpu_addr, u32 virt_addr,
+			   u32 ul_num_bytes,
+			   struct hw_mmu_map_attrs_t *hw_attrs)
+{
+	int status = 0;
+	struct page *page[1];
+	u32 i;
+	u32 pa_curr;
+	u32 pa_next;
+	u32 va_curr;
+	u32 size_curr;
+	u32 num_pages;
+	u32 pa;
+	u32 num_of4k_pages;
+	u32 temp = 0;
+
+	/*
+	 * Do Kernel va to pa translation.
+	 * Combine physically contiguous regions to reduce TLBs.
+	 * Pass the translated pa to pte_update.
+	 */
+	num_pages = ul_num_bytes / PAGE_SIZE;	/* PAGE_SIZE = OS page size */
+	i = 0;
+	va_curr = ul_mpu_addr;
+	page[0] = vmalloc_to_page((void *)va_curr);
+	pa_next = page_to_phys(page[0]);
+	while (!status && (i < num_pages)) {
+		/*
+		 * Reuse pa_next from the previous iteration to avoid
+		 * an extra va2pa call
+		 */
+		pa_curr = pa_next;
+		size_curr = PAGE_SIZE;
+		/*
+		 * If the next page is physically contiguous,
+		 * map it with the current one by increasing
+		 * the size of the region to be mapped
+		 */
+		while (++i < num_pages) {
+			page[0] =
+			    vmalloc_to_page((void *)(va_curr + size_curr));
+			pa_next = page_to_phys(page[0]);
+
+			if (pa_next == (pa_curr + size_curr))
+				size_curr += PAGE_SIZE;
+			else
+				break;
+
+		}
+		if (pa_next == 0) {
+			status = -ENOMEM;
+			break;
+		}
+		pa = pa_curr;
+		num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
+		while (temp++ < num_of4k_pages) {
+			get_page(PHYS_TO_PAGE(pa));
+			pa += HW_PAGE_SIZE4KB;
+		}
+		status = pte_update(dev_context, pa_curr, virt_addr +
+				    (va_curr - ul_mpu_addr), size_curr,
+				    hw_attrs);
+		va_curr += size_curr;
+	}
+	/*
+	 * In any case, flush the TLB
+	 * This is called from here instead of from pte_update to avoid
+	 * unnecessary repetition while mapping non-contiguous physical
+	 * regions of a virtual region
+	 */
+	flush_all(dev_context);
+	dev_dbg(bridge, "%s status %x\n", __func__, status);
+	return status;
+}
+
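
The function above exists because vmalloc memory is only virtually contiguous:
each page must be translated individually, and physically adjacent pages are
merged into one region before calling pte_update(), so larger MMU pages can be
used whenever the allocator happens to return contiguous frames. A user-space
sketch of that run-length merging over a hypothetical page-frame-number array
(pfn[] stands in for vmalloc_to_page()/page_to_phys()):

#include <stdio.h>

#define PAGE_SZ 0x1000u

static void coalesce(const unsigned int *pfn, unsigned int num_pages)
{
	unsigned int i = 0;

	while (i < num_pages) {
		unsigned int pa_curr = pfn[i] * PAGE_SZ;
		unsigned int size_curr = PAGE_SZ;

		/* Grow the region while the next page is physically
		 * contiguous, as the inner loop in mem_map_vmalloc() does. */
		while (++i < num_pages &&
		       pfn[i] * PAGE_SZ == pa_curr + size_curr)
			size_curr += PAGE_SZ;

		printf("region: pa 0x%08x size 0x%x\n", pa_curr, size_curr);
	}
}

int main(void)
{
	/* Three contiguous frames, a gap, then two contiguous frames. */
	const unsigned int pfn[] = { 0x80000, 0x80001, 0x80002,
				     0x90000, 0x90001 };

	coalesce(pfn, 5);	/* expect regions of 0x3000 and 0x2000 */
	return 0;
}
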
+/*
  * ======== wait_for_start ========
  * Wait for the signal from DSP that it has started, or time out.
  */