author	Fernando Guzman Lugo <x0095840@ti.com>	2010-10-05 16:35:37 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-10-05 18:30:57 -0400
commit	e7396e77d9e4230bf725b5807732cbca191d111f (patch)
tree	77a3a0b4179b6c7e9963b2260b7797e2a466b063 /drivers/staging
parent	4dd1944ab7242d76534c97d5fef0ce541a2f1040 (diff)
staging: tidspbridge - remove custom mmu code from tiomap3430.c
This patch removes all the custom MMU code remaining in tiomap3430.c, which is no longer needed.

Signed-off-by: Fernando Guzman Lugo <x0095840@ti.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
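For context, the heart of the code removed below is pte_update(), which walked a physically contiguous region and, at each step, mapped it with the largest DSP MMU page size (16 MB, 1 MB, 64 KB, 4 KB) to which both the physical and device-virtual cursors were aligned. What follows is a minimal, self-contained C sketch of that greedy selection loop, simplified from the removed code: map_one_page() is a hypothetical stand-in for the driver's pte_set(), and the page sizes are written as literals standing in for HW_PAGE_SIZE16MB..HW_PAGE_SIZE4KB.

#include <stdint.h>
#include <stdio.h>

/* DSP MMU page sizes, largest first (literals standing in for the
 * HW_PAGE_SIZE* constants used by the removed code) */
static const uint32_t page_sizes[] = {
	0x1000000,	/* 16 MB */
	0x0100000,	/* 1 MB */
	0x0010000,	/* 64 KB */
	0x0001000,	/* 4 KB */
};

/* Hypothetical stand-in for the removed pte_set(): just report the
 * entry that would be written. */
static int map_one_page(uint32_t pa, uint32_t va, uint32_t size)
{
	printf("map pa=0x%08x va=0x%08x size=0x%07x\n", pa, va, size);
	return 0;
}

/* Greedy walk over [pa, pa + num_bytes): at each step pick the largest
 * page size to which both cursors are aligned, as the removed
 * pte_update() did. The caller must pass 4 KB-aligned values, so the
 * 4 KB case always matches and the loop terminates. */
static int pte_update_sketch(uint32_t pa, uint32_t va, uint32_t num_bytes)
{
	int status = 0;

	while (num_bytes && !status) {
		uint32_t all_bits = pa | va;	/* combined alignment */
		unsigned int i;

		for (i = 0; i < 4; i++) {
			if (num_bytes >= page_sizes[i] &&
			    (all_bits & (page_sizes[i] - 1)) == 0) {
				status = map_one_page(pa, va, page_sizes[i]);
				pa += page_sizes[i];
				va += page_sizes[i];
				num_bytes -= page_sizes[i];
				break;	/* retry from the largest size */
			}
		}
	}
	return status;
}

int main(void)
{
	/* A 2 MB + 64 KB region maps as two 1 MB pages plus one 64 KB page */
	return pte_update_sketch(0x80000000u, 0x11000000u, 0x210000u);
}

This duty no longer needs driver-private page tables: as the user_to_dsp_unmap() context line in the diff suggests (it takes a struct iommu *), the driver now goes through the generic IOMMU layer instead.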
Diffstat (limited to 'drivers/staging')
-rw-r--r--	drivers/staging/tidspbridge/core/_tiomap.h	|   2 -
-rw-r--r--	drivers/staging/tidspbridge/core/tiomap3430.c	| 425 -
2 files changed, 0 insertions(+), 427 deletions(-)
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index 877749258ac..394a64a7ba2 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -330,7 +330,6 @@ struct bridge_dev_context {
 	 */
 	u32 dw_dsp_ext_base_addr;	/* See the comment above */
 	u32 dw_api_reg_base;		/* API mem map'd registers */
-	void __iomem *dw_dsp_mmu_base;	/* DSP MMU Mapped registers */
 	u32 dw_api_clk_base;		/* CLK Registers */
 	u32 dw_dsp_clk_m2_base;		/* DSP Clock Module m2 */
 	u32 dw_public_rhea;		/* Pub Rhea */
@@ -356,7 +355,6 @@ struct bridge_dev_context {
 
 	/* TC Settings */
 	bool tc_word_swap_on;	/* Traffic Controller Word Swap */
-	struct pg_table_attrs *pt_attrs;
 	u32 dsp_per_clks;
 };
 
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index 0e5b8051bf2..e5f67be6c2c 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -104,56 +104,8 @@ static int bridge_dev_create(struct bridge_dev_context
 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
 				  u32 dw_cmd, void *pargs);
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
-static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
-		      u32 va, u32 size,
-		      struct hw_mmu_map_attrs_t *map_attrs);
-static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
-		   u32 size, struct hw_mmu_map_attrs_t *attrs);
-static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
-			   u32 ul_mpu_addr, u32 virt_addr,
-			   u32 ul_num_bytes,
-			   struct hw_mmu_map_attrs_t *hw_attrs);
-
 bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
 
-/* ----------------------------------- Globals */
-
-/* Attributes of L2 page tables for DSP MMU */
-struct page_info {
-	u32 num_entries;	/* Number of valid PTEs in the L2 PT */
-};
-
-/* Attributes used to manage the DSP MMU page tables */
-struct pg_table_attrs {
-	spinlock_t pg_lock;	/* Critical section object handle */
-
-	u32 l1_base_pa;		/* Physical address of the L1 PT */
-	u32 l1_base_va;		/* Virtual address of the L1 PT */
-	u32 l1_size;		/* Size of the L1 PT */
-	u32 l1_tbl_alloc_pa;
-	/* Physical address of Allocated mem for L1 table. May not be aligned */
-	u32 l1_tbl_alloc_va;
-	/* Virtual address of Allocated mem for L1 table. May not be aligned */
-	u32 l1_tbl_alloc_sz;
-	/* Size of consistent memory allocated for L1 table.
-	 * May not be aligned */
-
-	u32 l2_base_pa;		/* Physical address of the L2 PT */
-	u32 l2_base_va;		/* Virtual address of the L2 PT */
-	u32 l2_size;		/* Size of the L2 PT */
-	u32 l2_tbl_alloc_pa;
-	/* Physical address of Allocated mem for L2 table. May not be aligned */
-	u32 l2_tbl_alloc_va;
-	/* Virtual address of Allocated mem for L2 table. May not be aligned */
-	u32 l2_tbl_alloc_sz;
-	/* Size of consistent memory allocated for L2 table.
-	 * May not be aligned */
-
-	u32 l2_num_pages;	/* Number of allocated L2 PT */
-	/* Array [l2_num_pages] of L2 PT info structs */
-	struct page_info *pg_info;
-};
-
 /*
  * This Bridge driver's function interface table.
  */
@@ -202,27 +154,6 @@ static struct bridge_drv_interface drv_interface_fxns = {
 	bridge_msg_set_queue_id,
 };
 
-static inline void flush_all(struct bridge_dev_context *dev_context)
-{
-	if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
-	    dev_context->dw_brd_state == BRD_HIBERNATION)
-		wake_dsp(dev_context, NULL);
-
-	hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
-}
-
-static void bad_page_dump(u32 pa, struct page *pg)
-{
-	pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
-	pr_emerg("Bad page state in process '%s'\n"
-		 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
-		 "Backtrace:\n",
-		 current->comm, pg, (int)(2 * sizeof(unsigned long)),
-		 (unsigned long)pg->flags, pg->mapping,
-		 page_mapcount(pg), page_count(pg));
-	dump_stack();
-}
-
 /*
  * ======== bridge_drv_entry ========
  * purpose:
@@ -628,7 +559,6 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
 {
 	int status = 0;
 	struct bridge_dev_context *dev_context = dev_ctxt;
-	struct pg_table_attrs *pt_attrs;
 	u32 dsp_pwr_state;
 	int i;
 	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
@@ -667,14 +597,6 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
 
 	dsp_wdt_enable(false);
 
-	/* This is a good place to clear the MMU page tables as well */
-	if (dev_context->pt_attrs) {
-		pt_attrs = dev_context->pt_attrs;
-		memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
-		memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
-		memset((u8 *) pt_attrs->pg_info, 0x00,
-		       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
-	}
 	/* Reset DSP */
 	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
 			OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -765,10 +687,6 @@ static int bridge_dev_create(struct bridge_dev_context
 	struct bridge_dev_context *dev_context = NULL;
 	s32 entry_ndx;
 	struct cfg_hostres *resources = config_param;
-	struct pg_table_attrs *pt_attrs;
-	u32 pg_tbl_pa;
-	u32 pg_tbl_va;
-	u32 align_size;
 	struct drv_data *drv_datap = dev_get_drvdata(bridge);
 
 	/* Allocate and initialize a data structure to contain the bridge driver
@@ -799,97 +717,8 @@ static int bridge_dev_create(struct bridge_dev_context
 	if (!dev_context->dw_dsp_base_addr)
 		status = -EPERM;
 
-	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
-	if (pt_attrs != NULL) {
-		/* Assuming that we use only DSP's memory map
-		 * until 0x4000:0000 , we would need only 1024
-		 * L1 enties i.e L1 size = 4K */
-		pt_attrs->l1_size = 0x1000;
-		align_size = pt_attrs->l1_size;
-		/* Align sizes are expected to be power of 2 */
-		/* we like to get aligned on L1 table size */
-		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
-						     align_size, &pg_tbl_pa);
-
-		/* Check if the PA is aligned for us */
-		if ((pg_tbl_pa) & (align_size - 1)) {
-			/* PA not aligned to page table size ,
-			 * try with more allocation and align */
-			mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
-					  pt_attrs->l1_size);
-			/* we like to get aligned on L1 table size */
-			pg_tbl_va =
-			    (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
-						     align_size, &pg_tbl_pa);
-			/* We should be able to get aligned table now */
-			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
-			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
-			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
-			/* Align the PA to the next 'align' boundary */
-			pt_attrs->l1_base_pa =
-			    ((pg_tbl_pa) +
-			     (align_size - 1)) & (~(align_size - 1));
-			pt_attrs->l1_base_va =
-			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
-		} else {
-			/* We got aligned PA, cool */
-			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
-			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
-			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
-			pt_attrs->l1_base_pa = pg_tbl_pa;
-			pt_attrs->l1_base_va = pg_tbl_va;
-		}
-		if (pt_attrs->l1_base_va)
-			memset((u8 *) pt_attrs->l1_base_va, 0x00,
-			       pt_attrs->l1_size);
-
-		/* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM +
-		 * L4 pages */
-		pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
-		pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
-		    pt_attrs->l2_num_pages;
-		align_size = 4;	/* Make it u32 aligned */
-		/* we like to get aligned on L1 table size */
-		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
-						     align_size, &pg_tbl_pa);
-		pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
-		pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
-		pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
-		pt_attrs->l2_base_pa = pg_tbl_pa;
-		pt_attrs->l2_base_va = pg_tbl_va;
-
-		if (pt_attrs->l2_base_va)
-			memset((u8 *) pt_attrs->l2_base_va, 0x00,
-			       pt_attrs->l2_size);
-
-		pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
-					    sizeof(struct page_info), GFP_KERNEL);
-		dev_dbg(bridge,
-			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
-			"%x, size %x\n", pt_attrs->l1_base_pa,
-			pt_attrs->l1_base_va, pt_attrs->l1_size,
-			pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
-			pt_attrs->l2_size);
-		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
-			pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
-	}
-	if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
-	    (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
-		dev_context->pt_attrs = pt_attrs;
-	else
-		status = -ENOMEM;
-
 	if (!status) {
-		spin_lock_init(&pt_attrs->pg_lock);
 		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
-
-		/* Set the Clock Divisor for the DSP module */
-		udelay(5);
-		/* MMU address is obtained from the host
-		 * resources struct */
-		dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
-	}
-	if (!status) {
 		dev_context->hdev_obj = hdev_obj;
 		/* Store current board state. */
 		dev_context->dw_brd_state = BRD_UNKNOWN;
@@ -899,23 +728,6 @@ static int bridge_dev_create(struct bridge_dev_context
 		/* Return ptr to our device state to the DSP API for storage */
 		*dev_cntxt = dev_context;
 	} else {
-		if (pt_attrs != NULL) {
-			kfree(pt_attrs->pg_info);
-
-			if (pt_attrs->l2_tbl_alloc_va) {
-				mem_free_phys_mem((void *)
-						  pt_attrs->l2_tbl_alloc_va,
-						  pt_attrs->l2_tbl_alloc_pa,
-						  pt_attrs->l2_tbl_alloc_sz);
-			}
-			if (pt_attrs->l1_tbl_alloc_va) {
-				mem_free_phys_mem((void *)
-						  pt_attrs->l1_tbl_alloc_va,
-						  pt_attrs->l1_tbl_alloc_pa,
-						  pt_attrs->l1_tbl_alloc_sz);
-			}
-		}
-		kfree(pt_attrs);
 		kfree(dev_context);
 	}
 func_end:
@@ -983,7 +795,6 @@ static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
  */
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 {
-	struct pg_table_attrs *pt_attrs;
 	int status = 0;
 	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
 	    dev_ctxt;
@@ -997,23 +808,6 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 
 	/* first put the device to stop state */
 	bridge_brd_stop(dev_context);
-	if (dev_context->pt_attrs) {
-		pt_attrs = dev_context->pt_attrs;
-		kfree(pt_attrs->pg_info);
-
-		if (pt_attrs->l2_tbl_alloc_va) {
-			mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
-					  pt_attrs->l2_tbl_alloc_pa,
-					  pt_attrs->l2_tbl_alloc_sz);
-		}
-		if (pt_attrs->l1_tbl_alloc_va) {
-			mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
-					  pt_attrs->l1_tbl_alloc_pa,
-					  pt_attrs->l1_tbl_alloc_sz);
-		}
-		kfree(pt_attrs);
-
-	}
 
 	if (dev_context->resources) {
 		host_res = dev_context->resources;
@@ -1310,225 +1104,6 @@ int user_to_dsp_unmap(struct iommu *mmu, u32 da)
 }
 
 /*
- * ======== pte_update ========
- * This function calculates the optimum page-aligned addresses and sizes
- * Caller must pass page-aligned values
- */
-static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
-		      u32 va, u32 size,
-		      struct hw_mmu_map_attrs_t *map_attrs)
-{
-	u32 i;
-	u32 all_bits;
-	u32 pa_curr = pa;
-	u32 va_curr = va;
-	u32 num_bytes = size;
-	struct bridge_dev_context *dev_context = dev_ctxt;
-	int status = 0;
-	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
-		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
-	};
-
-	while (num_bytes && !status) {
-		/* To find the max. page size with which both PA & VA are
-		 * aligned */
-		all_bits = pa_curr | va_curr;
-
-		for (i = 0; i < 4; i++) {
-			if ((num_bytes >= page_size[i]) && ((all_bits &
-							     (page_size[i] -
-							      1)) == 0)) {
-				status =
-				    pte_set(dev_context->pt_attrs, pa_curr,
-					    va_curr, page_size[i], map_attrs);
-				pa_curr += page_size[i];
-				va_curr += page_size[i];
-				num_bytes -= page_size[i];
-				/* Don't try smaller sizes. Hopefully we have
-				 * reached an address aligned to a bigger page
-				 * size */
-				break;
-			}
-		}
-	}
-
-	return status;
-}
-
-/*
- * ======== pte_set ========
- * This function calculates PTE address (MPU virtual) to be updated
- * It also manages the L2 page tables
- */
-static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
-		   u32 size, struct hw_mmu_map_attrs_t *attrs)
-{
-	u32 i;
-	u32 pte_val;
-	u32 pte_addr_l1;
-	u32 pte_size;
-	/* Base address of the PT that will be updated */
-	u32 pg_tbl_va;
-	u32 l1_base_va;
-	/* Compiler warns that the next three variables might be used
-	 * uninitialized in this function. Doesn't seem so. Working around,
-	 * anyways. */
-	u32 l2_base_va = 0;
-	u32 l2_base_pa = 0;
-	u32 l2_page_num = 0;
-	int status = 0;
-
-	l1_base_va = pt->l1_base_va;
-	pg_tbl_va = l1_base_va;
-	if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
-		/* Find whether the L1 PTE points to a valid L2 PT */
-		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
-		if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
-			pte_val = *(u32 *) pte_addr_l1;
-			pte_size = hw_mmu_pte_size_l1(pte_val);
-		} else {
-			return -EPERM;
-		}
-		spin_lock(&pt->pg_lock);
-		if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
-			/* Get the L2 PA from the L1 PTE, and find
-			 * corresponding L2 VA */
-			l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
-			l2_base_va =
-			    l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
-			l2_page_num =
-			    (l2_base_pa -
-			     pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
-		} else if (pte_size == 0) {
-			/* L1 PTE is invalid. Allocate a L2 PT and
-			 * point the L1 PTE to it */
-			/* Find a free L2 PT. */
-			for (i = 0; (i < pt->l2_num_pages) &&
-			     (pt->pg_info[i].num_entries != 0); i++)
-				;;
-			if (i < pt->l2_num_pages) {
-				l2_page_num = i;
-				l2_base_pa = pt->l2_base_pa + (l2_page_num *
-						HW_MMU_COARSE_PAGE_SIZE);
-				l2_base_va = pt->l2_base_va + (l2_page_num *
-						HW_MMU_COARSE_PAGE_SIZE);
-				/* Endianness attributes are ignored for
-				 * HW_MMU_COARSE_PAGE_SIZE */
-				status =
-				    hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
-						   HW_MMU_COARSE_PAGE_SIZE,
-						   attrs);
-			} else {
-				status = -ENOMEM;
-			}
-		} else {
-			/* Found valid L1 PTE of another size.
-			 * Should not overwrite it. */
-			status = -EPERM;
-		}
-		if (!status) {
-			pg_tbl_va = l2_base_va;
-			if (size == HW_PAGE_SIZE64KB)
-				pt->pg_info[l2_page_num].num_entries += 16;
-			else
-				pt->pg_info[l2_page_num].num_entries++;
-			dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
-				"%x, num_entries %x\n", l2_base_va,
-				l2_base_pa, l2_page_num,
-				pt->pg_info[l2_page_num].num_entries);
-		}
-		spin_unlock(&pt->pg_lock);
-	}
-	if (!status) {
-		dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
-			pg_tbl_va, pa, va, size);
-		dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
-			"mixed_size %x\n", attrs->endianism,
-			attrs->element_size, attrs->mixed_size);
-		status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
-	}
-
-	return status;
-}
-
-/* Memory map kernel VA -- memory allocated with vmalloc */
-static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
-			   u32 ul_mpu_addr, u32 virt_addr,
-			   u32 ul_num_bytes,
-			   struct hw_mmu_map_attrs_t *hw_attrs)
-{
-	int status = 0;
-	struct page *page[1];
-	u32 i;
-	u32 pa_curr;
-	u32 pa_next;
-	u32 va_curr;
-	u32 size_curr;
-	u32 num_pages;
-	u32 pa;
-	u32 num_of4k_pages;
-	u32 temp = 0;
-
-	/*
-	 * Do Kernel va to pa translation.
-	 * Combine physically contiguous regions to reduce TLBs.
-	 * Pass the translated pa to pte_update.
-	 */
-	num_pages = ul_num_bytes / PAGE_SIZE;	/* PAGE_SIZE = OS page size */
-	i = 0;
-	va_curr = ul_mpu_addr;
-	page[0] = vmalloc_to_page((void *)va_curr);
-	pa_next = page_to_phys(page[0]);
-	while (!status && (i < num_pages)) {
-		/*
-		 * Reuse pa_next from the previous iteraion to avoid
-		 * an extra va2pa call
-		 */
-		pa_curr = pa_next;
-		size_curr = PAGE_SIZE;
-		/*
-		 * If the next page is physically contiguous,
-		 * map it with the current one by increasing
-		 * the size of the region to be mapped
-		 */
-		while (++i < num_pages) {
-			page[0] =
-			    vmalloc_to_page((void *)(va_curr + size_curr));
-			pa_next = page_to_phys(page[0]);
-
-			if (pa_next == (pa_curr + size_curr))
-				size_curr += PAGE_SIZE;
-			else
-				break;
-
-		}
-		if (pa_next == 0) {
-			status = -ENOMEM;
-			break;
-		}
-		pa = pa_curr;
-		num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
-		while (temp++ < num_of4k_pages) {
-			get_page(PHYS_TO_PAGE(pa));
-			pa += HW_PAGE_SIZE4KB;
-		}
-		status = pte_update(dev_context, pa_curr, virt_addr +
-				    (va_curr - ul_mpu_addr), size_curr,
-				    hw_attrs);
-		va_curr += size_curr;
-	}
-	/*
-	 * In any case, flush the TLB
-	 * This is called from here instead from pte_update to avoid unnecessary
-	 * repetition while mapping non-contiguous physical regions of a virtual
-	 * region
-	 */
-	flush_all(dev_context);
-	dev_dbg(bridge, "%s status %x\n", __func__, status);
-	return status;
-}
-
-/*
  * ======== wait_for_start ========
  * Wait for the singal from DSP that it has started, or time out.
  */