author     Rene Sapiens <rene.sapiens@ti.com>        2011-01-17 22:19:04 -0500
committer  Omar Ramirez Luna <omar.ramirez@ti.com>   2011-02-04 21:12:09 -0500
commit     5108de0ae06190f2ab54b9a1da315b77b33be1e2 (patch)
tree       47e4fa1120dbe42f444c7531f86b49d3a9b618c1 /drivers/staging/tidspbridge
parent     b4da7fc381c51d42c231f97de912b89dbabe8928 (diff)
staging: tidspbridge: set2 remove hungarian from structs
Hungarian notation will be removed from the elements inside structures; the following variables will be renamed:

Original:                 Replacement:
dw_dsp_base_addr          dsp_base_addr
dw_dmmu_base              dmmu_base
dw_index                  index
dw_int_addr               int_addr
dw_internal_size          internal_size
dw_last_output            last_output
dw_mem_base               mem_base
dw_mem_length             mem_length
dw_mem_phys               mem_phys
dw_mode                   mode
dw_num_chnls              num_chnls
dw_offset_for_monitor     offset_for_monitor
dw_output_mask            output_mask
dw_page_size              page_size
dw_pa                     pa
dw_per_base               per_base
dw_per_pm_base            per_pm_base
dw_public_rhea            public_rhea
dw_seg_base_pa            seg_base_pa

Signed-off-by: Rene Sapiens <rene.sapiens@ti.com>
Signed-off-by: Armando Uribe <x0095078@ti.com>
Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>
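A mechanical rename of this kind is normally scripted across the driver rather than edited by hand. As a rough illustration only (not part of this patch; it assumes GNU grep and sed run from the top of a kernel tree), each old/new pair from the table above could be applied with something like:

    # Hypothetical helper: rename one struct member throughout the driver.
    # Word boundaries (\b) keep the pattern from matching inside longer identifiers.
    rename_member() {
            old=$1; new=$2
            grep -rl "\b$old\b" drivers/staging/tidspbridge | \
                    xargs sed -i "s/\b$old\b/$new/g"
    }
    rename_member dw_output_mask output_mask
    rename_member dw_mem_base    mem_base
    # ...and so on for the remaining pairs listed above.

The actual patch may have been produced differently (e.g. with a Coccinelle semantic patch or by hand); the snippet is only meant to show why the diff touches the same identifiers uniformly across all 16 files.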
Diffstat (limited to 'drivers/staging/tidspbridge')
-rw-r--r--drivers/staging/tidspbridge/core/_tiomap.h8
-rw-r--r--drivers/staging/tidspbridge/core/chnl_sm.c8
-rw-r--r--drivers/staging/tidspbridge/core/io_sm.c18
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c104
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430_pwr.c64
-rw-r--r--drivers/staging/tidspbridge/core/tiomap_io.c16
-rw-r--r--drivers/staging/tidspbridge/core/ue_deh.c18
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h4
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h20
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h2
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h2
-rw-r--r--drivers/staging/tidspbridge/pmgr/cmm.c24
-rw-r--r--drivers/staging/tidspbridge/pmgr/dev.c8
-rw-r--r--drivers/staging/tidspbridge/pmgr/dspapi.c2
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv.c60
-rw-r--r--drivers/staging/tidspbridge/rmgr/node.c4
16 files changed, 181 insertions, 181 deletions
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index 80bc4755df1..5a14e6f8050 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -327,16 +327,16 @@ struct bridge_dev_context {
327 */ 327 */
328 u32 dsp_ext_base_addr; /* See the comment above */ 328 u32 dsp_ext_base_addr; /* See the comment above */
329 u32 api_reg_base; /* API mem map'd registers */ 329 u32 api_reg_base; /* API mem map'd registers */
330 void __iomem *dw_dsp_mmu_base; /* DSP MMU Mapped registers */ 330 void __iomem *dsp_mmu_base; /* DSP MMU Mapped registers */
331 u32 api_clk_base; /* CLK Registers */ 331 u32 api_clk_base; /* CLK Registers */
332 u32 dsp_clk_m2_base; /* DSP Clock Module m2 */ 332 u32 dsp_clk_m2_base; /* DSP Clock Module m2 */
333 u32 dw_public_rhea; /* Pub Rhea */ 333 u32 public_rhea; /* Pub Rhea */
334 u32 dw_int_addr; /* MB INTR reg */ 334 u32 int_addr; /* MB INTR reg */
335 u32 dw_tc_endianism; /* TC Endianism register */ 335 u32 dw_tc_endianism; /* TC Endianism register */
336 u32 dw_test_base; /* DSP MMU Mapped registers */ 336 u32 dw_test_base; /* DSP MMU Mapped registers */
337 u32 dw_self_loop; /* Pointer to the selfloop */ 337 u32 dw_self_loop; /* Pointer to the selfloop */
338 u32 dsp_start_add; /* API Boot vector */ 338 u32 dsp_start_add; /* API Boot vector */
339 u32 dw_internal_size; /* Internal memory size */ 339 u32 internal_size; /* Internal memory size */
340 340
341 struct omap_mbox *mbox; /* Mail box handle */ 341 struct omap_mbox *mbox; /* Mail box handle */
342 342
diff --git a/drivers/staging/tidspbridge/core/chnl_sm.c b/drivers/staging/tidspbridge/core/chnl_sm.c
index a5226272875..59b8d556939 100644
--- a/drivers/staging/tidspbridge/core/chnl_sm.c
+++ b/drivers/staging/tidspbridge/core/chnl_sm.c
@@ -272,7 +272,7 @@ int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
272 } else { 272 } else {
273 /* Record that we no longer have output buffers 273 /* Record that we no longer have output buffers
274 * available: */ 274 * available: */
275 chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id); 275 chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
276 } 276 }
277 } 277 }
278 /* Move all IOR's to IOC queue: */ 278 /* Move all IOR's to IOC queue: */
@@ -386,8 +386,8 @@ int bridge_chnl_create(struct chnl_mgr **channel_mgr,
386 /* Total # chnls supported */ 386 /* Total # chnls supported */
387 chnl_mgr_obj->max_channels = max_channels; 387 chnl_mgr_obj->max_channels = max_channels;
388 chnl_mgr_obj->open_channels = 0; 388 chnl_mgr_obj->open_channels = 0;
389 chnl_mgr_obj->dw_output_mask = 0; 389 chnl_mgr_obj->output_mask = 0;
390 chnl_mgr_obj->dw_last_output = 0; 390 chnl_mgr_obj->last_output = 0;
391 chnl_mgr_obj->hdev_obj = hdev_obj; 391 chnl_mgr_obj->hdev_obj = hdev_obj;
392 spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock); 392 spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
393 } else { 393 } else {
@@ -511,7 +511,7 @@ int bridge_chnl_get_info(struct chnl_object *chnl_obj,
511 channel_info->hchnl_mgr = pchnl->chnl_mgr_obj; 511 channel_info->hchnl_mgr = pchnl->chnl_mgr_obj;
512 channel_info->event_obj = pchnl->user_event; 512 channel_info->event_obj = pchnl->user_event;
513 channel_info->cnhl_id = pchnl->chnl_id; 513 channel_info->cnhl_id = pchnl->chnl_id;
514 channel_info->dw_mode = pchnl->chnl_mode; 514 channel_info->mode = pchnl->chnl_mode;
515 channel_info->bytes_tx = pchnl->bytes_moved; 515 channel_info->bytes_tx = pchnl->bytes_moved;
516 channel_info->process = pchnl->process; 516 channel_info->process = pchnl->process;
517 channel_info->sync_event = pchnl->sync_event; 517 channel_info->sync_event = pchnl->sync_event;
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 913c7681d80..e89052c8c0e 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -417,8 +417,8 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
417 417
418 /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */ 418 /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */
419 ndx = 0; 419 ndx = 0;
420 ul_gpp_pa = host_res->dw_mem_phys[1]; 420 ul_gpp_pa = host_res->mem_phys[1];
421 ul_gpp_va = host_res->dw_mem_base[1]; 421 ul_gpp_va = host_res->mem_base[1];
422 /* This is the virtual uncached ioremapped address!!! */ 422 /* This is the virtual uncached ioremapped address!!! */
423 /* Why can't we directly take the DSPVA from the symbols? */ 423 /* Why can't we directly take the DSPVA from the symbols? */
424 ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt; 424 ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt;
@@ -441,9 +441,9 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
441 ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size); 441 ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
442 442
443 if ((ul_seg_size + ul_seg1_size + ul_pad_size) > 443 if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
444 host_res->dw_mem_length[1]) { 444 host_res->mem_length[1]) {
445 pr_err("%s: shm Error, reserved 0x%x required 0x%x\n", 445 pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
446 __func__, host_res->dw_mem_length[1], 446 __func__, host_res->mem_length[1],
447 ul_seg_size + ul_seg1_size + ul_pad_size); 447 ul_seg_size + ul_seg1_size + ul_pad_size);
448 status = -ENOMEM; 448 status = -ENOMEM;
449 } 449 }
@@ -993,7 +993,7 @@ void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
993 * Record the fact that we have a buffer available for 993 * Record the fact that we have a buffer available for
994 * output. 994 * output.
995 */ 995 */
996 chnl_mgr_obj->dw_output_mask |= (1 << pchnl->chnl_id); 996 chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id);
997 } else { 997 } else {
998 DBC_ASSERT(io_mode); /* Shouldn't get here. */ 998 DBC_ASSERT(io_mode); /* Shouldn't get here. */
999 } 999 }
@@ -1036,7 +1036,7 @@ static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
1036 u32 shift; 1036 u32 shift;
1037 1037
1038 id = (pchnl != 1038 id = (pchnl !=
1039 NULL ? pchnl->chnl_id : (chnl_mgr_obj->dw_last_output + 1)); 1039 NULL ? pchnl->chnl_id : (chnl_mgr_obj->last_output + 1));
1040 id = ((id == CHNL_MAXCHANNELS) ? 0 : id); 1040 id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
1041 if (id >= CHNL_MAXCHANNELS) 1041 if (id >= CHNL_MAXCHANNELS)
1042 goto func_end; 1042 goto func_end;
@@ -1047,7 +1047,7 @@ static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
1047 if (mask & shift) { 1047 if (mask & shift) {
1048 ret = id; 1048 ret = id;
1049 if (pchnl == NULL) 1049 if (pchnl == NULL)
1050 chnl_mgr_obj->dw_last_output = id; 1050 chnl_mgr_obj->last_output = id;
1051 break; 1051 break;
1052 } 1052 }
1053 id = id + 1; 1053 id = id + 1;
@@ -1336,7 +1336,7 @@ static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1336 dw_dsp_f_mask = sm->dsp_free_mask; 1336 dw_dsp_f_mask = sm->dsp_free_mask;
1337 chnl_id = 1337 chnl_id =
1338 find_ready_output(chnl_mgr_obj, pchnl, 1338 find_ready_output(chnl_mgr_obj, pchnl,
1339 (chnl_mgr_obj->dw_output_mask & dw_dsp_f_mask)); 1339 (chnl_mgr_obj->output_mask & dw_dsp_f_mask));
1340 if (chnl_id == OUTPUTNOTREADY) 1340 if (chnl_id == OUTPUTNOTREADY)
1341 goto func_end; 1341 goto func_end;
1342 1342
@@ -1358,7 +1358,7 @@ static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1358 1358
1359 /* Record fact that no more I/O buffers available */ 1359 /* Record fact that no more I/O buffers available */
1360 if (list_empty(&pchnl->pio_requests)) 1360 if (list_empty(&pchnl->pio_requests))
1361 chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id); 1361 chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
1362 1362
1363 /* Transfer buffer to DSP side */ 1363 /* Transfer buffer to DSP side */
1364 chnl_packet_obj->byte_size = min(pio_mgr->usm_buf_size, 1364 chnl_packet_obj->byte_size = min(pio_mgr->usm_buf_size,
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index ce0556d026c..5964a13d0b8 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -233,7 +233,7 @@ static inline void flush_all(struct bridge_dev_context *dev_context)
233 dev_context->brd_state == BRD_HIBERNATION) 233 dev_context->brd_state == BRD_HIBERNATION)
234 wake_dsp(dev_context, NULL); 234 wake_dsp(dev_context, NULL);
235 235
236 hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base); 236 hw_mmu_tlb_flush_all(dev_context->dsp_mmu_base);
237} 237}
238 238
239static void bad_page_dump(u32 pa, struct page *pg) 239static void bad_page_dump(u32 pa, struct page *pg)
@@ -331,7 +331,7 @@ static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
331 } 331 }
332 /* change here to account for the 3 bands of the DSP internal memory */ 332 /* change here to account for the 3 bands of the DSP internal memory */
333 if ((dsp_addr - dev_context->dsp_start_add) < 333 if ((dsp_addr - dev_context->dsp_start_add) <
334 dev_context->dw_internal_size) { 334 dev_context->internal_size) {
335 offset = dsp_addr - dev_context->dsp_start_add; 335 offset = dsp_addr - dev_context->dsp_start_add;
336 } else { 336 } else {
337 status = read_ext_dsp_data(dev_context, host_buff, dsp_addr, 337 status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
@@ -452,9 +452,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
452 udelay(100); 452 udelay(100);
453 453
454 /* Disbale the DSP MMU */ 454 /* Disbale the DSP MMU */
455 hw_mmu_disable(resources->dw_dmmu_base); 455 hw_mmu_disable(resources->dmmu_base);
456 /* Disable TWL */ 456 /* Disable TWL */
457 hw_mmu_twl_disable(resources->dw_dmmu_base); 457 hw_mmu_twl_disable(resources->dmmu_base);
458 458
459 /* Only make TLB entry if both addresses are non-zero */ 459 /* Only make TLB entry if both addresses are non-zero */
460 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; 460 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
@@ -476,7 +476,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
476 e->ul_dsp_va, 476 e->ul_dsp_va,
477 e->ul_size); 477 e->ul_size);
478 478
479 hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base, 479 hw_mmu_tlb_add(dev_context->dsp_mmu_base,
480 e->ul_gpp_pa, 480 e->ul_gpp_pa,
481 e->ul_dsp_va, 481 e->ul_dsp_va,
482 e->ul_size, 482 e->ul_size,
@@ -490,19 +490,19 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
490 /* Lock the above TLB entries and get the BIOS and load monitor timer 490 /* Lock the above TLB entries and get the BIOS and load monitor timer
491 * information */ 491 * information */
492 if (!status) { 492 if (!status) {
493 hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx); 493 hw_mmu_num_locked_set(resources->dmmu_base, itmp_entry_ndx);
494 hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx); 494 hw_mmu_victim_num_set(resources->dmmu_base, itmp_entry_ndx);
495 hw_mmu_ttb_set(resources->dw_dmmu_base, 495 hw_mmu_ttb_set(resources->dmmu_base,
496 dev_context->pt_attrs->l1_base_pa); 496 dev_context->pt_attrs->l1_base_pa);
497 hw_mmu_twl_enable(resources->dw_dmmu_base); 497 hw_mmu_twl_enable(resources->dmmu_base);
498 /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */ 498 /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
499 499
500 temp = __raw_readl((resources->dw_dmmu_base) + 0x10); 500 temp = __raw_readl((resources->dmmu_base) + 0x10);
501 temp = (temp & 0xFFFFFFEF) | 0x11; 501 temp = (temp & 0xFFFFFFEF) | 0x11;
502 __raw_writel(temp, (resources->dw_dmmu_base) + 0x10); 502 __raw_writel(temp, (resources->dmmu_base) + 0x10);
503 503
504 /* Let the DSP MMU run */ 504 /* Let the DSP MMU run */
505 hw_mmu_enable(resources->dw_dmmu_base); 505 hw_mmu_enable(resources->dmmu_base);
506 506
507 /* Enable the BIOS clock */ 507 /* Enable the BIOS clock */
508 (void)dev_get_symbol(dev_context->hdev_obj, 508 (void)dev_get_symbol(dev_context->hdev_obj,
@@ -566,18 +566,18 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
566 } 566 }
567 if (!status) { 567 if (!status) {
568/*PM_IVA2GRPSEL_PER = 0xC0;*/ 568/*PM_IVA2GRPSEL_PER = 0xC0;*/
569 temp = readl(resources->dw_per_pm_base + 0xA8); 569 temp = readl(resources->per_pm_base + 0xA8);
570 temp = (temp & 0xFFFFFF30) | 0xC0; 570 temp = (temp & 0xFFFFFF30) | 0xC0;
571 writel(temp, resources->dw_per_pm_base + 0xA8); 571 writel(temp, resources->per_pm_base + 0xA8);
572 572
573/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */ 573/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
574 temp = readl(resources->dw_per_pm_base + 0xA4); 574 temp = readl(resources->per_pm_base + 0xA4);
575 temp = (temp & 0xFFFFFF3F); 575 temp = (temp & 0xFFFFFF3F);
576 writel(temp, resources->dw_per_pm_base + 0xA4); 576 writel(temp, resources->per_pm_base + 0xA4);
577/*CM_SLEEPDEP_PER |= 0x04; */ 577/*CM_SLEEPDEP_PER |= 0x04; */
578 temp = readl(resources->dw_per_base + 0x44); 578 temp = readl(resources->per_base + 0x44);
579 temp = (temp & 0xFFFFFFFB) | 0x04; 579 temp = (temp & 0xFFFFFFFB) | 0x04;
580 writel(temp, resources->dw_per_base + 0x44); 580 writel(temp, resources->per_base + 0x44);
581 581
582/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */ 582/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */
583 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, 583 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
@@ -586,7 +586,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
586 /* Let DSP go */ 586 /* Let DSP go */
587 dev_dbg(bridge, "%s Unreset\n", __func__); 587 dev_dbg(bridge, "%s Unreset\n", __func__);
588 /* Enable DSP MMU Interrupts */ 588 /* Enable DSP MMU Interrupts */
589 hw_mmu_event_enable(resources->dw_dmmu_base, 589 hw_mmu_event_enable(resources->dmmu_base,
590 HW_MMU_ALL_INTERRUPTS); 590 HW_MMU_ALL_INTERRUPTS);
591 /* release the RST1, DSP starts executing now .. */ 591 /* release the RST1, DSP starts executing now .. */
592 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, 592 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
@@ -726,7 +726,7 @@ static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
726 return status; 726 return status;
727 } 727 }
728 if ((dsp_addr - dev_context->dsp_start_add) < 728 if ((dsp_addr - dev_context->dsp_start_add) <
729 dev_context->dw_internal_size) { 729 dev_context->internal_size) {
730 status = write_dsp_data(dev_ctxt, host_buff, dsp_addr, 730 status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
731 ul_num_bytes, mem_type); 731 ul_num_bytes, mem_type);
732 } else { 732 } else {
@@ -767,7 +767,7 @@ static int bridge_dev_create(struct bridge_dev_context
767 dev_context->dsp_start_add = (u32) OMAP_GEM_BASE; 767 dev_context->dsp_start_add = (u32) OMAP_GEM_BASE;
768 dev_context->dw_self_loop = (u32) NULL; 768 dev_context->dw_self_loop = (u32) NULL;
769 dev_context->dsp_per_clks = 0; 769 dev_context->dsp_per_clks = 0;
770 dev_context->dw_internal_size = OMAP_DSP_SIZE; 770 dev_context->internal_size = OMAP_DSP_SIZE;
771 /* Clear dev context MMU table entries. 771 /* Clear dev context MMU table entries.
772 * These get set on bridge_io_on_loaded() call after program loaded. */ 772 * These get set on bridge_io_on_loaded() call after program loaded. */
773 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) { 773 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
@@ -776,10 +776,10 @@ static int bridge_dev_create(struct bridge_dev_context
776 } 776 }
777 dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *) 777 dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
778 (config_param-> 778 (config_param->
779 dw_mem_base 779 mem_base
780 [3]), 780 [3]),
781 config_param-> 781 config_param->
782 dw_mem_length 782 mem_length
783 [3]); 783 [3]);
784 if (!dev_context->dsp_base_addr) 784 if (!dev_context->dsp_base_addr)
785 status = -EPERM; 785 status = -EPERM;
@@ -869,7 +869,7 @@ static int bridge_dev_create(struct bridge_dev_context
869 udelay(5); 869 udelay(5);
870 /* MMU address is obtained from the host 870 /* MMU address is obtained from the host
871 * resources struct */ 871 * resources struct */
872 dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base; 872 dev_context->dsp_mmu_base = resources->dmmu_base;
873 } 873 }
874 if (!status) { 874 if (!status) {
875 dev_context->hdev_obj = hdev_obj; 875 dev_context->hdev_obj = hdev_obj;
@@ -1001,12 +1001,12 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
1001 host_res = dev_context->resources; 1001 host_res = dev_context->resources;
1002 shm_size = drv_datap->shm_size; 1002 shm_size = drv_datap->shm_size;
1003 if (shm_size >= 0x10000) { 1003 if (shm_size >= 0x10000) {
1004 if ((host_res->dw_mem_base[1]) && 1004 if ((host_res->mem_base[1]) &&
1005 (host_res->dw_mem_phys[1])) { 1005 (host_res->mem_phys[1])) {
1006 mem_free_phys_mem((void *) 1006 mem_free_phys_mem((void *)
1007 host_res->dw_mem_base 1007 host_res->mem_base
1008 [1], 1008 [1],
1009 host_res->dw_mem_phys 1009 host_res->mem_phys
1010 [1], shm_size); 1010 [1], shm_size);
1011 } 1011 }
1012 } else { 1012 } else {
@@ -1015,31 +1015,31 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
1015 "mem_free_phys_mem\n", __func__, 1015 "mem_free_phys_mem\n", __func__,
1016 status); 1016 status);
1017 } 1017 }
1018 host_res->dw_mem_base[1] = 0; 1018 host_res->mem_base[1] = 0;
1019 host_res->dw_mem_phys[1] = 0; 1019 host_res->mem_phys[1] = 0;
1020 1020
1021 if (host_res->dw_mem_base[0]) 1021 if (host_res->mem_base[0])
1022 iounmap((void *)host_res->dw_mem_base[0]); 1022 iounmap((void *)host_res->mem_base[0]);
1023 if (host_res->dw_mem_base[2]) 1023 if (host_res->mem_base[2])
1024 iounmap((void *)host_res->dw_mem_base[2]); 1024 iounmap((void *)host_res->mem_base[2]);
1025 if (host_res->dw_mem_base[3]) 1025 if (host_res->mem_base[3])
1026 iounmap((void *)host_res->dw_mem_base[3]); 1026 iounmap((void *)host_res->mem_base[3]);
1027 if (host_res->dw_mem_base[4]) 1027 if (host_res->mem_base[4])
1028 iounmap((void *)host_res->dw_mem_base[4]); 1028 iounmap((void *)host_res->mem_base[4]);
1029 if (host_res->dw_dmmu_base) 1029 if (host_res->dmmu_base)
1030 iounmap(host_res->dw_dmmu_base); 1030 iounmap(host_res->dmmu_base);
1031 if (host_res->dw_per_base) 1031 if (host_res->per_base)
1032 iounmap(host_res->dw_per_base); 1032 iounmap(host_res->per_base);
1033 if (host_res->dw_per_pm_base) 1033 if (host_res->per_pm_base)
1034 iounmap((void *)host_res->dw_per_pm_base); 1034 iounmap((void *)host_res->per_pm_base);
1035 if (host_res->core_pm_base) 1035 if (host_res->core_pm_base)
1036 iounmap((void *)host_res->core_pm_base); 1036 iounmap((void *)host_res->core_pm_base);
1037 1037
1038 host_res->dw_mem_base[0] = (u32) NULL; 1038 host_res->mem_base[0] = (u32) NULL;
1039 host_res->dw_mem_base[2] = (u32) NULL; 1039 host_res->mem_base[2] = (u32) NULL;
1040 host_res->dw_mem_base[3] = (u32) NULL; 1040 host_res->mem_base[3] = (u32) NULL;
1041 host_res->dw_mem_base[4] = (u32) NULL; 1041 host_res->mem_base[4] = (u32) NULL;
1042 host_res->dw_dmmu_base = NULL; 1042 host_res->dmmu_base = NULL;
1043 1043
1044 kfree(host_res); 1044 kfree(host_res);
1045 } 1045 }
@@ -1071,7 +1071,7 @@ static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
1071 copy_bytes, mem_type); 1071 copy_bytes, mem_type);
1072 if (!status) { 1072 if (!status) {
1073 if (dest_addr < (dev_context->dsp_start_add + 1073 if (dest_addr < (dev_context->dsp_start_add +
1074 dev_context->dw_internal_size)) { 1074 dev_context->internal_size)) {
1075 /* Write to Internal memory */ 1075 /* Write to Internal memory */
1076 status = write_dsp_data(dev_ctxt, host_buf, 1076 status = write_dsp_data(dev_ctxt, host_buf,
1077 dest_addr, copy_bytes, 1077 dest_addr, copy_bytes,
@@ -1105,7 +1105,7 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
1105 ul_bytes = 1105 ul_bytes =
1106 ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes; 1106 ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
1107 if (dsp_addr < (dev_context->dsp_start_add + 1107 if (dsp_addr < (dev_context->dsp_start_add +
1108 dev_context->dw_internal_size)) { 1108 dev_context->internal_size)) {
1109 status = 1109 status =
1110 write_dsp_data(dev_ctxt, host_buff, dsp_addr, 1110 write_dsp_data(dev_ctxt, host_buff, dsp_addr,
1111 ul_bytes, mem_type); 1111 ul_bytes, mem_type);
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index fff27d4392d..64ca2d246c2 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -434,8 +434,8 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
434 434
435 switch (clock_id) { 435 switch (clock_id) {
436 case BPWR_GP_TIMER5: 436 case BPWR_GP_TIMER5:
437 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 437 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
438 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 438 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
439 if (enable) { 439 if (enable) {
440 iva2_grpsel |= OMAP3430_GRPSEL_GPT5_MASK; 440 iva2_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
441 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK; 441 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
@@ -443,12 +443,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
443 mpu_grpsel |= OMAP3430_GRPSEL_GPT5_MASK; 443 mpu_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
444 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK; 444 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
445 } 445 }
446 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 446 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
447 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 447 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
448 break; 448 break;
449 case BPWR_GP_TIMER6: 449 case BPWR_GP_TIMER6:
450 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 450 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
451 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 451 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
452 if (enable) { 452 if (enable) {
453 iva2_grpsel |= OMAP3430_GRPSEL_GPT6_MASK; 453 iva2_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
454 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK; 454 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
@@ -456,12 +456,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
456 mpu_grpsel |= OMAP3430_GRPSEL_GPT6_MASK; 456 mpu_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
457 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK; 457 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
458 } 458 }
459 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 459 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
460 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 460 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
461 break; 461 break;
462 case BPWR_GP_TIMER7: 462 case BPWR_GP_TIMER7:
463 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 463 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
464 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 464 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
465 if (enable) { 465 if (enable) {
466 iva2_grpsel |= OMAP3430_GRPSEL_GPT7_MASK; 466 iva2_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
467 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK; 467 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
@@ -469,12 +469,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
469 mpu_grpsel |= OMAP3430_GRPSEL_GPT7_MASK; 469 mpu_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
470 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK; 470 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
471 } 471 }
472 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 472 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
473 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 473 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
474 break; 474 break;
475 case BPWR_GP_TIMER8: 475 case BPWR_GP_TIMER8:
476 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 476 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
477 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 477 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
478 if (enable) { 478 if (enable) {
479 iva2_grpsel |= OMAP3430_GRPSEL_GPT8_MASK; 479 iva2_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
480 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK; 480 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
@@ -482,8 +482,8 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
482 mpu_grpsel |= OMAP3430_GRPSEL_GPT8_MASK; 482 mpu_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
483 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK; 483 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
484 } 484 }
485 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 485 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
486 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 486 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
487 break; 487 break;
488 case BPWR_MCBSP1: 488 case BPWR_MCBSP1:
489 iva2_grpsel = readl(resources->core_pm_base + 0xA8); 489 iva2_grpsel = readl(resources->core_pm_base + 0xA8);
@@ -499,8 +499,8 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
499 writel(mpu_grpsel, resources->core_pm_base + 0xA4); 499 writel(mpu_grpsel, resources->core_pm_base + 0xA4);
500 break; 500 break;
501 case BPWR_MCBSP2: 501 case BPWR_MCBSP2:
502 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 502 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
503 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 503 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
504 if (enable) { 504 if (enable) {
505 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK; 505 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
506 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK; 506 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
@@ -508,12 +508,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
508 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK; 508 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
509 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK; 509 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
510 } 510 }
511 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 511 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
512 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 512 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
513 break; 513 break;
514 case BPWR_MCBSP3: 514 case BPWR_MCBSP3:
515 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 515 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
516 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 516 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
517 if (enable) { 517 if (enable) {
518 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK; 518 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
519 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK; 519 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
@@ -521,12 +521,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
521 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK; 521 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
522 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK; 522 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
523 } 523 }
524 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 524 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
525 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 525 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
526 break; 526 break;
527 case BPWR_MCBSP4: 527 case BPWR_MCBSP4:
528 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 528 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
529 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 529 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
530 if (enable) { 530 if (enable) {
531 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK; 531 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
532 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK; 532 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
@@ -534,12 +534,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
534 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK; 534 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
535 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK; 535 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
536 } 536 }
537 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 537 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
538 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 538 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
539 break; 539 break;
540 case BPWR_MCBSP5: 540 case BPWR_MCBSP5:
541 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 541 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
542 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 542 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
543 if (enable) { 543 if (enable) {
544 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK; 544 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
545 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK; 545 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
@@ -547,8 +547,8 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
547 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK; 547 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
548 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK; 548 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
549 } 549 }
550 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 550 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
551 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 551 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
552 break; 552 break;
553 } 553 }
554} 554}
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index 09c9e873ce7..574feade6ef 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -197,16 +197,16 @@ int write_dsp_data(struct bridge_dev_context *dev_context,
197 197
198 offset = dsp_addr - dev_context->dsp_start_add; 198 offset = dsp_addr - dev_context->dsp_start_add;
199 if (offset < base1) { 199 if (offset < base1) {
200 dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[2], 200 dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[2],
201 resources->dw_mem_length[2]); 201 resources->mem_length[2]);
202 } else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) { 202 } else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) {
203 dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[3], 203 dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[3],
204 resources->dw_mem_length[3]); 204 resources->mem_length[3]);
205 offset = offset - base2; 205 offset = offset - base2;
206 } else if (offset >= base2 + OMAP_DSP_MEM2_SIZE && 206 } else if (offset >= base2 + OMAP_DSP_MEM2_SIZE &&
207 offset < base3 + OMAP_DSP_MEM3_SIZE) { 207 offset < base3 + OMAP_DSP_MEM3_SIZE) {
208 dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[4], 208 dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[4],
209 resources->dw_mem_length[4]); 209 resources->mem_length[4]);
210 offset = offset - base3; 210 offset = offset - base3;
211 } else { 211 } else {
212 return -EPERM; 212 return -EPERM;
@@ -339,7 +339,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
339 dw_ext_prog_virt_mem = 339 dw_ext_prog_virt_mem =
340 dev_context->atlb_entry[0].ul_gpp_va; 340 dev_context->atlb_entry[0].ul_gpp_va;
341 } else { 341 } else {
342 dw_ext_prog_virt_mem = host_res->dw_mem_base[1]; 342 dw_ext_prog_virt_mem = host_res->mem_base[1];
343 dw_ext_prog_virt_mem += 343 dw_ext_prog_virt_mem +=
344 (ul_ext_base - ul_dyn_ext_base); 344 (ul_ext_base - ul_dyn_ext_base);
345 } 345 }
@@ -437,7 +437,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
437 omap_mbox_restore_ctx(dev_context->mbox); 437 omap_mbox_restore_ctx(dev_context->mbox);
438 438
439 /* Access MMU SYS CONFIG register to generate a short wakeup */ 439 /* Access MMU SYS CONFIG register to generate a short wakeup */
440 temp = readl(resources->dw_dmmu_base + 0x10); 440 temp = readl(resources->dmmu_base + 0x10);
441 441
442 dev_context->brd_state = BRD_RUNNING; 442 dev_context->brd_state = BRD_RUNNING;
443 } else if (dev_context->brd_state == BRD_RETENTION) { 443 } else if (dev_context->brd_state == BRD_RETENTION) {
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
index 875a65c7f6d..bc2feff662b 100644
--- a/drivers/staging/tidspbridge/core/ue_deh.c
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -59,9 +59,9 @@ static irqreturn_t mmu_fault_isr(int irq, void *data)
59 return IRQ_HANDLED; 59 return IRQ_HANDLED;
60 } 60 }
61 61
62 hw_mmu_event_status(resources->dw_dmmu_base, &event); 62 hw_mmu_event_status(resources->dmmu_base, &event);
63 if (event == HW_MMU_TRANSLATION_FAULT) { 63 if (event == HW_MMU_TRANSLATION_FAULT) {
64 hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr); 64 hw_mmu_fault_addr_read(resources->dmmu_base, &fault_addr);
65 dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__, 65 dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
66 event, fault_addr); 66 event, fault_addr);
67 /* 67 /*
@@ -73,10 +73,10 @@ static irqreturn_t mmu_fault_isr(int irq, void *data)
73 73
74 /* Disable the MMU events, else once we clear it will 74 /* Disable the MMU events, else once we clear it will
75 * start to raise INTs again */ 75 * start to raise INTs again */
76 hw_mmu_event_disable(resources->dw_dmmu_base, 76 hw_mmu_event_disable(resources->dmmu_base,
77 HW_MMU_TRANSLATION_FAULT); 77 HW_MMU_TRANSLATION_FAULT);
78 } else { 78 } else {
79 hw_mmu_event_disable(resources->dw_dmmu_base, 79 hw_mmu_event_disable(resources->dmmu_base,
80 HW_MMU_ALL_INTERRUPTS); 80 HW_MMU_ALL_INTERRUPTS);
81 } 81 }
82 return IRQ_HANDLED; 82 return IRQ_HANDLED;
@@ -185,10 +185,10 @@ static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
185 * access entry #0. Then add a new entry so that the DSP OS 185 * access entry #0. Then add a new entry so that the DSP OS
186 * can continue in order to dump the stack. 186 * can continue in order to dump the stack.
187 */ 187 */
188 hw_mmu_twl_disable(resources->dw_dmmu_base); 188 hw_mmu_twl_disable(resources->dmmu_base);
189 hw_mmu_tlb_flush_all(resources->dw_dmmu_base); 189 hw_mmu_tlb_flush_all(resources->dmmu_base);
190 190
191 hw_mmu_tlb_add(resources->dw_dmmu_base, 191 hw_mmu_tlb_add(resources->dmmu_base,
192 virt_to_phys(dummy_va_addr), fault_addr, 192 virt_to_phys(dummy_va_addr), fault_addr,
193 HW_PAGE_SIZE4KB, 1, 193 HW_PAGE_SIZE4KB, 1,
194 &map_attrs, HW_SET, HW_SET); 194 &map_attrs, HW_SET, HW_SET);
@@ -198,12 +198,12 @@ static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
198 dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); 198 dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
199 199
200 /* Clear MMU interrupt */ 200 /* Clear MMU interrupt */
201 hw_mmu_event_ack(resources->dw_dmmu_base, 201 hw_mmu_event_ack(resources->dmmu_base,
202 HW_MMU_TRANSLATION_FAULT); 202 HW_MMU_TRANSLATION_FAULT);
203 dump_dsp_stack(dev_context); 203 dump_dsp_stack(dev_context);
204 dsp_clk_disable(DSP_CLK_GPT8); 204 dsp_clk_disable(DSP_CLK_GPT8);
205 205
206 hw_mmu_disable(resources->dw_dmmu_base); 206 hw_mmu_disable(resources->dmmu_base);
207 free_page((unsigned long)dummy_va_addr); 207 free_page((unsigned long)dummy_va_addr);
208} 208}
209#endif 209#endif
diff --git a/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h b/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
index 14b0567e531..ea547380012 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
@@ -119,8 +119,8 @@ struct chnl_mgr {
119 struct dev_object *hdev_obj; 119 struct dev_object *hdev_obj;
120 120
121 /* These fields initialized in bridge_chnl_create(): */ 121 /* These fields initialized in bridge_chnl_create(): */
122 u32 dw_output_mask; /* Host output channels w/ full buffers */ 122 u32 output_mask; /* Host output channels w/ full buffers */
123 u32 dw_last_output; /* Last output channel fired from DPC */ 123 u32 last_output; /* Last output channel fired from DPC */
124 /* Critical section object handle */ 124 /* Critical section object handle */
125 spinlock_t chnl_mgr_lock; 125 spinlock_t chnl_mgr_lock;
126 u32 word_size; /* Size in bytes of DSP word */ 126 u32 word_size; /* Size in bytes of DSP word */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
index f7c105af3da..60a278136bd 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
@@ -34,28 +34,28 @@ struct cfg_devnode;
34struct cfg_hostres { 34struct cfg_hostres {
35 u32 num_mem_windows; /* Set to default */ 35 u32 num_mem_windows; /* Set to default */
36 /* This is the base.memory */ 36 /* This is the base.memory */
37 u32 dw_mem_base[CFG_MAXMEMREGISTERS]; /* shm virtual address */ 37 u32 mem_base[CFG_MAXMEMREGISTERS]; /* shm virtual address */
38 u32 dw_mem_length[CFG_MAXMEMREGISTERS]; /* Length of the Base */ 38 u32 mem_length[CFG_MAXMEMREGISTERS]; /* Length of the Base */
39 u32 dw_mem_phys[CFG_MAXMEMREGISTERS]; /* shm Physical address */ 39 u32 mem_phys[CFG_MAXMEMREGISTERS]; /* shm Physical address */
40 u8 birq_registers; /* IRQ Number */ 40 u8 birq_registers; /* IRQ Number */
41 u8 birq_attrib; /* IRQ Attribute */ 41 u8 birq_attrib; /* IRQ Attribute */
42 u32 dw_offset_for_monitor; /* The Shared memory starts from 42 u32 offset_for_monitor; /* The Shared memory starts from
43 * dw_mem_base + this offset */ 43 * mem_base + this offset */
44 /* 44 /*
45 * Info needed by NODE for allocating channels to communicate with RMS: 45 * Info needed by NODE for allocating channels to communicate with RMS:
46 * chnl_offset: Offset of RMS channels. Lower channels are 46 * chnl_offset: Offset of RMS channels. Lower channels are
47 * reserved. 47 * reserved.
48 * chnl_buf_size: Size of channel buffer to send to RMS 48 * chnl_buf_size: Size of channel buffer to send to RMS
49 * dw_num_chnls: Total number of channels 49 * num_chnls: Total number of channels
50 * (including reserved). 50 * (including reserved).
51 */ 51 */
52 u32 chnl_offset; 52 u32 chnl_offset;
53 u32 chnl_buf_size; 53 u32 chnl_buf_size;
54 u32 dw_num_chnls; 54 u32 num_chnls;
55 void __iomem *dw_per_base; 55 void __iomem *per_base;
56 u32 dw_per_pm_base; 56 u32 per_pm_base;
57 u32 core_pm_base; 57 u32 core_pm_base;
58 void __iomem *dw_dmmu_base; 58 void __iomem *dmmu_base;
59}; 59};
60 60
61#endif /* CFGDEFS_ */ 61#endif /* CFGDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h b/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h
index 1785c3e8371..29e66dd525e 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h
@@ -58,7 +58,7 @@ struct chnl_info {
58 void *event_obj; /* Channel I/O completion event. */ 58 void *event_obj; /* Channel I/O completion event. */
59 /*Abstraction of I/O completion event. */ 59 /*Abstraction of I/O completion event. */
60 struct sync_object *sync_event; 60 struct sync_object *sync_event;
61 s8 dw_mode; /* Channel mode. */ 61 s8 mode; /* Channel mode. */
62 u8 dw_state; /* Current channel state. */ 62 u8 dw_state; /* Current channel state. */
63 u32 bytes_tx; /* Total bytes transferred. */ 63 u32 bytes_tx; /* Total bytes transferred. */
64 u32 cio_cs; /* Number of IOCs in queue. */ 64 u32 cio_cs; /* Number of IOCs in queue. */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h
index 943d91f809e..8cd1494ccc8 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h
@@ -51,7 +51,7 @@ struct cmm_attrs {
51 */ 51 */
52 52
53struct cmm_seginfo { 53struct cmm_seginfo {
54 u32 dw_seg_base_pa; /* Start Phys address of SM segment */ 54 u32 seg_base_pa; /* Start Phys address of SM segment */
55 /* Total size in bytes of segment: DSP+GPP */ 55 /* Total size in bytes of segment: DSP+GPP */
56 u32 ul_total_seg_size; 56 u32 ul_total_seg_size;
57 u32 gpp_base_pa; /* Start Phys addr of Gpp SM seg */ 57 u32 gpp_base_pa; /* Start Phys addr of Gpp SM seg */
diff --git a/drivers/staging/tidspbridge/pmgr/cmm.c b/drivers/staging/tidspbridge/pmgr/cmm.c
index f7542a5b10d..d2fb6a4c0e3 100644
--- a/drivers/staging/tidspbridge/pmgr/cmm.c
+++ b/drivers/staging/tidspbridge/pmgr/cmm.c
@@ -49,7 +49,7 @@
49#include <dspbridge/cmm.h> 49#include <dspbridge/cmm.h>
50 50
51/* ----------------------------------- Defines, Data Structures, Typedefs */ 51/* ----------------------------------- Defines, Data Structures, Typedefs */
52#define NEXT_PA(pnode) (pnode->dw_pa + pnode->ul_size) 52#define NEXT_PA(pnode) (pnode->pa + pnode->ul_size)
53 53
54/* Other bus/platform translations */ 54/* Other bus/platform translations */
55#define DSPPA2GPPPA(base, x, y) ((x)+(y)) 55#define DSPPA2GPPPA(base, x, y) ((x)+(y))
@@ -99,7 +99,7 @@ struct cmm_object {
99 struct mutex cmm_lock; /* Lock to access cmm mgr */ 99 struct mutex cmm_lock; /* Lock to access cmm mgr */
100 struct list_head node_free_list; /* Free list of memory nodes */ 100 struct list_head node_free_list; /* Free list of memory nodes */
101 u32 ul_min_block_size; /* Min SM block; default 16 bytes */ 101 u32 ul_min_block_size; /* Min SM block; default 16 bytes */
102 u32 dw_page_size; /* Memory Page size (1k/4k) */ 102 u32 page_size; /* Memory Page size (1k/4k) */
103 /* GPP SM segment ptrs */ 103 /* GPP SM segment ptrs */
104 struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS]; 104 struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
105}; 105};
@@ -128,7 +128,7 @@ static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
128/* SM node representing a block of memory. */ 128/* SM node representing a block of memory. */
129struct cmm_mnode { 129struct cmm_mnode {
130 struct list_head link; /* must be 1st element */ 130 struct list_head link; /* must be 1st element */
131 u32 dw_pa; /* Phys addr */ 131 u32 pa; /* Phys addr */
132 u32 dw_va; /* Virtual address in device process context */ 132 u32 dw_va; /* Virtual address in device process context */
133 u32 ul_size; /* SM block size in bytes */ 133 u32 ul_size; /* SM block size in bytes */
134 u32 client_proc; /* Process that allocated this mem block */ 134 u32 client_proc; /* Process that allocated this mem block */
@@ -199,7 +199,7 @@ void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
199 /* create a new block with the leftovers and 199 /* create a new block with the leftovers and
200 * add to freelist */ 200 * add to freelist */
201 new_node = 201 new_node =
202 get_node(cmm_mgr_obj, pnode->dw_pa + usize, 202 get_node(cmm_mgr_obj, pnode->pa + usize,
203 pnode->dw_va + usize, 203 pnode->dw_va + usize,
204 (u32) delta_size); 204 (u32) delta_size);
205 /* leftovers go free */ 205 /* leftovers go free */
@@ -216,7 +216,7 @@ void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
216 216
217 /* put our node on InUse list */ 217 /* put our node on InUse list */
218 list_add_tail(&pnode->link, &allocator->in_use_list); 218 list_add_tail(&pnode->link, &allocator->in_use_list);
219 buf_pa = (void *)pnode->dw_pa; /* physical address */ 219 buf_pa = (void *)pnode->pa; /* physical address */
220 /* clear mem */ 220 /* clear mem */
221 pbyte = (u8 *) pnode->dw_va; 221 pbyte = (u8 *) pnode->dw_va;
222 for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++) 222 for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
@@ -260,7 +260,7 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
260 DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4); 260 DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4);
261 /* save away smallest block allocation for this cmm mgr */ 261 /* save away smallest block allocation for this cmm mgr */
262 cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size; 262 cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size;
263 cmm_obj->dw_page_size = PAGE_SIZE; 263 cmm_obj->page_size = PAGE_SIZE;
264 264
265 /* create node free list */ 265 /* create node free list */
266 INIT_LIST_HEAD(&cmm_obj->node_free_list); 266 INIT_LIST_HEAD(&cmm_obj->node_free_list);
@@ -369,7 +369,7 @@ int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
369 369
370 mutex_lock(&cmm_mgr_obj->cmm_lock); 370 mutex_lock(&cmm_mgr_obj->cmm_lock);
371 list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) { 371 list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) {
372 if (curr->dw_pa == (u32) buf_pa) { 372 if (curr->pa == (u32) buf_pa) {
373 list_del(&curr->link); 373 list_del(&curr->link);
374 add_to_free_list(allocator, curr); 374 add_to_free_list(allocator, curr);
375 status = 0; 375 status = 0;
@@ -438,7 +438,7 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
438 if (!altr) 438 if (!altr)
439 continue; 439 continue;
440 cmm_info_obj->ul_num_gppsm_segs++; 440 cmm_info_obj->ul_num_gppsm_segs++;
441 cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_pa = 441 cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
442 altr->shm_base - altr->ul_dsp_size; 442 altr->shm_base - altr->ul_dsp_size;
443 cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size = 443 cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
444 altr->ul_dsp_size + altr->ul_sm_size; 444 altr->ul_dsp_size + altr->ul_sm_size;
@@ -704,7 +704,7 @@ static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
704 list_del_init(&pnode->link); 704 list_del_init(&pnode->link);
705 } 705 }
706 706
707 pnode->dw_pa = dw_pa; 707 pnode->pa = dw_pa;
708 pnode->dw_va = dw_va; 708 pnode->dw_va = dw_va;
709 pnode->ul_size = ul_size; 709 pnode->ul_size = ul_size;
710 710
@@ -763,13 +763,13 @@ static void add_to_free_list(struct cmm_allocator *allocator,
763 } 763 }
764 764
765 list_for_each_entry(curr, &allocator->free_list, link) { 765 list_for_each_entry(curr, &allocator->free_list, link) {
766 if (NEXT_PA(curr) == node->dw_pa) { 766 if (NEXT_PA(curr) == node->pa) {
767 curr->ul_size += node->ul_size; 767 curr->ul_size += node->ul_size;
768 delete_node(allocator->hcmm_mgr, node); 768 delete_node(allocator->hcmm_mgr, node);
769 return; 769 return;
770 } 770 }
771 if (curr->dw_pa == NEXT_PA(node)) { 771 if (curr->pa == NEXT_PA(node)) {
772 curr->dw_pa = node->dw_pa; 772 curr->pa = node->pa;
773 curr->dw_va = node->dw_va; 773 curr->dw_va = node->dw_va;
774 curr->ul_size += node->ul_size; 774 curr->ul_size += node->ul_size;
775 delete_node(allocator->hcmm_mgr, node); 775 delete_node(allocator->hcmm_mgr, node);
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 0f10a50cf6f..e328dc1e169 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -213,11 +213,11 @@ int dev_create_device(struct dev_object **device_obj,
213 num_windows = host_res->num_mem_windows; 213 num_windows = host_res->num_mem_windows;
214 if (num_windows) { 214 if (num_windows) {
215 /* Assume last memory window is for CHNL */ 215 /* Assume last memory window is for CHNL */
216 io_mgr_attrs.shm_base = host_res->dw_mem_base[1] + 216 io_mgr_attrs.shm_base = host_res->mem_base[1] +
217 host_res->dw_offset_for_monitor; 217 host_res->offset_for_monitor;
218 io_mgr_attrs.usm_length = 218 io_mgr_attrs.usm_length =
219 host_res->dw_mem_length[1] - 219 host_res->mem_length[1] -
220 host_res->dw_offset_for_monitor; 220 host_res->offset_for_monitor;
221 } else { 221 } else {
222 io_mgr_attrs.shm_base = 0; 222 io_mgr_attrs.shm_base = 0;
223 io_mgr_attrs.usm_length = 0; 223 io_mgr_attrs.usm_length = 0;
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 3efe1d50a4c..575243882d0 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -68,7 +68,7 @@
68/* Device IOCtl function pointer */ 68/* Device IOCtl function pointer */
69struct api_cmd { 69struct api_cmd {
70 u32(*fxn) (union trapped_args *args, void *pr_ctxt); 70 u32(*fxn) (union trapped_args *args, void *pr_ctxt);
71 u32 dw_index; 71 u32 index;
72}; 72};
73 73
74/* ----------------------------------- Globals */ 74/* ----------------------------------- Globals */
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index 2e7330272b1..9aacbcb38eb 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -687,9 +687,9 @@ static int request_bridge_resources(struct cfg_hostres *res)
687 host_res->num_mem_windows = 2; 687 host_res->num_mem_windows = 2;
688 688
689 /* First window is for DSP internal memory */ 689 /* First window is for DSP internal memory */
690 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]); 690 dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
691 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]); 691 dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
692 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base); 692 dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);
693 693
694 /* for 24xx base port is not mapping the mamory for DSP 694 /* for 24xx base port is not mapping the mamory for DSP
695 * internal memory TODO Do a ioremap here */ 695 * internal memory TODO Do a ioremap here */
@@ -698,10 +698,10 @@ static int request_bridge_resources(struct cfg_hostres *res)
698 /* These are hard-coded values */ 698 /* These are hard-coded values */
699 host_res->birq_registers = 0; 699 host_res->birq_registers = 0;
700 host_res->birq_attrib = 0; 700 host_res->birq_attrib = 0;
701 host_res->dw_offset_for_monitor = 0; 701 host_res->offset_for_monitor = 0;
702 host_res->chnl_offset = 0; 702 host_res->chnl_offset = 0;
703 /* CHNL_MAXCHANNELS */ 703 /* CHNL_MAXCHANNELS */
704 host_res->dw_num_chnls = CHNL_MAXCHANNELS; 704 host_res->num_chnls = CHNL_MAXCHANNELS;
705 host_res->chnl_buf_size = 0x400; 705 host_res->chnl_buf_size = 0x400;
706 706
707 return 0; 707 return 0;
@@ -730,51 +730,51 @@ int drv_request_bridge_res_dsp(void **phost_resources)
730 /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */ 730 /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
731 host_res->num_mem_windows = 4; 731 host_res->num_mem_windows = 4;
732 732
733 host_res->dw_mem_base[0] = 0; 733 host_res->mem_base[0] = 0;
734 host_res->dw_mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE, 734 host_res->mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
735 OMAP_DSP_MEM1_SIZE); 735 OMAP_DSP_MEM1_SIZE);
736 host_res->dw_mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE, 736 host_res->mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
737 OMAP_DSP_MEM2_SIZE); 737 OMAP_DSP_MEM2_SIZE);
738 host_res->dw_mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE, 738 host_res->mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
739 OMAP_DSP_MEM3_SIZE); 739 OMAP_DSP_MEM3_SIZE);
740 host_res->dw_per_base = ioremap(OMAP_PER_CM_BASE, 740 host_res->per_base = ioremap(OMAP_PER_CM_BASE,
741 OMAP_PER_CM_SIZE); 741 OMAP_PER_CM_SIZE);
742 host_res->dw_per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE, 742 host_res->per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
743 OMAP_PER_PRM_SIZE); 743 OMAP_PER_PRM_SIZE);
744 host_res->core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, 744 host_res->core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
745 OMAP_CORE_PRM_SIZE); 745 OMAP_CORE_PRM_SIZE);
746 host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE, 746 host_res->dmmu_base = ioremap(OMAP_DMMU_BASE,
747 OMAP_DMMU_SIZE); 747 OMAP_DMMU_SIZE);
748 748
749 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", 749 dev_dbg(bridge, "mem_base[0] 0x%x\n",
750 host_res->dw_mem_base[0]); 750 host_res->mem_base[0]);
751 dev_dbg(bridge, "dw_mem_base[1] 0x%x\n", 751 dev_dbg(bridge, "mem_base[1] 0x%x\n",
752 host_res->dw_mem_base[1]); 752 host_res->mem_base[1]);
753 dev_dbg(bridge, "dw_mem_base[2] 0x%x\n", 753 dev_dbg(bridge, "mem_base[2] 0x%x\n",
754 host_res->dw_mem_base[2]); 754 host_res->mem_base[2]);
755 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", 755 dev_dbg(bridge, "mem_base[3] 0x%x\n",
756 host_res->dw_mem_base[3]); 756 host_res->mem_base[3]);
757 dev_dbg(bridge, "dw_mem_base[4] 0x%x\n", 757 dev_dbg(bridge, "mem_base[4] 0x%x\n",
758 host_res->dw_mem_base[4]); 758 host_res->mem_base[4]);
759 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base); 759 dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);
760 760
761 shm_size = drv_datap->shm_size; 761 shm_size = drv_datap->shm_size;
762 if (shm_size >= 0x10000) { 762 if (shm_size >= 0x10000) {
763 /* Allocate Physically contiguous, 763 /* Allocate Physically contiguous,
764 * non-cacheable memory */ 764 * non-cacheable memory */
765 host_res->dw_mem_base[1] = 765 host_res->mem_base[1] =
766 (u32) mem_alloc_phys_mem(shm_size, 0x100000, 766 (u32) mem_alloc_phys_mem(shm_size, 0x100000,
767 &dma_addr); 767 &dma_addr);
768 if (host_res->dw_mem_base[1] == 0) { 768 if (host_res->mem_base[1] == 0) {
769 status = -ENOMEM; 769 status = -ENOMEM;
770 pr_err("shm reservation Failed\n"); 770 pr_err("shm reservation Failed\n");
771 } else { 771 } else {
772 host_res->dw_mem_length[1] = shm_size; 772 host_res->mem_length[1] = shm_size;
773 host_res->dw_mem_phys[1] = dma_addr; 773 host_res->mem_phys[1] = dma_addr;
774 774
775 dev_dbg(bridge, "%s: Bridge shm address 0x%x " 775 dev_dbg(bridge, "%s: Bridge shm address 0x%x "
776 "dma_addr %x size %x\n", __func__, 776 "dma_addr %x size %x\n", __func__,
777 host_res->dw_mem_base[1], 777 host_res->mem_base[1],
778 dma_addr, shm_size); 778 dma_addr, shm_size);
779 } 779 }
780 } 780 }
@@ -782,10 +782,10 @@ int drv_request_bridge_res_dsp(void **phost_resources)
782 /* These are hard-coded values */ 782 /* These are hard-coded values */
783 host_res->birq_registers = 0; 783 host_res->birq_registers = 0;
784 host_res->birq_attrib = 0; 784 host_res->birq_attrib = 0;
785 host_res->dw_offset_for_monitor = 0; 785 host_res->offset_for_monitor = 0;
786 host_res->chnl_offset = 0; 786 host_res->chnl_offset = 0;
787 /* CHNL_MAXCHANNELS */ 787 /* CHNL_MAXCHANNELS */
788 host_res->dw_num_chnls = CHNL_MAXCHANNELS; 788 host_res->num_chnls = CHNL_MAXCHANNELS;
789 host_res->chnl_buf_size = 0x400; 789 host_res->chnl_buf_size = 0x400;
790 dw_buff_size = sizeof(struct cfg_hostres); 790 dw_buff_size = sizeof(struct cfg_hostres);
791 } 791 }
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index 5a045c75c56..454fcc82584 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -621,7 +621,7 @@ func_cont:
621 goto func_end; 621 goto func_end;
622 } 622 }
623 623
624 ul_gpp_mem_base = (u32) host_res->dw_mem_base[1]; 624 ul_gpp_mem_base = (u32) host_res->mem_base[1];
625 off_set = pul_value - dynext_base; 625 off_set = pul_value - dynext_base;
626 ul_stack_seg_addr = ul_gpp_mem_base + off_set; 626 ul_stack_seg_addr = ul_gpp_mem_base + off_set;
627 ul_stack_seg_val = readl(ul_stack_seg_addr); 627 ul_stack_seg_val = readl(ul_stack_seg_addr);
@@ -2904,7 +2904,7 @@ static int get_proc_props(struct node_mgr *hnode_mgr,
2904 return -EPERM; 2904 return -EPERM;
2905 hnode_mgr->ul_chnl_offset = host_res->chnl_offset; 2905 hnode_mgr->ul_chnl_offset = host_res->chnl_offset;
2906 hnode_mgr->ul_chnl_buf_size = host_res->chnl_buf_size; 2906 hnode_mgr->ul_chnl_buf_size = host_res->chnl_buf_size;
2907 hnode_mgr->ul_num_chnls = host_res->dw_num_chnls; 2907 hnode_mgr->ul_num_chnls = host_res->num_chnls;
2908 2908
2909 /* 2909 /*
2910 * PROC will add an API to get dsp_processorinfo. 2910 * PROC will add an API to get dsp_processorinfo.