|  |  |  |
|---|---|---|
| author | Greg Kroah-Hartman <gregkh@suse.de> | 2010-11-11 08:12:34 -0500 |
| committer | Greg Kroah-Hartman <gregkh@suse.de> | 2010-11-11 08:14:54 -0500 |
| commit | 94fb7c9c5d40edd538d7f2e048af5ab9ff55ef73 (patch) | |
| tree | 19a48a7033656bcdcbbaabf8cb6347562a2d1a94 /drivers | |
| parent | 307ae1d3d05e0379211277cc652c462d36873984 (diff) | |
| parent | 50ad26f4c9710a64c3728f08c3fa6f4b6a869376 (diff) | |
Staging: Merge 'tidspbridge-2.6.37-rc1' into staging-linus
This is a big revert of a lot of -rc1 tidspbridge patches in order to
get the driver back into a working state. It also includes an OMAP patch
that was approved by the OMAP maintainer.
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers')
31 files changed, 3468 insertions, 571 deletions
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index ff64d464143c..93de4f2e8bf8 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -6,7 +6,6 @@ menuconfig TIDSPBRIDGE
| 6 | tristate "DSP Bridge driver" | 6 | tristate "DSP Bridge driver" |
| 7 | depends on ARCH_OMAP3 | 7 | depends on ARCH_OMAP3 |
| 8 | select OMAP_MBOX_FWK | 8 | select OMAP_MBOX_FWK |
| 9 | select OMAP_IOMMU | ||
| 10 | help | 9 | help |
| 11 | DSP/BIOS Bridge is designed for platforms that contain a GPP and | 10 | DSP/BIOS Bridge is designed for platforms that contain a GPP and |
| 12 | one or more attached DSPs. The GPP is considered the master or | 11 | one or more attached DSPs. The GPP is considered the master or |
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index 50decc2935c5..41c644c3318f 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -2,18 +2,19 @@ obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o
| 2 | 2 | ||
| 3 | libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o | 3 | libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o |
| 4 | libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \ | 4 | libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \ |
| 5 | core/tiomap3430_pwr.o core/tiomap_io.o core/dsp-mmu.o \ | 5 | core/tiomap3430_pwr.o core/tiomap_io.o \ |
| 6 | core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o | 6 | core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o |
| 7 | libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \ | 7 | libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \ |
| 8 | pmgr/cmm.o pmgr/dbll.o | 8 | pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o |
| 9 | librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \ | 9 | librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \ |
| 10 | rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \ | 10 | rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \ |
| 11 | rmgr/nldr.o rmgr/drv_interface.o | 11 | rmgr/nldr.o rmgr/drv_interface.o |
| 12 | libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \ | 12 | libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \ |
| 13 | dynload/tramp.o | 13 | dynload/tramp.o |
| 14 | libhw = hw/hw_mmu.o | ||
| 14 | 15 | ||
| 15 | bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \ | 16 | bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \ |
| 16 | $(libdload) | 17 | $(libdload) $(libhw) |
| 17 | 18 | ||
| 18 | #Machine dependent | 19 | #Machine dependent |
| 19 | ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \ | 20 | ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \ |
diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h
index 8ae263387a87..16723cd34831 100644
--- a/drivers/staging/tidspbridge/core/_deh.h
+++ b/drivers/staging/tidspbridge/core/_deh.h
@@ -27,8 +27,9 @@
| 27 | struct deh_mgr { | 27 | struct deh_mgr { |
| 28 | struct bridge_dev_context *hbridge_context; /* Bridge context. */ | 28 | struct bridge_dev_context *hbridge_context; /* Bridge context. */ |
| 29 | struct ntfy_object *ntfy_obj; /* NTFY object */ | 29 | struct ntfy_object *ntfy_obj; /* NTFY object */ |
| 30 | }; | ||
| 31 | 30 | ||
| 32 | int mmu_fault_isr(struct iommu *mmu); | 31 | /* MMU Fault DPC */ |
| 32 | struct tasklet_struct dpc_tasklet; | ||
| 33 | }; | ||
| 33 | 34 | ||
| 34 | #endif /* _DEH_ */ | 35 | #endif /* _DEH_ */ |
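The `_deh.h` change above swaps the exported `mmu_fault_isr()` prototype for a tasklet embedded in `deh_mgr`, moving MMU-fault handling from the iommu layer's callback into a driver-owned deferred procedure call. A minimal sketch of that classic tasklet pattern, assuming a handler named `mmu_fault_dpc` (illustrative, not from this commit):

```c
#include <linux/interrupt.h>

struct deh_mgr;	/* opaque here; the real definition lives in _deh.h */

/* Hypothetical DPC body: runs in softirq context after the fault ISR
 * has acknowledged the interrupt and scheduled the tasklet. */
static void mmu_fault_dpc(unsigned long data)
{
	struct deh_mgr *deh = (struct deh_mgr *)data;

	/* notify registered listeners, dump DSP state, ... */
	(void)deh;
}

/*
 * Init:     tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (unsigned long)deh);
 * From ISR: tasklet_schedule(&deh->dpc_tasklet);
 * Teardown: tasklet_kill(&deh->dpc_tasklet);
 */
```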
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index e0a801c1cb98..1c1f157e167a 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -23,8 +23,8 @@
| 23 | #include <plat/clockdomain.h> | 23 | #include <plat/clockdomain.h> |
| 24 | #include <mach-omap2/prm-regbits-34xx.h> | 24 | #include <mach-omap2/prm-regbits-34xx.h> |
| 25 | #include <mach-omap2/cm-regbits-34xx.h> | 25 | #include <mach-omap2/cm-regbits-34xx.h> |
| 26 | #include <dspbridge/dsp-mmu.h> | ||
| 27 | #include <dspbridge/devdefs.h> | 26 | #include <dspbridge/devdefs.h> |
| 27 | #include <hw_defs.h> | ||
| 28 | #include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */ | 28 | #include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */ |
| 29 | #include <dspbridge/sync.h> | 29 | #include <dspbridge/sync.h> |
| 30 | #include <dspbridge/clk.h> | 30 | #include <dspbridge/clk.h> |
@@ -306,18 +306,6 @@ static const struct bpwr_clk_t bpwr_clks[] = {
| 306 | 306 | ||
| 307 | #define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index))) | 307 | #define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index))) |
| 308 | 308 | ||
| 309 | struct shm_segs { | ||
| 310 | u32 seg0_da; | ||
| 311 | u32 seg0_pa; | ||
| 312 | u32 seg0_va; | ||
| 313 | u32 seg0_size; | ||
| 314 | u32 seg1_da; | ||
| 315 | u32 seg1_pa; | ||
| 316 | u32 seg1_va; | ||
| 317 | u32 seg1_size; | ||
| 318 | }; | ||
| 319 | |||
| 320 | |||
| 321 | /* This Bridge driver's device context: */ | 309 | /* This Bridge driver's device context: */ |
| 322 | struct bridge_dev_context { | 310 | struct bridge_dev_context { |
| 323 | struct dev_object *hdev_obj; /* Handle to Bridge device object. */ | 311 | struct dev_object *hdev_obj; /* Handle to Bridge device object. */ |
@@ -328,6 +316,7 @@ struct bridge_dev_context {
| 328 | */ | 316 | */ |
| 329 | u32 dw_dsp_ext_base_addr; /* See the comment above */ | 317 | u32 dw_dsp_ext_base_addr; /* See the comment above */ |
| 330 | u32 dw_api_reg_base; /* API mem map'd registers */ | 318 | u32 dw_api_reg_base; /* API mem map'd registers */ |
| 319 | void __iomem *dw_dsp_mmu_base; /* DSP MMU Mapped registers */ | ||
| 331 | u32 dw_api_clk_base; /* CLK Registers */ | 320 | u32 dw_api_clk_base; /* CLK Registers */ |
| 332 | u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */ | 321 | u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */ |
| 333 | u32 dw_public_rhea; /* Pub Rhea */ | 322 | u32 dw_public_rhea; /* Pub Rhea */ |
@@ -339,8 +328,7 @@ struct bridge_dev_context {
| 339 | u32 dw_internal_size; /* Internal memory size */ | 328 | u32 dw_internal_size; /* Internal memory size */ |
| 340 | 329 | ||
| 341 | struct omap_mbox *mbox; /* Mail box handle */ | 330 | struct omap_mbox *mbox; /* Mail box handle */ |
| 342 | struct iommu *dsp_mmu; /* iommu for iva2 handler */ | 331 | |
| 343 | struct shm_segs sh_s; | ||
| 344 | struct cfg_hostres *resources; /* Host Resources */ | 332 | struct cfg_hostres *resources; /* Host Resources */ |
| 345 | 333 | ||
| 346 | /* | 334 | /* |
@@ -353,6 +341,7 @@ struct bridge_dev_context {
| 353 | 341 | ||
| 354 | /* TC Settings */ | 342 | /* TC Settings */ |
| 355 | bool tc_word_swap_on; /* Traffic Controller Word Swap */ | 343 | bool tc_word_swap_on; /* Traffic Controller Word Swap */ |
| 344 | struct pg_table_attrs *pt_attrs; | ||
| 356 | u32 dsp_per_clks; | 345 | u32 dsp_per_clks; |
| 357 | }; | 346 | }; |
| 358 | 347 | ||
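Two additions to `bridge_dev_context` above carry most of this revert: `dw_dsp_mmu_base` caches the mapped DSP MMU register window, and `pt_attrs` points at driver-managed page tables (defined in tiomap3430.c below). A sketch of the direct register access the cached base enables, mirroring the MMU_SYSCONFIG fixup in the tiomap3430.c hunks later in this page (the 0x10 offset and bit values are copied from that hunk; the helper itself is illustrative):

```c
#include <linux/io.h>

/* Sketch: set the MMU's SmartIdle and AutoIdle bits (per the commit's
 * own comment) through the newly cached register base. Relies on
 * struct bridge_dev_context as declared in _tiomap.h above. */
static void dsp_mmu_set_idle(struct bridge_dev_context *dev_context)
{
	u32 v = __raw_readl(dev_context->dw_dsp_mmu_base + 0x10);

	v = (v & 0xFFFFFFEF) | 0x11;
	__raw_writel(v, dev_context->dw_dsp_mmu_base + 0x10);
}
```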
diff --git a/drivers/staging/tidspbridge/core/dsp-mmu.c b/drivers/staging/tidspbridge/core/dsp-mmu.c
deleted file mode 100644
index 983c95adc8ff..000000000000
--- a/drivers/staging/tidspbridge/core/dsp-mmu.c
+++ /dev/null
@@ -1,317 +0,0 @@
| 1 | /* | ||
| 2 | * dsp-mmu.c | ||
| 3 | * | ||
| 4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
| 5 | * | ||
| 6 | * DSP iommu. | ||
| 7 | * | ||
| 8 | * Copyright (C) 2010 Texas Instruments, Inc. | ||
| 9 | * | ||
| 10 | * This package is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License version 2 as | ||
| 12 | * published by the Free Software Foundation. | ||
| 13 | * | ||
| 14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
| 15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
| 16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include <dspbridge/host_os.h> | ||
| 20 | #include <plat/dmtimer.h> | ||
| 21 | #include <dspbridge/dbdefs.h> | ||
| 22 | #include <dspbridge/dev.h> | ||
| 23 | #include <dspbridge/io_sm.h> | ||
| 24 | #include <dspbridge/dspdeh.h> | ||
| 25 | #include "_tiomap.h" | ||
| 26 | |||
| 27 | #include <dspbridge/dsp-mmu.h> | ||
| 28 | |||
| 29 | #define MMU_CNTL_TWL_EN (1 << 2) | ||
| 30 | |||
| 31 | static struct tasklet_struct mmu_tasklet; | ||
| 32 | |||
| 33 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
| 34 | static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) | ||
| 35 | { | ||
| 36 | void *dummy_addr; | ||
| 37 | u32 fa, tmp; | ||
| 38 | struct iotlb_entry e; | ||
| 39 | struct iommu *mmu = dev_context->dsp_mmu; | ||
| 40 | dummy_addr = (void *)__get_free_page(GFP_ATOMIC); | ||
| 41 | |||
| 42 | /* | ||
| 43 | * Before acking the MMU fault, let's make sure MMU can only | ||
| 44 | * access entry #0. Then add a new entry so that the DSP OS | ||
| 45 | * can continue in order to dump the stack. | ||
| 46 | */ | ||
| 47 | tmp = iommu_read_reg(mmu, MMU_CNTL); | ||
| 48 | tmp &= ~MMU_CNTL_TWL_EN; | ||
| 49 | iommu_write_reg(mmu, tmp, MMU_CNTL); | ||
| 50 | fa = iommu_read_reg(mmu, MMU_FAULT_AD); | ||
| 51 | e.da = fa & PAGE_MASK; | ||
| 52 | e.pa = virt_to_phys(dummy_addr); | ||
| 53 | e.valid = 1; | ||
| 54 | e.prsvd = 1; | ||
| 55 | e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK; | ||
| 56 | e.endian = MMU_RAM_ENDIAN_LITTLE; | ||
| 57 | e.elsz = MMU_RAM_ELSZ_32; | ||
| 58 | e.mixed = 0; | ||
| 59 | |||
| 60 | load_iotlb_entry(mmu, &e); | ||
| 61 | |||
| 62 | dsp_clk_enable(DSP_CLK_GPT8); | ||
| 63 | |||
| 64 | dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); | ||
| 65 | |||
| 66 | /* Clear MMU interrupt */ | ||
| 67 | tmp = iommu_read_reg(mmu, MMU_IRQSTATUS); | ||
| 68 | iommu_write_reg(mmu, tmp, MMU_IRQSTATUS); | ||
| 69 | |||
| 70 | dump_dsp_stack(dev_context); | ||
| 71 | dsp_clk_disable(DSP_CLK_GPT8); | ||
| 72 | |||
| 73 | iopgtable_clear_entry(mmu, fa); | ||
| 74 | free_page((unsigned long)dummy_addr); | ||
| 75 | } | ||
| 76 | #endif | ||
| 77 | |||
| 78 | |||
| 79 | static void fault_tasklet(unsigned long data) | ||
| 80 | { | ||
| 81 | struct iommu *mmu = (struct iommu *)data; | ||
| 82 | struct bridge_dev_context *dev_ctx; | ||
| 83 | struct deh_mgr *dm; | ||
| 84 | u32 fa; | ||
| 85 | dev_get_deh_mgr(dev_get_first(), &dm); | ||
| 86 | dev_get_bridge_context(dev_get_first(), &dev_ctx); | ||
| 87 | |||
| 88 | if (!dm || !dev_ctx) | ||
| 89 | return; | ||
| 90 | |||
| 91 | fa = iommu_read_reg(mmu, MMU_FAULT_AD); | ||
| 92 | |||
| 93 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
| 94 | print_dsp_trace_buffer(dev_ctx); | ||
| 95 | dump_dl_modules(dev_ctx); | ||
| 96 | mmu_fault_print_stack(dev_ctx); | ||
| 97 | #endif | ||
| 98 | |||
| 99 | bridge_deh_notify(dm, DSP_MMUFAULT, fa); | ||
| 100 | } | ||
| 101 | |||
| 102 | /* | ||
| 103 | * ======== mmu_fault_isr ======== | ||
| 104 | * ISR to be triggered by a DSP MMU fault interrupt. | ||
| 105 | */ | ||
| 106 | static int mmu_fault_callback(struct iommu *mmu) | ||
| 107 | { | ||
| 108 | if (!mmu) | ||
| 109 | return -EPERM; | ||
| 110 | |||
| 111 | iommu_write_reg(mmu, 0, MMU_IRQENABLE); | ||
| 112 | tasklet_schedule(&mmu_tasklet); | ||
| 113 | return 0; | ||
| 114 | } | ||
| 115 | |||
| 116 | /** | ||
| 117 | * dsp_mmu_init() - initialize dsp_mmu module and returns a handle | ||
| 118 | * | ||
| 119 | * This function initialize dsp mmu module and returns a struct iommu | ||
| 120 | * handle to use it for dsp maps. | ||
| 121 | * | ||
| 122 | */ | ||
| 123 | struct iommu *dsp_mmu_init() | ||
| 124 | { | ||
| 125 | struct iommu *mmu; | ||
| 126 | |||
| 127 | mmu = iommu_get("iva2"); | ||
| 128 | |||
| 129 | if (!IS_ERR(mmu)) { | ||
| 130 | tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu); | ||
| 131 | mmu->isr = mmu_fault_callback; | ||
| 132 | } | ||
| 133 | |||
| 134 | return mmu; | ||
| 135 | } | ||
| 136 | |||
| 137 | /** | ||
| 138 | * dsp_mmu_exit() - destroy dsp mmu module | ||
| 139 | * @mmu: Pointer to iommu handle. | ||
| 140 | * | ||
| 141 | * This function destroys dsp mmu module. | ||
| 142 | * | ||
| 143 | */ | ||
| 144 | void dsp_mmu_exit(struct iommu *mmu) | ||
| 145 | { | ||
| 146 | if (mmu) | ||
| 147 | iommu_put(mmu); | ||
| 148 | tasklet_kill(&mmu_tasklet); | ||
| 149 | } | ||
| 150 | |||
| 151 | /** | ||
| 152 | * user_va2_pa() - get physical address from userspace address. | ||
| 153 | * @mm: mm_struct Pointer of the process. | ||
| 154 | * @address: Virtual user space address. | ||
| 155 | * | ||
| 156 | */ | ||
| 157 | static u32 user_va2_pa(struct mm_struct *mm, u32 address) | ||
| 158 | { | ||
| 159 | pgd_t *pgd; | ||
| 160 | pmd_t *pmd; | ||
| 161 | pte_t *ptep, pte; | ||
| 162 | |||
| 163 | pgd = pgd_offset(mm, address); | ||
| 164 | if (!(pgd_none(*pgd) || pgd_bad(*pgd))) { | ||
| 165 | pmd = pmd_offset(pgd, address); | ||
| 166 | if (!(pmd_none(*pmd) || pmd_bad(*pmd))) { | ||
| 167 | ptep = pte_offset_map(pmd, address); | ||
| 168 | if (ptep) { | ||
| 169 | pte = *ptep; | ||
| 170 | if (pte_present(pte)) | ||
| 171 | return pte & PAGE_MASK; | ||
| 172 | } | ||
| 173 | } | ||
| 174 | } | ||
| 175 | |||
| 176 | return 0; | ||
| 177 | } | ||
| 178 | |||
| 179 | /** | ||
| 180 | * get_io_pages() - pin and get pages of io user's buffer. | ||
| 181 | * @mm: mm_struct Pointer of the process. | ||
| 182 | * @uva: Virtual user space address. | ||
| 183 | * @pages Pages to be pined. | ||
| 184 | * @usr_pgs struct page array pointer where the user pages will be stored | ||
| 185 | * | ||
| 186 | */ | ||
| 187 | static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages, | ||
| 188 | struct page **usr_pgs) | ||
| 189 | { | ||
| 190 | u32 pa; | ||
| 191 | int i; | ||
| 192 | struct page *pg; | ||
| 193 | |||
| 194 | for (i = 0; i < pages; i++) { | ||
| 195 | pa = user_va2_pa(mm, uva); | ||
| 196 | |||
| 197 | if (!pfn_valid(__phys_to_pfn(pa))) | ||
| 198 | break; | ||
| 199 | |||
| 200 | pg = phys_to_page(pa); | ||
| 201 | usr_pgs[i] = pg; | ||
| 202 | get_page(pg); | ||
| 203 | } | ||
| 204 | return i; | ||
| 205 | } | ||
| 206 | |||
| 207 | /** | ||
| 208 | * user_to_dsp_map() - maps user to dsp virtual address | ||
| 209 | * @mmu: Pointer to iommu handle. | ||
| 210 | * @uva: Virtual user space address. | ||
| 211 | * @da DSP address | ||
| 212 | * @size Buffer size to map. | ||
| 213 | * @usr_pgs struct page array pointer where the user pages will be stored | ||
| 214 | * | ||
| 215 | * This function maps a user space buffer into DSP virtual address. | ||
| 216 | * | ||
| 217 | */ | ||
| 218 | u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size, | ||
| 219 | struct page **usr_pgs) | ||
| 220 | { | ||
| 221 | int res, w; | ||
| 222 | unsigned pages; | ||
| 223 | int i; | ||
| 224 | struct vm_area_struct *vma; | ||
| 225 | struct mm_struct *mm = current->mm; | ||
| 226 | struct sg_table *sgt; | ||
| 227 | struct scatterlist *sg; | ||
| 228 | |||
| 229 | if (!size || !usr_pgs) | ||
| 230 | return -EINVAL; | ||
| 231 | |||
| 232 | pages = size / PG_SIZE4K; | ||
| 233 | |||
| 234 | down_read(&mm->mmap_sem); | ||
| 235 | vma = find_vma(mm, uva); | ||
| 236 | while (vma && (uva + size > vma->vm_end)) | ||
| 237 | vma = find_vma(mm, vma->vm_end + 1); | ||
| 238 | |||
| 239 | if (!vma) { | ||
| 240 | pr_err("%s: Failed to get VMA region for 0x%x (%d)\n", | ||
| 241 | __func__, uva, size); | ||
| 242 | up_read(&mm->mmap_sem); | ||
| 243 | return -EINVAL; | ||
| 244 | } | ||
| 245 | if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) | ||
| 246 | w = 1; | ||
| 247 | |||
| 248 | if (vma->vm_flags & VM_IO) | ||
| 249 | i = get_io_pages(mm, uva, pages, usr_pgs); | ||
| 250 | else | ||
| 251 | i = get_user_pages(current, mm, uva, pages, w, 1, | ||
| 252 | usr_pgs, NULL); | ||
| 253 | up_read(&mm->mmap_sem); | ||
| 254 | |||
| 255 | if (i < 0) | ||
| 256 | return i; | ||
| 257 | |||
| 258 | if (i < pages) { | ||
| 259 | res = -EFAULT; | ||
| 260 | goto err_pages; | ||
| 261 | } | ||
| 262 | |||
| 263 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); | ||
| 264 | if (!sgt) { | ||
| 265 | res = -ENOMEM; | ||
| 266 | goto err_pages; | ||
| 267 | } | ||
| 268 | |||
| 269 | res = sg_alloc_table(sgt, pages, GFP_KERNEL); | ||
| 270 | |||
| 271 | if (res < 0) | ||
| 272 | goto err_sg; | ||
| 273 | |||
| 274 | for_each_sg(sgt->sgl, sg, sgt->nents, i) | ||
| 275 | sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0); | ||
| 276 | |||
| 277 | da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); | ||
| 278 | |||
| 279 | if (!IS_ERR_VALUE(da)) | ||
| 280 | return da; | ||
| 281 | res = (int)da; | ||
| 282 | |||
| 283 | sg_free_table(sgt); | ||
| 284 | err_sg: | ||
| 285 | kfree(sgt); | ||
| 286 | i = pages; | ||
| 287 | err_pages: | ||
| 288 | while (i--) | ||
| 289 | put_page(usr_pgs[i]); | ||
| 290 | return res; | ||
| 291 | } | ||
| 292 | |||
| 293 | /** | ||
| 294 | * user_to_dsp_unmap() - unmaps DSP virtual buffer. | ||
| 295 | * @mmu: Pointer to iommu handle. | ||
| 296 | * @da DSP address | ||
| 297 | * | ||
| 298 | * This function unmaps a user space buffer into DSP virtual address. | ||
| 299 | * | ||
| 300 | */ | ||
| 301 | int user_to_dsp_unmap(struct iommu *mmu, u32 da) | ||
| 302 | { | ||
| 303 | unsigned i; | ||
| 304 | struct sg_table *sgt; | ||
| 305 | struct scatterlist *sg; | ||
| 306 | |||
| 307 | sgt = iommu_vunmap(mmu, da); | ||
| 308 | if (!sgt) | ||
| 309 | return -EFAULT; | ||
| 310 | |||
| 311 | for_each_sg(sgt->sgl, sg, sgt->nents, i) | ||
| 312 | put_page(sg_page(sg)); | ||
| 313 | sg_free_table(sgt); | ||
| 314 | kfree(sgt); | ||
| 315 | |||
| 316 | return 0; | ||
| 317 | } | ||
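For reference, the core of the deleted `user_to_dsp_map()` is a standard scatterlist build: pin the user pages, wrap them in an `sg_table`, and hand the table to the mapper. A condensed, self-contained sketch of that pattern, with a stub `dsp_map()` standing in for the removed `iommu_vmap()` call:

```c
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Placeholder for the removed iommu_vmap() consumer. */
static u32 dsp_map(struct sg_table *sgt) { return 0; }

static int map_pinned_pages(struct page **pages, unsigned int nents)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	int ret, i;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return -ENOMEM;

	ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return ret;
	}

	/* One 4 KB page per scatterlist entry, as the deleted code did. */
	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	dsp_map(sgt);	/* table ownership passes to the mapper here */
	return 0;
}
```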
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 194badaba0ed..571864555ddd 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -39,6 +39,10 @@
| 39 | #include <dspbridge/ntfy.h> | 39 | #include <dspbridge/ntfy.h> |
| 40 | #include <dspbridge/sync.h> | 40 | #include <dspbridge/sync.h> |
| 41 | 41 | ||
| 42 | /* Hardware Abstraction Layer */ | ||
| 43 | #include <hw_defs.h> | ||
| 44 | #include <hw_mmu.h> | ||
| 45 | |||
| 42 | /* Bridge Driver */ | 46 | /* Bridge Driver */ |
| 43 | #include <dspbridge/dspdeh.h> | 47 | #include <dspbridge/dspdeh.h> |
| 44 | #include <dspbridge/dspio.h> | 48 | #include <dspbridge/dspio.h> |
@@ -287,7 +291,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
| 287 | struct cod_manager *cod_man; | 291 | struct cod_manager *cod_man; |
| 288 | struct chnl_mgr *hchnl_mgr; | 292 | struct chnl_mgr *hchnl_mgr; |
| 289 | struct msg_mgr *hmsg_mgr; | 293 | struct msg_mgr *hmsg_mgr; |
| 290 | struct shm_segs *sm_sg; | ||
| 291 | u32 ul_shm_base; | 294 | u32 ul_shm_base; |
| 292 | u32 ul_shm_base_offset; | 295 | u32 ul_shm_base_offset; |
| 293 | u32 ul_shm_limit; | 296 | u32 ul_shm_limit; |
@@ -310,9 +313,18 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
| 310 | struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB]; | 313 | struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB]; |
| 311 | struct cfg_hostres *host_res; | 314 | struct cfg_hostres *host_res; |
| 312 | struct bridge_dev_context *pbridge_context; | 315 | struct bridge_dev_context *pbridge_context; |
| 316 | u32 map_attrs; | ||
| 313 | u32 shm0_end; | 317 | u32 shm0_end; |
| 314 | u32 ul_dyn_ext_base; | 318 | u32 ul_dyn_ext_base; |
| 315 | u32 ul_seg1_size = 0; | 319 | u32 ul_seg1_size = 0; |
| 320 | u32 pa_curr = 0; | ||
| 321 | u32 va_curr = 0; | ||
| 322 | u32 gpp_va_curr = 0; | ||
| 323 | u32 num_bytes = 0; | ||
| 324 | u32 all_bits = 0; | ||
| 325 | u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB, | ||
| 326 | HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB | ||
| 327 | }; | ||
| 316 | 328 | ||
| 317 | status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context); | 329 | status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context); |
| 318 | if (!pbridge_context) { | 330 | if (!pbridge_context) { |
@@ -325,8 +337,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
| 325 | status = -EFAULT; | 337 | status = -EFAULT; |
| 326 | goto func_end; | 338 | goto func_end; |
| 327 | } | 339 | } |
| 328 | sm_sg = &pbridge_context->sh_s; | ||
| 329 | |||
| 330 | status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man); | 340 | status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man); |
| 331 | if (!cod_man) { | 341 | if (!cod_man) { |
| 332 | status = -EFAULT; | 342 | status = -EFAULT; |
@@ -461,14 +471,129 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
| 461 | if (status) | 471 | if (status) |
| 462 | goto func_end; | 472 | goto func_end; |
| 463 | 473 | ||
| 464 | sm_sg->seg1_pa = ul_gpp_pa; | 474 | pa_curr = ul_gpp_pa; |
| 465 | sm_sg->seg1_da = ul_dyn_ext_base; | 475 | va_curr = ul_dyn_ext_base * hio_mgr->word_size; |
| 466 | sm_sg->seg1_va = ul_gpp_va; | 476 | gpp_va_curr = ul_gpp_va; |
| 467 | sm_sg->seg1_size = ul_seg1_size; | 477 | num_bytes = ul_seg1_size; |
| 468 | sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size; | 478 | |
| 469 | sm_sg->seg0_da = ul_dsp_va; | 479 | /* |
| 470 | sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size; | 480 | * Try to fit into TLB entries. If not possible, push them to page |
| 471 | sm_sg->seg0_size = ul_seg_size; | 481 | * tables. It is quite possible that if sections are not on |
| 482 | * bigger page boundary, we may end up making several small pages. | ||
| 483 | * So, push them onto page tables, if that is the case. | ||
| 484 | */ | ||
| 485 | map_attrs = 0x00000000; | ||
| 486 | map_attrs = DSP_MAPLITTLEENDIAN; | ||
| 487 | map_attrs |= DSP_MAPPHYSICALADDR; | ||
| 488 | map_attrs |= DSP_MAPELEMSIZE32; | ||
| 489 | map_attrs |= DSP_MAPDONOTLOCK; | ||
| 490 | |||
| 491 | while (num_bytes) { | ||
| 492 | /* | ||
| 493 | * To find the max. page size with which both PA & VA are | ||
| 494 | * aligned. | ||
| 495 | */ | ||
| 496 | all_bits = pa_curr | va_curr; | ||
| 497 | dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, " | ||
| 498 | "num_bytes %x\n", all_bits, pa_curr, va_curr, | ||
| 499 | num_bytes); | ||
| 500 | for (i = 0; i < 4; i++) { | ||
| 501 | if ((num_bytes >= page_size[i]) && ((all_bits & | ||
| 502 | (page_size[i] - | ||
| 503 | 1)) == 0)) { | ||
| 504 | status = | ||
| 505 | hio_mgr->intf_fxns-> | ||
| 506 | pfn_brd_mem_map(hio_mgr->hbridge_context, | ||
| 507 | pa_curr, va_curr, | ||
| 508 | page_size[i], map_attrs, | ||
| 509 | NULL); | ||
| 510 | if (status) | ||
| 511 | goto func_end; | ||
| 512 | pa_curr += page_size[i]; | ||
| 513 | va_curr += page_size[i]; | ||
| 514 | gpp_va_curr += page_size[i]; | ||
| 515 | num_bytes -= page_size[i]; | ||
| 516 | /* | ||
| 517 | * Don't try smaller sizes. Hopefully we have | ||
| 518 | * reached an address aligned to a bigger page | ||
| 519 | * size. | ||
| 520 | */ | ||
| 521 | break; | ||
| 522 | } | ||
| 523 | } | ||
| 524 | } | ||
| 525 | pa_curr += ul_pad_size; | ||
| 526 | va_curr += ul_pad_size; | ||
| 527 | gpp_va_curr += ul_pad_size; | ||
| 528 | |||
| 529 | /* Configure the TLB entries for the next cacheable segment */ | ||
| 530 | num_bytes = ul_seg_size; | ||
| 531 | va_curr = ul_dsp_va * hio_mgr->word_size; | ||
| 532 | while (num_bytes) { | ||
| 533 | /* | ||
| 534 | * To find the max. page size with which both PA & VA are | ||
| 535 | * aligned. | ||
| 536 | */ | ||
| 537 | all_bits = pa_curr | va_curr; | ||
| 538 | dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, " | ||
| 539 | "va_curr %x, num_bytes %x\n", all_bits, pa_curr, | ||
| 540 | va_curr, num_bytes); | ||
| 541 | for (i = 0; i < 4; i++) { | ||
| 542 | if (!(num_bytes >= page_size[i]) || | ||
| 543 | !((all_bits & (page_size[i] - 1)) == 0)) | ||
| 544 | continue; | ||
| 545 | if (ndx < MAX_LOCK_TLB_ENTRIES) { | ||
| 546 | /* | ||
| 547 | * This is the physical address written to | ||
| 548 | * DSP MMU. | ||
| 549 | */ | ||
| 550 | ae_proc[ndx].ul_gpp_pa = pa_curr; | ||
| 551 | /* | ||
| 552 | * This is the virtual uncached ioremapped | ||
| 553 | * address!!! | ||
| 554 | */ | ||
| 555 | ae_proc[ndx].ul_gpp_va = gpp_va_curr; | ||
| 556 | ae_proc[ndx].ul_dsp_va = | ||
| 557 | va_curr / hio_mgr->word_size; | ||
| 558 | ae_proc[ndx].ul_size = page_size[i]; | ||
| 559 | ae_proc[ndx].endianism = HW_LITTLE_ENDIAN; | ||
| 560 | ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT; | ||
| 561 | ae_proc[ndx].mixed_mode = HW_MMU_CPUES; | ||
| 562 | dev_dbg(bridge, "shm MMU TLB entry PA %x" | ||
| 563 | " VA %x DSP_VA %x Size %x\n", | ||
| 564 | ae_proc[ndx].ul_gpp_pa, | ||
| 565 | ae_proc[ndx].ul_gpp_va, | ||
| 566 | ae_proc[ndx].ul_dsp_va * | ||
| 567 | hio_mgr->word_size, page_size[i]); | ||
| 568 | ndx++; | ||
| 569 | } else { | ||
| 570 | status = | ||
| 571 | hio_mgr->intf_fxns-> | ||
| 572 | pfn_brd_mem_map(hio_mgr->hbridge_context, | ||
| 573 | pa_curr, va_curr, | ||
| 574 | page_size[i], map_attrs, | ||
| 575 | NULL); | ||
| 576 | dev_dbg(bridge, | ||
| 577 | "shm MMU PTE entry PA %x" | ||
| 578 | " VA %x DSP_VA %x Size %x\n", | ||
| 579 | ae_proc[ndx].ul_gpp_pa, | ||
| 580 | ae_proc[ndx].ul_gpp_va, | ||
| 581 | ae_proc[ndx].ul_dsp_va * | ||
| 582 | hio_mgr->word_size, page_size[i]); | ||
| 583 | if (status) | ||
| 584 | goto func_end; | ||
| 585 | } | ||
| 586 | pa_curr += page_size[i]; | ||
| 587 | va_curr += page_size[i]; | ||
| 588 | gpp_va_curr += page_size[i]; | ||
| 589 | num_bytes -= page_size[i]; | ||
| 590 | /* | ||
| 591 | * Don't try smaller sizes. Hopefully we have reached | ||
| 592 | * an address aligned to a bigger page size. | ||
| 593 | */ | ||
| 594 | break; | ||
| 595 | } | ||
| 596 | } | ||
| 472 | 597 | ||
| 473 | /* | 598 | /* |
| 474 | * Copy remaining entries from CDB. All entries are 1 MB and | 599 | * Copy remaining entries from CDB. All entries are 1 MB and |
@@ -509,12 +634,38 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
| 509 | "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa, | 634 | "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa, |
| 510 | ae_proc[ndx].ul_dsp_va); | 635 | ae_proc[ndx].ul_dsp_va); |
| 511 | ndx++; | 636 | ndx++; |
| 637 | } else { | ||
| 638 | status = hio_mgr->intf_fxns->pfn_brd_mem_map | ||
| 639 | (hio_mgr->hbridge_context, | ||
| 640 | hio_mgr->ext_proc_info.ty_tlb[i]. | ||
| 641 | ul_gpp_phys, | ||
| 642 | hio_mgr->ext_proc_info.ty_tlb[i]. | ||
| 643 | ul_dsp_virt, 0x100000, map_attrs, | ||
| 644 | NULL); | ||
| 512 | } | 645 | } |
| 513 | } | 646 | } |
| 514 | if (status) | 647 | if (status) |
| 515 | goto func_end; | 648 | goto func_end; |
| 516 | } | 649 | } |
| 517 | 650 | ||
| 651 | map_attrs = 0x00000000; | ||
| 652 | map_attrs = DSP_MAPLITTLEENDIAN; | ||
| 653 | map_attrs |= DSP_MAPPHYSICALADDR; | ||
| 654 | map_attrs |= DSP_MAPELEMSIZE32; | ||
| 655 | map_attrs |= DSP_MAPDONOTLOCK; | ||
| 656 | |||
| 657 | /* Map the L4 peripherals */ | ||
| 658 | i = 0; | ||
| 659 | while (l4_peripheral_table[i].phys_addr) { | ||
| 660 | status = hio_mgr->intf_fxns->pfn_brd_mem_map | ||
| 661 | (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr, | ||
| 662 | l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB, | ||
| 663 | map_attrs, NULL); | ||
| 664 | if (status) | ||
| 665 | goto func_end; | ||
| 666 | i++; | ||
| 667 | } | ||
| 668 | |||
| 518 | for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { | 669 | for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { |
| 519 | ae_proc[i].ul_dsp_va = 0; | 670 | ae_proc[i].ul_dsp_va = 0; |
| 520 | ae_proc[i].ul_gpp_pa = 0; | 671 | ae_proc[i].ul_gpp_pa = 0; |
@@ -537,12 +688,12 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
| 537 | status = -EFAULT; | 688 | status = -EFAULT; |
| 538 | goto func_end; | 689 | goto func_end; |
| 539 | } else { | 690 | } else { |
| 540 | if (sm_sg->seg0_da > ul_shm_base) { | 691 | if (ae_proc[0].ul_dsp_va > ul_shm_base) { |
| 541 | status = -EPERM; | 692 | status = -EPERM; |
| 542 | goto func_end; | 693 | goto func_end; |
| 543 | } | 694 | } |
| 544 | /* ul_shm_base may not be at ul_dsp_va address */ | 695 | /* ul_shm_base may not be at ul_dsp_va address */ |
| 545 | ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) * | 696 | ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) * |
| 546 | hio_mgr->word_size; | 697 | hio_mgr->word_size; |
| 547 | /* | 698 | /* |
| 548 | * bridge_dev_ctrl() will set dev context dsp-mmu info. In | 699 | * bridge_dev_ctrl() will set dev context dsp-mmu info. In |
@@ -566,7 +717,8 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
| 566 | goto func_end; | 717 | goto func_end; |
| 567 | } | 718 | } |
| 568 | /* Register SM */ | 719 | /* Register SM */ |
| 569 | status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa); | 720 | status = |
| 721 | register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa); | ||
| 570 | } | 722 | } |
| 571 | 723 | ||
| 572 | hio_mgr->shared_mem = (struct shm *)ul_shm_base; | 724 | hio_mgr->shared_mem = (struct shm *)ul_shm_base; |
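Every mapping loop added to `bridge_io_on_loaded()` follows the same greedy rule: pick the largest supported page size to which both the current PA and VA are aligned and which still fits in the remaining byte count, so big regions consume 16 MB/1 MB entries and only the ragged edges fall back to 64 KB/4 KB pages. A standalone sketch of that selection step (sizes mirror the `page_size[]` array above; function name is illustrative):

```c
#include <linux/types.h>

/* Largest-first candidates, as in the commit's page_size[] array. */
static const u32 page_sizes[] = {
	0x1000000,	/* 16 MB */
	0x100000,	/*  1 MB */
	0x10000,	/* 64 KB */
	0x1000,		/*  4 KB */
};

/* Both addresses must be aligned to the candidate size, and at least
 * that many bytes must remain to be mapped. */
static u32 pick_page_size(u32 pa, u32 va, u32 remaining)
{
	u32 all_bits = pa | va;
	int i;

	for (i = 0; i < 4; i++)
		if (remaining >= page_sizes[i] &&
		    (all_bits & (page_sizes[i] - 1)) == 0)
			return page_sizes[i];

	return 0;	/* caller guarantees at least 4 KB alignment */
}
```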
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index f22bc12bc0d3..1be081f917a7 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -23,7 +23,6 @@
| 23 | #include <dspbridge/host_os.h> | 23 | #include <dspbridge/host_os.h> |
| 24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
| 25 | #include <linux/mmzone.h> | 25 | #include <linux/mmzone.h> |
| 26 | #include <plat/control.h> | ||
| 27 | 26 | ||
| 28 | /* ----------------------------------- DSP/BIOS Bridge */ | 27 | /* ----------------------------------- DSP/BIOS Bridge */ |
| 29 | #include <dspbridge/dbdefs.h> | 28 | #include <dspbridge/dbdefs.h> |
@@ -35,6 +34,10 @@
| 35 | #include <dspbridge/drv.h> | 34 | #include <dspbridge/drv.h> |
| 36 | #include <dspbridge/sync.h> | 35 | #include <dspbridge/sync.h> |
| 37 | 36 | ||
| 37 | /* ------------------------------------ Hardware Abstraction Layer */ | ||
| 38 | #include <hw_defs.h> | ||
| 39 | #include <hw_mmu.h> | ||
| 40 | |||
| 38 | /* ----------------------------------- Link Driver */ | 41 | /* ----------------------------------- Link Driver */ |
| 39 | #include <dspbridge/dspdefs.h> | 42 | #include <dspbridge/dspdefs.h> |
| 40 | #include <dspbridge/dspchnl.h> | 43 | #include <dspbridge/dspchnl.h> |
@@ -47,6 +50,7 @@
| 47 | /* ----------------------------------- Platform Manager */ | 50 | /* ----------------------------------- Platform Manager */ |
| 48 | #include <dspbridge/dev.h> | 51 | #include <dspbridge/dev.h> |
| 49 | #include <dspbridge/dspapi.h> | 52 | #include <dspbridge/dspapi.h> |
| 53 | #include <dspbridge/dmm.h> | ||
| 50 | #include <dspbridge/wdt.h> | 54 | #include <dspbridge/wdt.h> |
| 51 | 55 | ||
| 52 | /* ----------------------------------- Local */ | 56 | /* ----------------------------------- Local */ |
@@ -67,6 +71,20 @@
| 67 | #define MMU_SMALL_PAGE_MASK 0xFFFFF000 | 71 | #define MMU_SMALL_PAGE_MASK 0xFFFFF000 |
| 68 | #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00 | 72 | #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00 |
| 69 | #define PAGES_II_LVL_TABLE 512 | 73 | #define PAGES_II_LVL_TABLE 512 |
| 74 | #define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT) | ||
| 75 | |||
| 76 | /* | ||
| 77 | * This is a totally ugly layer violation, but needed until | ||
| 78 | * omap_ctrl_set_dsp_boot*() are provided. | ||
| 79 | */ | ||
| 80 | #define OMAP3_IVA2_BOOTMOD_IDLE 1 | ||
| 81 | #define OMAP2_CONTROL_GENERAL 0x270 | ||
| 82 | #define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190) | ||
| 83 | #define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194) | ||
| 84 | |||
| 85 | #define OMAP343X_CTRL_REGADDR(reg) \ | ||
| 86 | OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg)) | ||
| 87 | |||
| 70 | 88 | ||
| 71 | /* Forward Declarations: */ | 89 | /* Forward Declarations: */ |
| 72 | static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); | 90 | static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); |
@@ -91,6 +109,12 @@ static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
| 91 | static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, | 109 | static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, |
| 92 | u8 *host_buff, u32 dsp_addr, | 110 | u8 *host_buff, u32 dsp_addr, |
| 93 | u32 ul_num_bytes, u32 mem_type); | 111 | u32 ul_num_bytes, u32 mem_type); |
| 112 | static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt, | ||
| 113 | u32 ul_mpu_addr, u32 virt_addr, | ||
| 114 | u32 ul_num_bytes, u32 ul_map_attr, | ||
| 115 | struct page **mapped_pages); | ||
| 116 | static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt, | ||
| 117 | u32 virt_addr, u32 ul_num_bytes); | ||
| 94 | static int bridge_dev_create(struct bridge_dev_context | 118 | static int bridge_dev_create(struct bridge_dev_context |
| 95 | **dev_cntxt, | 119 | **dev_cntxt, |
| 96 | struct dev_object *hdev_obj, | 120 | struct dev_object *hdev_obj, |
@@ -98,8 +122,57 @@ static int bridge_dev_create(struct bridge_dev_context
| 98 | static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, | 122 | static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, |
| 99 | u32 dw_cmd, void *pargs); | 123 | u32 dw_cmd, void *pargs); |
| 100 | static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt); | 124 | static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt); |
| 125 | static u32 user_va2_pa(struct mm_struct *mm, u32 address); | ||
| 126 | static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, | ||
| 127 | u32 va, u32 size, | ||
| 128 | struct hw_mmu_map_attrs_t *map_attrs); | ||
| 129 | static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, | ||
| 130 | u32 size, struct hw_mmu_map_attrs_t *attrs); | ||
| 131 | static int mem_map_vmalloc(struct bridge_dev_context *dev_context, | ||
| 132 | u32 ul_mpu_addr, u32 virt_addr, | ||
| 133 | u32 ul_num_bytes, | ||
| 134 | struct hw_mmu_map_attrs_t *hw_attrs); | ||
| 135 | |||
| 101 | bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr); | 136 | bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr); |
| 102 | 137 | ||
| 138 | /* ----------------------------------- Globals */ | ||
| 139 | |||
| 140 | /* Attributes of L2 page tables for DSP MMU */ | ||
| 141 | struct page_info { | ||
| 142 | u32 num_entries; /* Number of valid PTEs in the L2 PT */ | ||
| 143 | }; | ||
| 144 | |||
| 145 | /* Attributes used to manage the DSP MMU page tables */ | ||
| 146 | struct pg_table_attrs { | ||
| 147 | spinlock_t pg_lock; /* Critical section object handle */ | ||
| 148 | |||
| 149 | u32 l1_base_pa; /* Physical address of the L1 PT */ | ||
| 150 | u32 l1_base_va; /* Virtual address of the L1 PT */ | ||
| 151 | u32 l1_size; /* Size of the L1 PT */ | ||
| 152 | u32 l1_tbl_alloc_pa; | ||
| 153 | /* Physical address of Allocated mem for L1 table. May not be aligned */ | ||
| 154 | u32 l1_tbl_alloc_va; | ||
| 155 | /* Virtual address of Allocated mem for L1 table. May not be aligned */ | ||
| 156 | u32 l1_tbl_alloc_sz; | ||
| 157 | /* Size of consistent memory allocated for L1 table. | ||
| 158 | * May not be aligned */ | ||
| 159 | |||
| 160 | u32 l2_base_pa; /* Physical address of the L2 PT */ | ||
| 161 | u32 l2_base_va; /* Virtual address of the L2 PT */ | ||
| 162 | u32 l2_size; /* Size of the L2 PT */ | ||
| 163 | u32 l2_tbl_alloc_pa; | ||
| 164 | /* Physical address of Allocated mem for L2 table. May not be aligned */ | ||
| 165 | u32 l2_tbl_alloc_va; | ||
| 166 | /* Virtual address of Allocated mem for L2 table. May not be aligned */ | ||
| 167 | u32 l2_tbl_alloc_sz; | ||
| 168 | /* Size of consistent memory allocated for L2 table. | ||
| 169 | * May not be aligned */ | ||
| 170 | |||
| 171 | u32 l2_num_pages; /* Number of allocated L2 PT */ | ||
| 172 | /* Array [l2_num_pages] of L2 PT info structs */ | ||
| 173 | struct page_info *pg_info; | ||
| 174 | }; | ||
| 175 | |||
| 103 | /* | 176 | /* |
| 104 | * This Bridge driver's function interface table. | 177 | * This Bridge driver's function interface table. |
| 105 | */ | 178 | */ |
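The `pg_table_attrs` bookkeeping above describes an ARM-style two-level layout: a 4 KB L1 table whose 32-bit entries each cover 1 MB of DSP address space (hence the commit's comment about 1024 entries reaching 0x4000:0000), with coarse L2 tables resolving the 64 KB and 4 KB cases. A sketch of the index arithmetic those sizes imply (assumed layout; the real walkers are the `pte_update()`/`pte_set()` helpers declared above):

```c
#include <linux/types.h>

#define SECTION_SHIFT		20	/* one L1 entry maps 1 MB */
#define SMALL_PAGE_SHIFT	12	/* one L2 entry maps 4 KB */

static inline u32 l1_index(u32 dsp_va)
{
	return dsp_va >> SECTION_SHIFT;		/* 0..1023 for a 4 KB L1 */
}

static inline u32 l2_index(u32 dsp_va)
{
	/* 256 entries per 1 KB coarse table */
	return (dsp_va >> SMALL_PAGE_SHIFT) & 0xff;
}
```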
@@ -119,6 +192,8 @@ static struct bridge_drv_interface drv_interface_fxns = {
| 119 | bridge_brd_set_state, | 192 | bridge_brd_set_state, |
| 120 | bridge_brd_mem_copy, | 193 | bridge_brd_mem_copy, |
| 121 | bridge_brd_mem_write, | 194 | bridge_brd_mem_write, |
| 195 | bridge_brd_mem_map, | ||
| 196 | bridge_brd_mem_un_map, | ||
| 122 | /* The following CHNL functions are provided by chnl_io.lib: */ | 197 | /* The following CHNL functions are provided by chnl_io.lib: */ |
| 123 | bridge_chnl_create, | 198 | bridge_chnl_create, |
| 124 | bridge_chnl_destroy, | 199 | bridge_chnl_destroy, |
@@ -148,6 +223,27 @@ static struct bridge_drv_interface drv_interface_fxns = {
| 148 | bridge_msg_set_queue_id, | 223 | bridge_msg_set_queue_id, |
| 149 | }; | 224 | }; |
| 150 | 225 | ||
| 226 | static inline void flush_all(struct bridge_dev_context *dev_context) | ||
| 227 | { | ||
| 228 | if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION || | ||
| 229 | dev_context->dw_brd_state == BRD_HIBERNATION) | ||
| 230 | wake_dsp(dev_context, NULL); | ||
| 231 | |||
| 232 | hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base); | ||
| 233 | } | ||
| 234 | |||
| 235 | static void bad_page_dump(u32 pa, struct page *pg) | ||
| 236 | { | ||
| 237 | pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa); | ||
| 238 | pr_emerg("Bad page state in process '%s'\n" | ||
| 239 | "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" | ||
| 240 | "Backtrace:\n", | ||
| 241 | current->comm, pg, (int)(2 * sizeof(unsigned long)), | ||
| 242 | (unsigned long)pg->flags, pg->mapping, | ||
| 243 | page_mapcount(pg), page_count(pg)); | ||
| 244 | dump_stack(); | ||
| 245 | } | ||
| 246 | |||
| 151 | /* | 247 | /* |
| 152 | * ======== bridge_drv_entry ======== | 248 | * ======== bridge_drv_entry ======== |
| 153 | * purpose: | 249 | * purpose: |
@@ -203,7 +299,8 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
| 203 | (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, | 299 | (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, |
| 204 | OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); | 300 | OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); |
| 205 | } | 301 | } |
| 206 | 302 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, | |
| 303 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | ||
| 207 | dsp_clk_enable(DSP_CLK_IVA2); | 304 | dsp_clk_enable(DSP_CLK_IVA2); |
| 208 | 305 | ||
| 209 | /* set the device state to IDLE */ | 306 | /* set the device state to IDLE */ |
@@ -274,17 +371,14 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
| 274 | { | 371 | { |
| 275 | int status = 0; | 372 | int status = 0; |
| 276 | struct bridge_dev_context *dev_context = dev_ctxt; | 373 | struct bridge_dev_context *dev_context = dev_ctxt; |
| 277 | struct iommu *mmu = NULL; | ||
| 278 | struct shm_segs *sm_sg; | ||
| 279 | int l4_i = 0, tlb_i = 0; | ||
| 280 | u32 sg0_da = 0, sg1_da = 0; | ||
| 281 | struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry; | ||
| 282 | u32 dw_sync_addr = 0; | 374 | u32 dw_sync_addr = 0; |
| 283 | u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */ | 375 | u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */ |
| 284 | u32 ul_shm_base_virt; /* Dsp Virt SM base addr */ | 376 | u32 ul_shm_base_virt; /* Dsp Virt SM base addr */ |
| 285 | u32 ul_tlb_base_virt; /* Base of MMU TLB entry */ | 377 | u32 ul_tlb_base_virt; /* Base of MMU TLB entry */ |
| 286 | /* Offset of shm_base_virt from tlb_base_virt */ | 378 | /* Offset of shm_base_virt from tlb_base_virt */ |
| 287 | u32 ul_shm_offset_virt; | 379 | u32 ul_shm_offset_virt; |
| 380 | s32 entry_ndx; | ||
| 381 | s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */ | ||
| 288 | struct cfg_hostres *resources = NULL; | 382 | struct cfg_hostres *resources = NULL; |
| 289 | u32 temp; | 383 | u32 temp; |
| 290 | u32 ul_dsp_clk_rate; | 384 | u32 ul_dsp_clk_rate; |
@@ -305,12 +399,12 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
| 305 | ul_shm_base_virt *= DSPWORDSIZE; | 399 | ul_shm_base_virt *= DSPWORDSIZE; |
| 306 | DBC_ASSERT(ul_shm_base_virt != 0); | 400 | DBC_ASSERT(ul_shm_base_virt != 0); |
| 307 | /* DSP Virtual address */ | 401 | /* DSP Virtual address */ |
| 308 | ul_tlb_base_virt = dev_context->sh_s.seg0_da; | 402 | ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va; |
| 309 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); | 403 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); |
| 310 | ul_shm_offset_virt = | 404 | ul_shm_offset_virt = |
| 311 | ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); | 405 | ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); |
| 312 | /* Kernel logical address */ | 406 | /* Kernel logical address */ |
| 313 | ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt; | 407 | ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt; |
| 314 | 408 | ||
| 315 | DBC_ASSERT(ul_shm_base != 0); | 409 | DBC_ASSERT(ul_shm_base != 0); |
| 316 | /* 2nd wd is used as sync field */ | 410 | /* 2nd wd is used as sync field */ |
@@ -345,83 +439,78 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
| 345 | OMAP343X_CONTROL_IVA2_BOOTMOD)); | 439 | OMAP343X_CONTROL_IVA2_BOOTMOD)); |
| 346 | } | 440 | } |
| 347 | } | 441 | } |
| 348 | |||
| 349 | if (!status) { | 442 | if (!status) { |
| 443 | /* Reset and Unreset the RST2, so that BOOTADDR is copied to | ||
| 444 | * IVA2 SYSC register */ | ||
| 445 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, | ||
| 446 | OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | ||
| 447 | udelay(100); | ||
| 350 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, | 448 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, |
| 351 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | 449 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); |
| 352 | mmu = dev_context->dsp_mmu; | 450 | udelay(100); |
| 353 | if (mmu) | 451 | |
| 354 | dsp_mmu_exit(mmu); | 452 | /* Disable the DSP MMU */ |
| 355 | mmu = dsp_mmu_init(); | 453 | hw_mmu_disable(resources->dw_dmmu_base); |
| 356 | if (IS_ERR(mmu)) { | 454 | /* Disable TWL */ |
| 357 | dev_err(bridge, "dsp_mmu_init failed!\n"); | 455 | hw_mmu_twl_disable(resources->dw_dmmu_base); |
| 358 | dev_context->dsp_mmu = NULL; | 456 | |
| 359 | status = (int)mmu; | 457 | /* Only make TLB entry if both addresses are non-zero */ |
| 360 | } | 458 | for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; |
| 361 | } | 459 | entry_ndx++) { |
| 362 | if (!status) { | 460 | struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx]; |
| 363 | dev_context->dsp_mmu = mmu; | 461 | struct hw_mmu_map_attrs_t map_attrs = { |
| 364 | sm_sg = &dev_context->sh_s; | 462 | .endianism = e->endianism, |
| 365 | sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa, | 463 | .element_size = e->elem_size, |
| 366 | sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); | 464 | .mixed_size = e->mixed_mode, |
| 367 | if (IS_ERR_VALUE(sg0_da)) { | 465 | }; |
| 368 | status = (int)sg0_da; | 466 | |
| 369 | sg0_da = 0; | 467 | if (!e->ul_gpp_pa || !e->ul_dsp_va) |
| 370 | } | ||
| 371 | } | ||
| 372 | if (!status) { | ||
| 373 | sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa, | ||
| 374 | sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); | ||
| 375 | if (IS_ERR_VALUE(sg1_da)) { | ||
| 376 | status = (int)sg1_da; | ||
| 377 | sg1_da = 0; | ||
| 378 | } | ||
| 379 | } | ||
| 380 | if (!status) { | ||
| 381 | u32 da; | ||
| 382 | for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) { | ||
| 383 | if (!tlb[tlb_i].ul_gpp_pa) | ||
| 384 | continue; | 468 | continue; |
| 385 | 469 | ||
| 386 | dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size" | 470 | dev_dbg(bridge, |
| 387 | " 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa, | 471 | "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x", |
| 388 | tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size); | 472 | itmp_entry_ndx, |
| 389 | 473 | e->ul_gpp_pa, | |
| 390 | da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va, | 474 | e->ul_dsp_va, |
| 391 | tlb[tlb_i].ul_gpp_pa, PAGE_SIZE, | 475 | e->ul_size); |
| 392 | IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); | 476 | |
| 393 | if (IS_ERR_VALUE(da)) { | 477 | hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base, |
| 394 | status = (int)da; | 478 | e->ul_gpp_pa, |
| 395 | break; | 479 | e->ul_dsp_va, |
| 396 | } | 480 | e->ul_size, |
| 397 | } | 481 | itmp_entry_ndx, |
| 398 | } | 482 | &map_attrs, 1, 1); |
| 399 | if (!status) { | 483 | |
| 400 | u32 da; | 484 | itmp_entry_ndx++; |
| 401 | l4_i = 0; | ||
| 402 | while (l4_peripheral_table[l4_i].phys_addr) { | ||
| 403 | da = iommu_kmap(mmu, l4_peripheral_table[l4_i]. | ||
| 404 | dsp_virt_addr, l4_peripheral_table[l4_i]. | ||
| 405 | phys_addr, PAGE_SIZE, | ||
| 406 | IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); | ||
| 407 | if (IS_ERR_VALUE(da)) { | ||
| 408 | status = (int)da; | ||
| 409 | break; | ||
| 410 | } | ||
| 411 | l4_i++; | ||
| 412 | } | 485 | } |
| 413 | } | 486 | } |
| 414 | 487 | ||
| 415 | /* Lock the above TLB entries and get the BIOS and load monitor timer | 488 | /* Lock the above TLB entries and get the BIOS and load monitor timer |
| 416 | * information */ | 489 | * information */ |
| 417 | if (!status) { | 490 | if (!status) { |
| 491 | hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx); | ||
| 492 | hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx); | ||
| 493 | hw_mmu_ttb_set(resources->dw_dmmu_base, | ||
| 494 | dev_context->pt_attrs->l1_base_pa); | ||
| 495 | hw_mmu_twl_enable(resources->dw_dmmu_base); | ||
| 496 | /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */ | ||
| 497 | |||
| 498 | temp = __raw_readl((resources->dw_dmmu_base) + 0x10); | ||
| 499 | temp = (temp & 0xFFFFFFEF) | 0x11; | ||
| 500 | __raw_writel(temp, (resources->dw_dmmu_base) + 0x10); | ||
| 501 | |||
| 502 | /* Let the DSP MMU run */ | ||
| 503 | hw_mmu_enable(resources->dw_dmmu_base); | ||
| 504 | |||
| 418 | /* Enable the BIOS clock */ | 505 | /* Enable the BIOS clock */ |
| 419 | (void)dev_get_symbol(dev_context->hdev_obj, | 506 | (void)dev_get_symbol(dev_context->hdev_obj, |
| 420 | BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); | 507 | BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); |
| 421 | (void)dev_get_symbol(dev_context->hdev_obj, | 508 | (void)dev_get_symbol(dev_context->hdev_obj, |
| 422 | BRIDGEINIT_LOADMON_GPTIMER, | 509 | BRIDGEINIT_LOADMON_GPTIMER, |
| 423 | &ul_load_monitor_timer); | 510 | &ul_load_monitor_timer); |
| 511 | } | ||
| 424 | 512 | ||
| 513 | if (!status) { | ||
| 425 | if (ul_load_monitor_timer != 0xFFFF) { | 514 | if (ul_load_monitor_timer != 0xFFFF) { |
| 426 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | | 515 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | |
| 427 | ul_load_monitor_timer; | 516 | ul_load_monitor_timer; |
@@ -430,7 +519,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
| 430 | dev_dbg(bridge, "Not able to get the symbol for Load " | 519 | dev_dbg(bridge, "Not able to get the symbol for Load " |
| 431 | "Monitor Timer\n"); | 520 | "Monitor Timer\n"); |
| 432 | } | 521 | } |
| 522 | } | ||
| 433 | 523 | ||
| 524 | if (!status) { | ||
| 434 | if (ul_bios_gp_timer != 0xFFFF) { | 525 | if (ul_bios_gp_timer != 0xFFFF) { |
| 435 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | | 526 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | |
| 436 | ul_bios_gp_timer; | 527 | ul_bios_gp_timer; |
@@ -439,7 +530,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
| 439 | dev_dbg(bridge, | 530 | dev_dbg(bridge, |
| 440 | "Not able to get the symbol for BIOS Timer\n"); | 531 | "Not able to get the symbol for BIOS Timer\n"); |
| 441 | } | 532 | } |
| 533 | } | ||
| 442 | 534 | ||
| 535 | if (!status) { | ||
| 443 | /* Set the DSP clock rate */ | 536 | /* Set the DSP clock rate */ |
| 444 | (void)dev_get_symbol(dev_context->hdev_obj, | 537 | (void)dev_get_symbol(dev_context->hdev_obj, |
| 445 | "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); | 538 | "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); |
@@ -492,6 +585,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
| 492 | 585 | ||
| 493 | /* Let DSP go */ | 586 | /* Let DSP go */ |
| 494 | dev_dbg(bridge, "%s Unreset\n", __func__); | 587 | dev_dbg(bridge, "%s Unreset\n", __func__); |
| 588 | /* Enable DSP MMU Interrupts */ | ||
| 589 | hw_mmu_event_enable(resources->dw_dmmu_base, | ||
| 590 | HW_MMU_ALL_INTERRUPTS); | ||
| 495 | /* release the RST1, DSP starts executing now .. */ | 591 | /* release the RST1, DSP starts executing now .. */ |
| 496 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, | 592 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, |
| 497 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | 593 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); |
@@ -521,23 +617,11 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
| 521 | 617 | ||
| 522 | /* update board state */ | 618 | /* update board state */ |
| 523 | dev_context->dw_brd_state = BRD_RUNNING; | 619 | dev_context->dw_brd_state = BRD_RUNNING; |
| 524 | return 0; | 620 | /* (void)chnlsm_enable_interrupt(dev_context); */ |
| 525 | } else { | 621 | } else { |
| 526 | dev_context->dw_brd_state = BRD_UNKNOWN; | 622 | dev_context->dw_brd_state = BRD_UNKNOWN; |
| 527 | } | 623 | } |
| 528 | } | 624 | } |
| 529 | |||
| 530 | while (tlb_i--) { | ||
| 531 | if (!tlb[tlb_i].ul_gpp_pa) | ||
| 532 | continue; | ||
| 533 | iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va); | ||
| 534 | } | ||
| 535 | while (l4_i--) | ||
| 536 | iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr); | ||
| 537 | if (sg0_da) | ||
| 538 | iommu_kunmap(mmu, sg0_da); | ||
| 539 | if (sg1_da) | ||
| 540 | iommu_kunmap(mmu, sg1_da); | ||
| 541 | return status; | 625 | return status; |
| 542 | } | 626 | } |
| 543 | 627 | ||
@@ -553,9 +637,8 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
| 553 | { | 637 | { |
| 554 | int status = 0; | 638 | int status = 0; |
| 555 | struct bridge_dev_context *dev_context = dev_ctxt; | 639 | struct bridge_dev_context *dev_context = dev_ctxt; |
| 640 | struct pg_table_attrs *pt_attrs; | ||
| 556 | u32 dsp_pwr_state; | 641 | u32 dsp_pwr_state; |
| 557 | int i; | ||
| 558 | struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry; | ||
| 559 | struct omap_dsp_platform_data *pdata = | 642 | struct omap_dsp_platform_data *pdata = |
| 560 | omap_dspbridge_dev->dev.platform_data; | 643 | omap_dspbridge_dev->dev.platform_data; |
| 561 | 644 | ||
@@ -591,37 +674,23 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
| 591 | 674 | ||
| 592 | dsp_wdt_enable(false); | 675 | dsp_wdt_enable(false); |
| 593 | 676 | ||
| 594 | /* Reset DSP */ | 677 | /* This is a good place to clear the MMU page tables as well */ |
| 595 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, | 678 | if (dev_context->pt_attrs) { |
| 596 | OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | 679 | pt_attrs = dev_context->pt_attrs; |
| 597 | 680 | memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size); | |
| 681 | memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size); | ||
| 682 | memset((u8 *) pt_attrs->pg_info, 0x00, | ||
| 683 | (pt_attrs->l2_num_pages * sizeof(struct page_info))); | ||
| 684 | } | ||
| 598 | /* Disable the mailbox interrupts */ | 685 | /* Disable the mailbox interrupts */ |
| 599 | if (dev_context->mbox) { | 686 | if (dev_context->mbox) { |
| 600 | omap_mbox_disable_irq(dev_context->mbox, IRQ_RX); | 687 | omap_mbox_disable_irq(dev_context->mbox, IRQ_RX); |
| 601 | omap_mbox_put(dev_context->mbox); | 688 | omap_mbox_put(dev_context->mbox); |
| 602 | dev_context->mbox = NULL; | 689 | dev_context->mbox = NULL; |
| 603 | } | 690 | } |
| 604 | if (dev_context->dsp_mmu) { | 691 | /* Reset IVA2 clocks*/ |
| 605 | pr_err("Proc stop mmu if statement\n"); | 692 | (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK | |
| 606 | for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) { | 693 | OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); |
| 607 | if (!tlb[i].ul_gpp_pa) | ||
| 608 | continue; | ||
| 609 | iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va); | ||
| 610 | } | ||
| 611 | i = 0; | ||
| 612 | while (l4_peripheral_table[i].phys_addr) { | ||
| 613 | iommu_kunmap(dev_context->dsp_mmu, | ||
| 614 | l4_peripheral_table[i].dsp_virt_addr); | ||
| 615 | i++; | ||
| 616 | } | ||
| 617 | iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da); | ||
| 618 | iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da); | ||
| 619 | dsp_mmu_exit(dev_context->dsp_mmu); | ||
| 620 | dev_context->dsp_mmu = NULL; | ||
| 621 | } | ||
| 622 | /* Reset IVA IOMMU*/ | ||
| 623 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, | ||
| 624 | OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | ||
| 625 | 694 | ||
| 626 | dsp_clock_disable_all(dev_context->dsp_per_clks); | 695 | dsp_clock_disable_all(dev_context->dsp_per_clks); |
| 627 | dsp_clk_disable(DSP_CLK_IVA2); | 696 | dsp_clk_disable(DSP_CLK_IVA2); |
@@ -681,6 +750,10 @@ static int bridge_dev_create(struct bridge_dev_context
| 681 | struct bridge_dev_context *dev_context = NULL; | 750 | struct bridge_dev_context *dev_context = NULL; |
| 682 | s32 entry_ndx; | 751 | s32 entry_ndx; |
| 683 | struct cfg_hostres *resources = config_param; | 752 | struct cfg_hostres *resources = config_param; |
| 753 | struct pg_table_attrs *pt_attrs; | ||
| 754 | u32 pg_tbl_pa; | ||
| 755 | u32 pg_tbl_va; | ||
| 756 | u32 align_size; | ||
| 684 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | 757 | struct drv_data *drv_datap = dev_get_drvdata(bridge); |
| 685 | 758 | ||
| 686 | /* Allocate and initialize a data structure to contain the bridge driver | 759 | /* Allocate and initialize a data structure to contain the bridge driver |
@@ -711,8 +784,97 @@ static int bridge_dev_create(struct bridge_dev_context
| 711 | if (!dev_context->dw_dsp_base_addr) | 784 | if (!dev_context->dw_dsp_base_addr) |
| 712 | status = -EPERM; | 785 | status = -EPERM; |
| 713 | 786 | ||
| 787 | pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL); | ||
| 788 | if (pt_attrs != NULL) { | ||
| 789 | /* Assuming that we use only DSP's memory map | ||
| 790 | * until 0x4000:0000 , we would need only 1024 | ||
| 791 | * L1 enties i.e L1 size = 4K */ | ||
| 792 | pt_attrs->l1_size = 0x1000; | ||
| 793 | align_size = pt_attrs->l1_size; | ||
| 794 | /* Align sizes are expected to be power of 2 */ | ||
| 795 | /* we like to get aligned on L1 table size */ | ||
| 796 | pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size, | ||
| 797 | align_size, &pg_tbl_pa); | ||
| 798 | |||
| 799 | /* Check if the PA is aligned for us */ | ||
| 800 | if ((pg_tbl_pa) & (align_size - 1)) { | ||
| 801 | /* PA not aligned to page table size , | ||
| 802 | * try with more allocation and align */ | ||
| 803 | mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa, | ||
| 804 | pt_attrs->l1_size); | ||
| 805 | /* we like to get aligned on L1 table size */ | ||
| 806 | pg_tbl_va = | ||
| 807 | (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2, | ||
| 808 | align_size, &pg_tbl_pa); | ||
| 809 | /* We should be able to get aligned table now */ | ||
| 810 | pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa; | ||
| 811 | pt_attrs->l1_tbl_alloc_va = pg_tbl_va; | ||
| 812 | pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2; | ||
| 813 | /* Align the PA to the next 'align' boundary */ | ||
| 814 | pt_attrs->l1_base_pa = | ||
| 815 | ((pg_tbl_pa) + | ||
| 816 | (align_size - 1)) & (~(align_size - 1)); | ||
| 817 | pt_attrs->l1_base_va = | ||
| 818 | pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa); | ||
| 819 | } else { | ||
| 820 | /* We got aligned PA, cool */ | ||
| 821 | pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa; | ||
| 822 | pt_attrs->l1_tbl_alloc_va = pg_tbl_va; | ||
| 823 | pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size; | ||
| 824 | pt_attrs->l1_base_pa = pg_tbl_pa; | ||
| 825 | pt_attrs->l1_base_va = pg_tbl_va; | ||
| 826 | } | ||
| 827 | if (pt_attrs->l1_base_va) | ||
| 828 | memset((u8 *) pt_attrs->l1_base_va, 0x00, | ||
| 829 | pt_attrs->l1_size); | ||
| 830 | |||
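The fallback path above is the standard over-allocate-and-round-up trick: a 4 KB L1 table holds 1024 four-byte section descriptors (one per 1 MB, covering the assumed 0x0-0x4000:0000 range), and the hardware wants the table's physical base aligned to the table size, so the retry allocates twice the size and rounds the PA up. A minimal, self-contained sketch of that round-up, assuming only that the alignment is a power of two (the names are illustrative, not the driver's):

    #include <assert.h>
    #include <stdint.h>

    /* Round pa up to the next 'align' boundary; align must be a power of 2. */
    static uint32_t align_up(uint32_t pa, uint32_t align)
    {
            assert((align & (align - 1)) == 0);
            return (pa + (align - 1)) & ~(align - 1);
    }

    /*
     * A buffer of 2 * align bytes always contains an align-sized window
     * starting at align_up(pa, align): the round-up moves the base by at
     * most align - 1 bytes, so at least align + 1 bytes remain.
     */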
| 831 | /* number of L2 page tables = DMM pool used + SHMMEM + EXTMEM + | ||
| 832 | * L4 pages */ | ||
| 833 | pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6); | ||
| 834 | pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE * | ||
| 835 | pt_attrs->l2_num_pages; | ||
| 836 | align_size = 4; /* Make it u32 aligned */ | ||
| 837 | /* u32 alignment is all we request for the L2 area */ | ||
| 838 | pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size, | ||
| 839 | align_size, &pg_tbl_pa); | ||
| 840 | pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa; | ||
| 841 | pt_attrs->l2_tbl_alloc_va = pg_tbl_va; | ||
| 842 | pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size; | ||
| 843 | pt_attrs->l2_base_pa = pg_tbl_pa; | ||
| 844 | pt_attrs->l2_base_va = pg_tbl_va; | ||
| 845 | |||
| 846 | if (pt_attrs->l2_base_va) | ||
| 847 | memset((u8 *) pt_attrs->l2_base_va, 0x00, | ||
| 848 | pt_attrs->l2_size); | ||
| 849 | |||
| 850 | pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages * | ||
| 851 | sizeof(struct page_info), GFP_KERNEL); | ||
| 852 | dev_dbg(bridge, | ||
| 853 | "L1 pa %x, va %x, size %x\n L2 pa %x, va " | ||
| 854 | "%x, size %x\n", pt_attrs->l1_base_pa, | ||
| 855 | pt_attrs->l1_base_va, pt_attrs->l1_size, | ||
| 856 | pt_attrs->l2_base_pa, pt_attrs->l2_base_va, | ||
| 857 | pt_attrs->l2_size); | ||
| 858 | dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n", | ||
| 859 | pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info); | ||
| 860 | } | ||
| 861 | if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) && | ||
| 862 | (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL)) | ||
| 863 | dev_context->pt_attrs = pt_attrs; | ||
| 864 | else | ||
| 865 | status = -ENOMEM; | ||
| 866 | |||
| 714 | if (!status) { | 867 | if (!status) { |
| 868 | spin_lock_init(&pt_attrs->pg_lock); | ||
| 715 | dev_context->tc_word_swap_on = drv_datap->tc_wordswapon; | 869 | dev_context->tc_word_swap_on = drv_datap->tc_wordswapon; |
| 870 | |||
| 871 | /* Set the Clock Divisor for the DSP module */ | ||
| 872 | udelay(5); | ||
| 873 | /* MMU address is obtained from the host | ||
| 874 | * resources struct */ | ||
| 875 | dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base; | ||
| 876 | } | ||
| 877 | if (!status) { | ||
| 716 | dev_context->hdev_obj = hdev_obj; | 878 | dev_context->hdev_obj = hdev_obj; |
| 717 | /* Store current board state. */ | 879 | /* Store current board state. */ |
| 718 | dev_context->dw_brd_state = BRD_UNKNOWN; | 880 | dev_context->dw_brd_state = BRD_UNKNOWN; |
| @@ -722,6 +884,23 @@ static int bridge_dev_create(struct bridge_dev_context | |||
| 722 | /* Return ptr to our device state to the DSP API for storage */ | 884 | /* Return ptr to our device state to the DSP API for storage */ |
| 723 | *dev_cntxt = dev_context; | 885 | *dev_cntxt = dev_context; |
| 724 | } else { | 886 | } else { |
| 887 | if (pt_attrs != NULL) { | ||
| 888 | kfree(pt_attrs->pg_info); | ||
| 889 | |||
| 890 | if (pt_attrs->l2_tbl_alloc_va) { | ||
| 891 | mem_free_phys_mem((void *) | ||
| 892 | pt_attrs->l2_tbl_alloc_va, | ||
| 893 | pt_attrs->l2_tbl_alloc_pa, | ||
| 894 | pt_attrs->l2_tbl_alloc_sz); | ||
| 895 | } | ||
| 896 | if (pt_attrs->l1_tbl_alloc_va) { | ||
| 897 | mem_free_phys_mem((void *) | ||
| 898 | pt_attrs->l1_tbl_alloc_va, | ||
| 899 | pt_attrs->l1_tbl_alloc_pa, | ||
| 900 | pt_attrs->l1_tbl_alloc_sz); | ||
| 901 | } | ||
| 902 | } | ||
| 903 | kfree(pt_attrs); | ||
| 725 | kfree(dev_context); | 904 | kfree(dev_context); |
| 726 | } | 905 | } |
| 727 | func_end: | 906 | func_end: |
| @@ -789,6 +968,7 @@ static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, | |||
| 789 | */ | 968 | */ |
| 790 | static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) | 969 | static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) |
| 791 | { | 970 | { |
| 971 | struct pg_table_attrs *pt_attrs; | ||
| 792 | int status = 0; | 972 | int status = 0; |
| 793 | struct bridge_dev_context *dev_context = (struct bridge_dev_context *) | 973 | struct bridge_dev_context *dev_context = (struct bridge_dev_context *) |
| 794 | dev_ctxt; | 974 | dev_ctxt; |
| @@ -802,6 +982,23 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) | |||
| 802 | 982 | ||
| 803 | /* first put the device to stop state */ | 983 | /* first put the device to stop state */ |
| 804 | bridge_brd_stop(dev_context); | 984 | bridge_brd_stop(dev_context); |
| 985 | if (dev_context->pt_attrs) { | ||
| 986 | pt_attrs = dev_context->pt_attrs; | ||
| 987 | kfree(pt_attrs->pg_info); | ||
| 988 | |||
| 989 | if (pt_attrs->l2_tbl_alloc_va) { | ||
| 990 | mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va, | ||
| 991 | pt_attrs->l2_tbl_alloc_pa, | ||
| 992 | pt_attrs->l2_tbl_alloc_sz); | ||
| 993 | } | ||
| 994 | if (pt_attrs->l1_tbl_alloc_va) { | ||
| 995 | mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va, | ||
| 996 | pt_attrs->l1_tbl_alloc_pa, | ||
| 997 | pt_attrs->l1_tbl_alloc_sz); | ||
| 998 | } | ||
| 999 | kfree(pt_attrs); | ||
| 1000 | |||
| 1001 | } | ||
| 805 | 1002 | ||
| 806 | if (dev_context->resources) { | 1003 | if (dev_context->resources) { |
| 807 | host_res = dev_context->resources; | 1004 | host_res = dev_context->resources; |
| @@ -832,6 +1029,8 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) | |||
| 832 | iounmap((void *)host_res->dw_mem_base[3]); | 1029 | iounmap((void *)host_res->dw_mem_base[3]); |
| 833 | if (host_res->dw_mem_base[4]) | 1030 | if (host_res->dw_mem_base[4]) |
| 834 | iounmap((void *)host_res->dw_mem_base[4]); | 1031 | iounmap((void *)host_res->dw_mem_base[4]); |
| 1032 | if (host_res->dw_dmmu_base) | ||
| 1033 | iounmap(host_res->dw_dmmu_base); | ||
| 835 | if (host_res->dw_per_base) | 1034 | if (host_res->dw_per_base) |
| 836 | iounmap(host_res->dw_per_base); | 1035 | iounmap(host_res->dw_per_base); |
| 837 | if (host_res->dw_per_pm_base) | 1036 | if (host_res->dw_per_pm_base) |
| @@ -845,6 +1044,7 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) | |||
| 845 | host_res->dw_mem_base[2] = (u32) NULL; | 1044 | host_res->dw_mem_base[2] = (u32) NULL; |
| 846 | host_res->dw_mem_base[3] = (u32) NULL; | 1045 | host_res->dw_mem_base[3] = (u32) NULL; |
| 847 | host_res->dw_mem_base[4] = (u32) NULL; | 1046 | host_res->dw_mem_base[4] = (u32) NULL; |
| 1047 | host_res->dw_dmmu_base = NULL; | ||
| 848 | host_res->dw_sys_ctrl_base = NULL; | 1048 | host_res->dw_sys_ctrl_base = NULL; |
| 849 | 1049 | ||
| 850 | kfree(host_res); | 1050 | kfree(host_res); |
| @@ -928,6 +1128,673 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, | |||
| 928 | } | 1128 | } |
| 929 | 1129 | ||
| 930 | /* | 1130 | /* |
| 1131 | * ======== bridge_brd_mem_map ======== | ||
| 1132 | * This function maps an MPU buffer into the DSP address space. It performs | ||
| 1133 | * linear to physical address translation if required. It translates each | ||
| 1134 | * page since linear addresses can be physically non-contiguous. | ||
| 1135 | * All address & size arguments are assumed to be page aligned (in proc.c) | ||
| 1136 | * | ||
| 1137 | * TODO: Disable MMU while updating the page tables (but that'll stall DSP) | ||
| 1138 | */ | ||
| 1139 | static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt, | ||
| 1140 | u32 ul_mpu_addr, u32 virt_addr, | ||
| 1141 | u32 ul_num_bytes, u32 ul_map_attr, | ||
| 1142 | struct page **mapped_pages) | ||
| 1143 | { | ||
| 1144 | u32 attrs; | ||
| 1145 | int status = 0; | ||
| 1146 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
| 1147 | struct hw_mmu_map_attrs_t hw_attrs; | ||
| 1148 | struct vm_area_struct *vma; | ||
| 1149 | struct mm_struct *mm = current->mm; | ||
| 1150 | u32 write = 0; | ||
| 1151 | u32 num_usr_pgs = 0; | ||
| 1152 | struct page *mapped_page, *pg; | ||
| 1153 | s32 pg_num; | ||
| 1154 | u32 va = virt_addr; | ||
| 1155 | struct task_struct *curr_task = current; | ||
| 1156 | u32 pg_i = 0; | ||
| 1157 | u32 mpu_addr, pa; | ||
| 1158 | |||
| 1159 | dev_dbg(bridge, | ||
| 1160 | "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n", | ||
| 1161 | __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes, | ||
| 1162 | ul_map_attr); | ||
| 1163 | if (ul_num_bytes == 0) | ||
| 1164 | return -EINVAL; | ||
| 1165 | |||
| 1166 | if (ul_map_attr & DSP_MAP_DIR_MASK) { | ||
| 1167 | attrs = ul_map_attr; | ||
| 1168 | } else { | ||
| 1169 | /* Assign default attributes */ | ||
| 1170 | attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16); | ||
| 1171 | } | ||
| 1172 | /* Take mapping properties */ | ||
| 1173 | if (attrs & DSP_MAPBIGENDIAN) | ||
| 1174 | hw_attrs.endianism = HW_BIG_ENDIAN; | ||
| 1175 | else | ||
| 1176 | hw_attrs.endianism = HW_LITTLE_ENDIAN; | ||
| 1177 | |||
| 1178 | hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t) | ||
| 1179 | ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2); | ||
| 1180 | /* Ignore element_size if mixed_size is enabled */ | ||
| 1181 | if (hw_attrs.mixed_size == 0) { | ||
| 1182 | if (attrs & DSP_MAPELEMSIZE8) { | ||
| 1183 | /* Size is 8 bit */ | ||
| 1184 | hw_attrs.element_size = HW_ELEM_SIZE8BIT; | ||
| 1185 | } else if (attrs & DSP_MAPELEMSIZE16) { | ||
| 1186 | /* Size is 16 bit */ | ||
| 1187 | hw_attrs.element_size = HW_ELEM_SIZE16BIT; | ||
| 1188 | } else if (attrs & DSP_MAPELEMSIZE32) { | ||
| 1189 | /* Size is 32 bit */ | ||
| 1190 | hw_attrs.element_size = HW_ELEM_SIZE32BIT; | ||
| 1191 | } else if (attrs & DSP_MAPELEMSIZE64) { | ||
| 1192 | /* Size is 64 bit */ | ||
| 1193 | hw_attrs.element_size = HW_ELEM_SIZE64BIT; | ||
| 1194 | } else { | ||
| 1195 | /* | ||
| 1196 | * Mixed size isn't enabled, so the element size | ||
| 1197 | * can't be zero here | ||
| 1198 | */ | ||
| 1199 | return -EINVAL; | ||
| 1200 | } | ||
| 1201 | } | ||
| 1202 | if (attrs & DSP_MAPDONOTLOCK) | ||
| 1203 | hw_attrs.donotlockmpupage = 1; | ||
| 1204 | else | ||
| 1205 | hw_attrs.donotlockmpupage = 0; | ||
| 1206 | |||
| 1207 | if (attrs & DSP_MAPVMALLOCADDR) { | ||
| 1208 | return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr, | ||
| 1209 | ul_num_bytes, &hw_attrs); | ||
| 1210 | } | ||
| 1211 | /* | ||
| 1212 | * Do OS-specific user-va to pa translation. | ||
| 1213 | * Combine physically contiguous regions to reduce TLBs. | ||
| 1214 | * Pass the translated pa to pte_update. | ||
| 1215 | */ | ||
| 1216 | if ((attrs & DSP_MAPPHYSICALADDR)) { | ||
| 1217 | status = pte_update(dev_context, ul_mpu_addr, virt_addr, | ||
| 1218 | ul_num_bytes, &hw_attrs); | ||
| 1219 | goto func_cont; | ||
| 1220 | } | ||
| 1221 | |||
| 1222 | /* | ||
| 1223 | * Important Note: ul_mpu_addr is mapped from user application process | ||
| 1224 | * to current process - it must lie completely within the current | ||
| 1225 | * virtual memory address space in order to be of use to us here! | ||
| 1226 | */ | ||
| 1227 | down_read(&mm->mmap_sem); | ||
| 1228 | vma = find_vma(mm, ul_mpu_addr); | ||
| 1229 | if (vma) | ||
| 1230 | dev_dbg(bridge, | ||
| 1231 | "VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, " | ||
| 1232 | "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr, | ||
| 1233 | ul_num_bytes, vma->vm_start, vma->vm_end, | ||
| 1234 | vma->vm_flags); | ||
| 1235 | |||
| 1236 | /* | ||
| 1237 | * It is observed that under some circumstances, the user buffer is | ||
| 1238 | * spread across several VMAs. So loop through and check if the entire | ||
| 1239 | * user buffer is covered | ||
| 1240 | */ | ||
| 1241 | while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) { | ||
| 1242 | /* jump to the next VMA region */ | ||
| 1243 | vma = find_vma(mm, vma->vm_end + 1); | ||
| 1244 | dev_dbg(bridge, | ||
| 1245 | "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, " | ||
| 1246 | "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr, | ||
| 1247 | ul_num_bytes, vma->vm_start, vma->vm_end, | ||
| 1248 | vma->vm_flags); | ||
| 1249 | } | ||
| 1250 | if (!vma) { | ||
| 1251 | pr_err("%s: Failed to get VMA region for 0x%x (%d)\n", | ||
| 1252 | __func__, ul_mpu_addr, ul_num_bytes); | ||
| 1253 | status = -EINVAL; | ||
| 1254 | up_read(&mm->mmap_sem); | ||
| 1255 | goto func_cont; | ||
| 1256 | } | ||
| 1257 | |||
| 1258 | if (vma->vm_flags & VM_IO) { | ||
| 1259 | num_usr_pgs = ul_num_bytes / PG_SIZE4K; | ||
| 1260 | mpu_addr = ul_mpu_addr; | ||
| 1261 | |||
| 1262 | /* Get the physical addresses for user buffer */ | ||
| 1263 | for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) { | ||
| 1264 | pa = user_va2_pa(mm, mpu_addr); | ||
| 1265 | if (!pa) { | ||
| 1266 | status = -EPERM; | ||
| 1267 | pr_err("DSPBRIDGE: VM_IO mapping physical" | ||
| 1268 | "address is invalid\n"); | ||
| 1269 | break; | ||
| 1270 | } | ||
| 1271 | if (pfn_valid(__phys_to_pfn(pa))) { | ||
| 1272 | pg = PHYS_TO_PAGE(pa); | ||
| 1273 | get_page(pg); | ||
| 1274 | if (page_count(pg) < 1) { | ||
| 1275 | pr_err("Bad page in VM_IO buffer\n"); | ||
| 1276 | bad_page_dump(pa, pg); | ||
| 1277 | } | ||
| 1278 | } | ||
| 1279 | status = pte_set(dev_context->pt_attrs, pa, | ||
| 1280 | va, HW_PAGE_SIZE4KB, &hw_attrs); | ||
| 1281 | if (status) | ||
| 1282 | break; | ||
| 1283 | |||
| 1284 | va += HW_PAGE_SIZE4KB; | ||
| 1285 | mpu_addr += HW_PAGE_SIZE4KB; | ||
| 1286 | pa += HW_PAGE_SIZE4KB; | ||
| 1287 | } | ||
| 1288 | } else { | ||
| 1289 | num_usr_pgs = ul_num_bytes / PG_SIZE4K; | ||
| 1290 | if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) | ||
| 1291 | write = 1; | ||
| 1292 | |||
| 1293 | for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) { | ||
| 1294 | pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1, | ||
| 1295 | write, 1, &mapped_page, NULL); | ||
| 1296 | if (pg_num > 0) { | ||
| 1297 | if (page_count(mapped_page) < 1) { | ||
| 1298 | pr_err("Bad page count after doing" | ||
| 1299 | "get_user_pages on" | ||
| 1300 | "user buffer\n"); | ||
| 1301 | bad_page_dump(page_to_phys(mapped_page), | ||
| 1302 | mapped_page); | ||
| 1303 | } | ||
| 1304 | status = pte_set(dev_context->pt_attrs, | ||
| 1305 | page_to_phys(mapped_page), va, | ||
| 1306 | HW_PAGE_SIZE4KB, &hw_attrs); | ||
| 1307 | if (status) | ||
| 1308 | break; | ||
| 1309 | |||
| 1310 | if (mapped_pages) | ||
| 1311 | mapped_pages[pg_i] = mapped_page; | ||
| 1312 | |||
| 1313 | va += HW_PAGE_SIZE4KB; | ||
| 1314 | ul_mpu_addr += HW_PAGE_SIZE4KB; | ||
| 1315 | } else { | ||
| 1316 | pr_err("DSPBRIDGE: get_user_pages FAILED," | ||
| 1317 | "MPU addr = 0x%x," | ||
| 1318 | "vma->vm_flags = 0x%lx," | ||
| 1319 | "get_user_pages Err" | ||
| 1320 | "Value = %d, Buffer" | ||
| 1321 | "size=0x%x\n", ul_mpu_addr, | ||
| 1322 | vma->vm_flags, pg_num, ul_num_bytes); | ||
| 1323 | status = -EPERM; | ||
| 1324 | break; | ||
| 1325 | } | ||
| 1326 | } | ||
| 1327 | } | ||
| 1328 | up_read(&mm->mmap_sem); | ||
| 1329 | func_cont: | ||
| 1330 | if (status) { | ||
| 1331 | /* | ||
| 1332 | * Roll back the mapped pages in case mapping failed in the | ||
| 1333 | * middle | ||
| 1334 | */ | ||
| 1335 | if (pg_i) { | ||
| 1336 | bridge_brd_mem_un_map(dev_context, virt_addr, | ||
| 1337 | (pg_i * PG_SIZE4K)); | ||
| 1338 | } | ||
| 1339 | status = -EPERM; | ||
| 1340 | } | ||
| 1341 | /* | ||
| 1342 | * In any case, flush the TLB. | ||
| 1343 | * This is done here instead of in pte_update to avoid unnecessary | ||
| 1344 | * repetition while mapping non-contiguous physical regions of a virtual | ||
| 1345 | * region | ||
| 1346 | */ | ||
| 1347 | flush_all(dev_context); | ||
| 1348 | dev_dbg(bridge, "%s status %x\n", __func__, status); | ||
| 1349 | return status; | ||
| 1350 | } | ||
| 1351 | |||
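Stripped of attribute parsing and error handling, bridge_brd_mem_map() above is a three-way dispatch on the buffer type. The following is a condensed paraphrase of its control flow, not a drop-in replacement:

    /* Condensed control flow of bridge_brd_mem_map() */
    if (attrs & DSP_MAPVMALLOCADDR)        /* kernel vmalloc buffer */
            return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
                                   ul_num_bytes, &hw_attrs);

    if (attrs & DSP_MAPPHYSICALADDR)       /* caller already has a PA */
            return pte_update(dev_context, ul_mpu_addr, virt_addr,
                              ul_num_bytes, &hw_attrs);

    /* User buffer: enter it page by page into the DSP page tables */
    down_read(&mm->mmap_sem);
    vma = find_vma(mm, ul_mpu_addr);
    if (vma->vm_flags & VM_IO)
            /* mmap()ed I/O memory: translate by walking the CPU tables */
            pa = user_va2_pa(mm, mpu_addr);
    else
            /* ordinary pages: pin them with get_user_pages() */
            pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
                                    write, 1, &mapped_page, NULL);
    up_read(&mm->mmap_sem);

Either branch ends in one pte_set() per 4 KB page, followed by a single flush_all() once the whole region has been entered.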
| 1352 | /* | ||
| 1353 | * ======== bridge_brd_mem_un_map ======== | ||
| 1354 | * Invalidate the PTEs for the DSP VA block to be unmapped. | ||
| 1355 | * | ||
| 1356 | * PTEs of a mapped memory block are contiguous in any page table. | ||
| 1357 | * So, instead of looking up the PTE address for every 4K block, | ||
| 1358 | * we clear consecutive PTEs until we unmap all the bytes | ||
| 1359 | */ | ||
| 1360 | static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt, | ||
| 1361 | u32 virt_addr, u32 ul_num_bytes) | ||
| 1362 | { | ||
| 1363 | u32 l1_base_va; | ||
| 1364 | u32 l2_base_va; | ||
| 1365 | u32 l2_base_pa; | ||
| 1366 | u32 l2_page_num; | ||
| 1367 | u32 pte_val; | ||
| 1368 | u32 pte_size; | ||
| 1369 | u32 pte_count; | ||
| 1370 | u32 pte_addr_l1; | ||
| 1371 | u32 pte_addr_l2 = 0; | ||
| 1372 | u32 rem_bytes; | ||
| 1373 | u32 rem_bytes_l2; | ||
| 1374 | u32 va_curr; | ||
| 1375 | struct page *pg = NULL; | ||
| 1376 | int status = 0; | ||
| 1377 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
| 1378 | struct pg_table_attrs *pt = dev_context->pt_attrs; | ||
| 1379 | u32 temp; | ||
| 1380 | u32 paddr; | ||
| 1381 | u32 numof4k_pages = 0; | ||
| 1382 | |||
| 1383 | va_curr = virt_addr; | ||
| 1384 | rem_bytes = ul_num_bytes; | ||
| 1385 | rem_bytes_l2 = 0; | ||
| 1386 | l1_base_va = pt->l1_base_va; | ||
| 1387 | pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr); | ||
| 1388 | dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, " | ||
| 1389 | "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr, | ||
| 1390 | ul_num_bytes, l1_base_va, pte_addr_l1); | ||
| 1391 | |||
| 1392 | while (rem_bytes && !status) { | ||
| 1393 | u32 va_curr_orig = va_curr; | ||
| 1394 | /* Find whether the L1 PTE points to a valid L2 PT */ | ||
| 1395 | pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr); | ||
| 1396 | pte_val = *(u32 *) pte_addr_l1; | ||
| 1397 | pte_size = hw_mmu_pte_size_l1(pte_val); | ||
| 1398 | |||
| 1399 | if (pte_size != HW_MMU_COARSE_PAGE_SIZE) | ||
| 1400 | goto skip_coarse_page; | ||
| 1401 | |||
| 1402 | /* | ||
| 1403 | * Get the L2 PA from the L1 PTE, and find | ||
| 1404 | * corresponding L2 VA | ||
| 1405 | */ | ||
| 1406 | l2_base_pa = hw_mmu_pte_coarse_l1(pte_val); | ||
| 1407 | l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va; | ||
| 1408 | l2_page_num = | ||
| 1409 | (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE; | ||
| 1410 | /* | ||
| 1411 | * Find the L2 PTE address from which we will start | ||
| 1412 | * clearing, the number of PTEs to be cleared on this | ||
| 1413 | * page, and the size of VA space that needs to be | ||
| 1414 | * cleared on this L2 page | ||
| 1415 | */ | ||
| 1416 | pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr); | ||
| 1417 | pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1); | ||
| 1418 | pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32); | ||
| 1419 | if (rem_bytes < (pte_count * PG_SIZE4K)) | ||
| 1420 | pte_count = rem_bytes / PG_SIZE4K; | ||
| 1421 | rem_bytes_l2 = pte_count * PG_SIZE4K; | ||
| 1422 | |||
| 1423 | /* | ||
| 1424 | * Unmap the VA space on this L2 PT. A quicker way | ||
| 1425 | * would be to clear pte_count entries starting from | ||
| 1426 | * pte_addr_l2. However, the code below checks that we don't | ||
| 1427 | * clear invalid entries, or less than 64KB for a 64KB | ||
| 1428 | * entry. Similar checking is done for the L1 PTEs | ||
| 1429 | * below | ||
| 1430 | */ | ||
| 1431 | while (rem_bytes_l2 && !status) { | ||
| 1432 | pte_val = *(u32 *) pte_addr_l2; | ||
| 1433 | pte_size = hw_mmu_pte_size_l2(pte_val); | ||
| 1434 | /* va_curr aligned to pte_size? */ | ||
| 1435 | if (pte_size == 0 || rem_bytes_l2 < pte_size || | ||
| 1436 | va_curr & (pte_size - 1)) { | ||
| 1437 | status = -EPERM; | ||
| 1438 | break; | ||
| 1439 | } | ||
| 1440 | |||
| 1441 | /* Collect Physical addresses from VA */ | ||
| 1442 | paddr = (pte_val & ~(pte_size - 1)); | ||
| 1443 | if (pte_size == HW_PAGE_SIZE64KB) | ||
| 1444 | numof4k_pages = 16; | ||
| 1445 | else | ||
| 1446 | numof4k_pages = 1; | ||
| 1447 | temp = 0; | ||
| 1448 | while (temp++ < numof4k_pages) { | ||
| 1449 | if (!pfn_valid(__phys_to_pfn(paddr))) { | ||
| 1450 | paddr += HW_PAGE_SIZE4KB; | ||
| 1451 | continue; | ||
| 1452 | } | ||
| 1453 | pg = PHYS_TO_PAGE(paddr); | ||
| 1454 | if (page_count(pg) < 1) { | ||
| 1455 | pr_info("DSPBRIDGE: UNMAP function: " | ||
| 1456 | "COUNT 0 FOR PA 0x%x, size = " | ||
| 1457 | "0x%x\n", paddr, ul_num_bytes); | ||
| 1458 | bad_page_dump(paddr, pg); | ||
| 1459 | } else { | ||
| 1460 | set_page_dirty(pg); | ||
| 1461 | page_cache_release(pg); | ||
| 1462 | } | ||
| 1463 | paddr += HW_PAGE_SIZE4KB; | ||
| 1464 | } | ||
| 1465 | if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) { | ||
| 1466 | status = -EPERM; | ||
| 1467 | goto EXIT_LOOP; | ||
| 1468 | } | ||
| 1469 | |||
| 1470 | status = 0; | ||
| 1471 | rem_bytes_l2 -= pte_size; | ||
| 1472 | va_curr += pte_size; | ||
| 1473 | pte_addr_l2 += (pte_size >> 12) * sizeof(u32); | ||
| 1474 | } | ||
| 1475 | spin_lock(&pt->pg_lock); | ||
| 1476 | if (rem_bytes_l2 == 0) { | ||
| 1477 | pt->pg_info[l2_page_num].num_entries -= pte_count; | ||
| 1478 | if (pt->pg_info[l2_page_num].num_entries == 0) { | ||
| 1479 | /* | ||
| 1480 | * Clear the L1 PTE pointing to the L2 PT | ||
| 1481 | */ | ||
| 1482 | if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig, | ||
| 1483 | HW_MMU_COARSE_PAGE_SIZE)) | ||
| 1484 | status = 0; | ||
| 1485 | else { | ||
| 1486 | status = -EPERM; | ||
| 1487 | spin_unlock(&pt->pg_lock); | ||
| 1488 | goto EXIT_LOOP; | ||
| 1489 | } | ||
| 1490 | } | ||
| 1491 | rem_bytes -= pte_count * PG_SIZE4K; | ||
| 1492 | } else | ||
| 1493 | status = -EPERM; | ||
| 1494 | |||
| 1495 | spin_unlock(&pt->pg_lock); | ||
| 1496 | continue; | ||
| 1497 | skip_coarse_page: | ||
| 1498 | /* va_curr aligned to pte_size? */ | ||
| 1499 | /* pte_size = 1 MB or 16 MB */ | ||
| 1500 | if (pte_size == 0 || rem_bytes < pte_size || | ||
| 1501 | va_curr & (pte_size - 1)) { | ||
| 1502 | status = -EPERM; | ||
| 1503 | break; | ||
| 1504 | } | ||
| 1505 | |||
| 1506 | if (pte_size == HW_PAGE_SIZE1MB) | ||
| 1507 | numof4k_pages = 256; | ||
| 1508 | else | ||
| 1509 | numof4k_pages = 4096; | ||
| 1510 | temp = 0; | ||
| 1511 | /* Collect Physical addresses from VA */ | ||
| 1512 | paddr = (pte_val & ~(pte_size - 1)); | ||
| 1513 | while (temp++ < numof4k_pages) { | ||
| 1514 | if (pfn_valid(__phys_to_pfn(paddr))) { | ||
| 1515 | pg = PHYS_TO_PAGE(paddr); | ||
| 1516 | if (page_count(pg) < 1) { | ||
| 1517 | pr_info("DSPBRIDGE: UNMAP function: " | ||
| 1518 | "COUNT 0 FOR PA 0x%x, size = " | ||
| 1519 | "0x%x\n", paddr, ul_num_bytes); | ||
| 1520 | bad_page_dump(paddr, pg); | ||
| 1521 | } else { | ||
| 1522 | set_page_dirty(pg); | ||
| 1523 | page_cache_release(pg); | ||
| 1524 | } | ||
| 1525 | } | ||
| 1526 | paddr += HW_PAGE_SIZE4KB; | ||
| 1527 | } | ||
| 1528 | if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) { | ||
| 1529 | status = 0; | ||
| 1530 | rem_bytes -= pte_size; | ||
| 1531 | va_curr += pte_size; | ||
| 1532 | } else { | ||
| 1533 | status = -EPERM; | ||
| 1534 | goto EXIT_LOOP; | ||
| 1535 | } | ||
| 1536 | } | ||
| 1537 | /* | ||
| 1538 | * It is better to flush the TLB here, so that any stale entries | ||
| 1539 | * get flushed | ||
| 1540 | */ | ||
| 1541 | EXIT_LOOP: | ||
| 1542 | flush_all(dev_context); | ||
| 1543 | dev_dbg(bridge, | ||
| 1544 | "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x," | ||
| 1545 | " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1, | ||
| 1546 | pte_addr_l2, rem_bytes, rem_bytes_l2, status); | ||
| 1547 | return status; | ||
| 1548 | } | ||
| 1549 | |||
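Two constants in the loop above follow from the ARM two-level page-table format: an L2 (coarse) table is 1 KB and holds 256 four-byte PTEs, each mapping 4 KB, and a 64 KB large page is encoded as 16 consecutive duplicate entries. That is why the cursor advances by (pte_size >> 12) * sizeof(u32) bytes per cleared mapping. A standalone check of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t sz4k = 0x1000, sz64k = 0x10000;

            /* a 4 KB PTE advances the cursor by one 4-byte entry */
            assert((sz4k >> 12) * sizeof(uint32_t) == 4);
            /* a 64 KB page advances it past its 16 duplicate entries */
            assert((sz64k >> 12) * sizeof(uint32_t) == 64);
            return 0;
    }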
| 1550 | /* | ||
| 1551 | * ======== user_va2_pa ======== | ||
| 1552 | * Purpose: | ||
| 1553 | * This function walks through the page tables to convert a userland | ||
| 1554 | * virtual address to a physical address | ||
| 1555 | */ | ||
| 1556 | static u32 user_va2_pa(struct mm_struct *mm, u32 address) | ||
| 1557 | { | ||
| 1558 | pgd_t *pgd; | ||
| 1559 | pmd_t *pmd; | ||
| 1560 | pte_t *ptep, pte; | ||
| 1561 | |||
| 1562 | pgd = pgd_offset(mm, address); | ||
| 1563 | if (!(pgd_none(*pgd) || pgd_bad(*pgd))) { | ||
| 1564 | pmd = pmd_offset(pgd, address); | ||
| 1565 | if (!(pmd_none(*pmd) || pmd_bad(*pmd))) { | ||
| 1566 | ptep = pte_offset_map(pmd, address); | ||
| 1567 | if (ptep) { | ||
| 1568 | pte = *ptep; | ||
| 1569 | if (pte_present(pte)) | ||
| 1570 | return pte & PAGE_MASK; | ||
| 1571 | } | ||
| 1572 | } | ||
| 1573 | } | ||
| 1574 | |||
| 1575 | return 0; | ||
| 1576 | } | ||
| 1577 | |||
| 1578 | /* | ||
| 1579 | * ======== pte_update ======== | ||
| 1580 | * This function calculates the optimum page-aligned addresses and sizes. | ||
| 1581 | * Caller must pass page-aligned values | ||
| 1582 | */ | ||
| 1583 | static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, | ||
| 1584 | u32 va, u32 size, | ||
| 1585 | struct hw_mmu_map_attrs_t *map_attrs) | ||
| 1586 | { | ||
| 1587 | u32 i; | ||
| 1588 | u32 all_bits; | ||
| 1589 | u32 pa_curr = pa; | ||
| 1590 | u32 va_curr = va; | ||
| 1591 | u32 num_bytes = size; | ||
| 1592 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
| 1593 | int status = 0; | ||
| 1594 | u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB, | ||
| 1595 | HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB | ||
| 1596 | }; | ||
| 1597 | |||
| 1598 | while (num_bytes && !status) { | ||
| 1599 | /* To find the max. page size with which both PA & VA are | ||
| 1600 | * aligned */ | ||
| 1601 | all_bits = pa_curr | va_curr; | ||
| 1602 | |||
| 1603 | for (i = 0; i < 4; i++) { | ||
| 1604 | if ((num_bytes >= page_size[i]) && ((all_bits & | ||
| 1605 | (page_size[i] - | ||
| 1606 | 1)) == 0)) { | ||
| 1607 | status = | ||
| 1608 | pte_set(dev_context->pt_attrs, pa_curr, | ||
| 1609 | va_curr, page_size[i], map_attrs); | ||
| 1610 | pa_curr += page_size[i]; | ||
| 1611 | va_curr += page_size[i]; | ||
| 1612 | num_bytes -= page_size[i]; | ||
| 1613 | /* Don't try smaller sizes. Hopefully we have | ||
| 1614 | * reached an address aligned to a bigger page | ||
| 1615 | * size */ | ||
| 1616 | break; | ||
| 1617 | } | ||
| 1618 | } | ||
| 1619 | } | ||
| 1620 | |||
| 1621 | return status; | ||
| 1622 | } | ||
| 1623 | |||
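pte_update() above is a greedy loop: at each step it picks the largest page size (16 MB, 1 MB, 64 KB, 4 KB) to which both addresses are aligned and that still fits the remaining length. The selection can be exercised standalone; the constants and addresses below are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    static const uint32_t page_size[] =
            { 0x1000000, 0x100000, 0x10000, 0x1000 };  /* 16M, 1M, 64K, 4K */

    int main(void)
    {
            uint32_t pa = 0x80000000, va = 0x20000000, len = 0x102000;

            while (len) {                   /* len must be 4 KB aligned */
                    for (int i = 0; i < 4; i++) {
                            uint32_t sz = page_size[i];

                            if (len >= sz && !((pa | va) & (sz - 1))) {
                                    printf("map pa %#x -> va %#x, size %#x\n",
                                           pa, va, sz);
                                    pa += sz; va += sz; len -= sz;
                                    break;
                            }
                    }
            }
            return 0;   /* emits one 1 MB section, then two 4 KB pages */
    }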
| 1624 | /* | ||
| 1625 | * ======== pte_set ======== | ||
| 1626 | * This function calculates the PTE address (MPU virtual) to be updated. | ||
| 1627 | * It also manages the L2 page tables | ||
| 1628 | */ | ||
| 1629 | static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, | ||
| 1630 | u32 size, struct hw_mmu_map_attrs_t *attrs) | ||
| 1631 | { | ||
| 1632 | u32 i; | ||
| 1633 | u32 pte_val; | ||
| 1634 | u32 pte_addr_l1; | ||
| 1635 | u32 pte_size; | ||
| 1636 | /* Base address of the PT that will be updated */ | ||
| 1637 | u32 pg_tbl_va; | ||
| 1638 | u32 l1_base_va; | ||
| 1639 | /* Compiler warns that the next three variables might be used | ||
| 1640 | * uninitialized in this function. That doesn't seem to be the case, | ||
| 1641 | * but we work around it anyway. */ | ||
| 1642 | u32 l2_base_va = 0; | ||
| 1643 | u32 l2_base_pa = 0; | ||
| 1644 | u32 l2_page_num = 0; | ||
| 1645 | int status = 0; | ||
| 1646 | |||
| 1647 | l1_base_va = pt->l1_base_va; | ||
| 1648 | pg_tbl_va = l1_base_va; | ||
| 1649 | if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) { | ||
| 1650 | /* Find whether the L1 PTE points to a valid L2 PT */ | ||
| 1651 | pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va); | ||
| 1652 | if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) { | ||
| 1653 | pte_val = *(u32 *) pte_addr_l1; | ||
| 1654 | pte_size = hw_mmu_pte_size_l1(pte_val); | ||
| 1655 | } else { | ||
| 1656 | return -EPERM; | ||
| 1657 | } | ||
| 1658 | spin_lock(&pt->pg_lock); | ||
| 1659 | if (pte_size == HW_MMU_COARSE_PAGE_SIZE) { | ||
| 1660 | /* Get the L2 PA from the L1 PTE, and find | ||
| 1661 | * corresponding L2 VA */ | ||
| 1662 | l2_base_pa = hw_mmu_pte_coarse_l1(pte_val); | ||
| 1663 | l2_base_va = | ||
| 1664 | l2_base_pa - pt->l2_base_pa + pt->l2_base_va; | ||
| 1665 | l2_page_num = | ||
| 1666 | (l2_base_pa - | ||
| 1667 | pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE; | ||
| 1668 | } else if (pte_size == 0) { | ||
| 1669 | /* L1 PTE is invalid. Allocate a L2 PT and | ||
| 1670 | * point the L1 PTE to it */ | ||
| 1671 | /* Find a free L2 PT. */ | ||
| 1672 | for (i = 0; (i < pt->l2_num_pages) && | ||
| 1673 | (pt->pg_info[i].num_entries != 0); i++) | ||
| 1674 | ;; | ||
| 1675 | if (i < pt->l2_num_pages) { | ||
| 1676 | l2_page_num = i; | ||
| 1677 | l2_base_pa = pt->l2_base_pa + (l2_page_num * | ||
| 1678 | HW_MMU_COARSE_PAGE_SIZE); | ||
| 1679 | l2_base_va = pt->l2_base_va + (l2_page_num * | ||
| 1680 | HW_MMU_COARSE_PAGE_SIZE); | ||
| 1681 | /* Endianness attributes are ignored for | ||
| 1682 | * HW_MMU_COARSE_PAGE_SIZE */ | ||
| 1683 | status = | ||
| 1684 | hw_mmu_pte_set(l1_base_va, l2_base_pa, va, | ||
| 1685 | HW_MMU_COARSE_PAGE_SIZE, | ||
| 1686 | attrs); | ||
| 1687 | } else { | ||
| 1688 | status = -ENOMEM; | ||
| 1689 | } | ||
| 1690 | } else { | ||
| 1691 | /* Found a valid L1 PTE of another size. | ||
| 1692 | * Do not overwrite it. */ | ||
| 1693 | status = -EPERM; | ||
| 1694 | } | ||
| 1695 | if (!status) { | ||
| 1696 | pg_tbl_va = l2_base_va; | ||
| 1697 | if (size == HW_PAGE_SIZE64KB) | ||
| 1698 | pt->pg_info[l2_page_num].num_entries += 16; | ||
| 1699 | else | ||
| 1700 | pt->pg_info[l2_page_num].num_entries++; | ||
| 1701 | dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum " | ||
| 1702 | "%x, num_entries %x\n", l2_base_va, | ||
| 1703 | l2_base_pa, l2_page_num, | ||
| 1704 | pt->pg_info[l2_page_num].num_entries); | ||
| 1705 | } | ||
| 1706 | spin_unlock(&pt->pg_lock); | ||
| 1707 | } | ||
| 1708 | if (!status) { | ||
| 1709 | dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n", | ||
| 1710 | pg_tbl_va, pa, va, size); | ||
| 1711 | dev_dbg(bridge, "PTE: endianism %x, element_size %x, " | ||
| 1712 | "mixed_size %x\n", attrs->endianism, | ||
| 1713 | attrs->element_size, attrs->mixed_size); | ||
| 1714 | status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs); | ||
| 1715 | } | ||
| 1716 | |||
| 1717 | return status; | ||
| 1718 | } | ||
| 1719 | |||
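The num_entries bookkeeping in pte_set() pairs with the decrement in bridge_brd_mem_un_map(): each L2 table tracks how many of its 256 slots are live, a 64 KB mapping consumes 16 of them, and the L1 descriptor pointing at the table may be cleared once the count reaches zero. A hedged model of just that accounting (constants inlined, helper names invented for illustration):

    #include <stdint.h>

    struct l2_info { uint32_t num_entries; };   /* live PTEs out of 256 */

    static void l2_account_map(struct l2_info *pg, uint32_t size)
    {
            pg->num_entries += (size == 0x10000) ? 16 : 1;  /* 64 KB : 4 KB */
    }

    /* returns 1 when the L1 link to this table can be cleared */
    static int l2_account_unmap(struct l2_info *pg, uint32_t pte_count)
    {
            pg->num_entries -= pte_count;
            return pg->num_entries == 0;
    }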
| 1720 | /* Memory map kernel VA -- memory allocated with vmalloc */ | ||
| 1721 | static int mem_map_vmalloc(struct bridge_dev_context *dev_context, | ||
| 1722 | u32 ul_mpu_addr, u32 virt_addr, | ||
| 1723 | u32 ul_num_bytes, | ||
| 1724 | struct hw_mmu_map_attrs_t *hw_attrs) | ||
| 1725 | { | ||
| 1726 | int status = 0; | ||
| 1727 | struct page *page[1]; | ||
| 1728 | u32 i; | ||
| 1729 | u32 pa_curr; | ||
| 1730 | u32 pa_next; | ||
| 1731 | u32 va_curr; | ||
| 1732 | u32 size_curr; | ||
| 1733 | u32 num_pages; | ||
| 1734 | u32 pa; | ||
| 1735 | u32 num_of4k_pages; | ||
| 1736 | u32 temp = 0; | ||
| 1737 | |||
| 1738 | /* | ||
| 1739 | * Do kernel VA to PA translation. | ||
| 1740 | * Combine physically contiguous regions to reduce TLBs. | ||
| 1741 | * Pass the translated pa to pte_update. | ||
| 1742 | */ | ||
| 1743 | num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */ | ||
| 1744 | i = 0; | ||
| 1745 | va_curr = ul_mpu_addr; | ||
| 1746 | page[0] = vmalloc_to_page((void *)va_curr); | ||
| 1747 | pa_next = page_to_phys(page[0]); | ||
| 1748 | while (!status && (i < num_pages)) { | ||
| 1749 | /* | ||
| 1750 | * Reuse pa_next from the previous iteration to avoid | ||
| 1751 | * an extra va2pa call | ||
| 1752 | */ | ||
| 1753 | pa_curr = pa_next; | ||
| 1754 | size_curr = PAGE_SIZE; | ||
| 1755 | /* | ||
| 1756 | * If the next page is physically contiguous, | ||
| 1757 | * map it with the current one by increasing | ||
| 1758 | * the size of the region to be mapped | ||
| 1759 | */ | ||
| 1760 | while (++i < num_pages) { | ||
| 1761 | page[0] = | ||
| 1762 | vmalloc_to_page((void *)(va_curr + size_curr)); | ||
| 1763 | pa_next = page_to_phys(page[0]); | ||
| 1764 | |||
| 1765 | if (pa_next == (pa_curr + size_curr)) | ||
| 1766 | size_curr += PAGE_SIZE; | ||
| 1767 | else | ||
| 1768 | break; | ||
| 1769 | |||
| 1770 | } | ||
| 1771 | if (pa_next == 0) { | ||
| 1772 | status = -ENOMEM; | ||
| 1773 | break; | ||
| 1774 | } | ||
| 1775 | pa = pa_curr; | ||
| 1776 | num_of4k_pages = size_curr / HW_PAGE_SIZE4KB; | ||
| 1777 | for (temp = 0; temp < num_of4k_pages; temp++) { | ||
| 1778 | get_page(PHYS_TO_PAGE(pa)); | ||
| 1779 | pa += HW_PAGE_SIZE4KB; | ||
| 1780 | } | ||
| 1781 | status = pte_update(dev_context, pa_curr, virt_addr + | ||
| 1782 | (va_curr - ul_mpu_addr), size_curr, | ||
| 1783 | hw_attrs); | ||
| 1784 | va_curr += size_curr; | ||
| 1785 | } | ||
| 1786 | /* | ||
| 1787 | * In any case, flush the TLB. | ||
| 1788 | * This is done here instead of in pte_update to avoid unnecessary | ||
| 1789 | * repetition while mapping non-contiguous physical regions of a virtual | ||
| 1790 | * region | ||
| 1791 | */ | ||
| 1792 | flush_all(dev_context); | ||
| 1793 | dev_dbg(bridge, "%s status %x\n", __func__, status); | ||
| 1794 | return status; | ||
| 1795 | } | ||
| 1796 | |||
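The inner loop of mem_map_vmalloc() above coalesces runs of physically contiguous pages so pte_update() can emit fewer, larger PTEs. The same run detection in isolation, over a fake PA array since vmalloc_to_page() is kernel-only:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SZ 0x1000u

    int main(void)
    {
            /* fake per-page PAs: pages 0-2 contiguous, page 3 is not */
            uint32_t pa[] = { 0x81000000, 0x81001000, 0x81002000, 0x90000000 };
            unsigned int i = 0, n = 4;

            while (i < n) {
                    uint32_t start = pa[i], size = PAGE_SZ;

                    /* grow the region while the next page continues it */
                    while (++i < n && pa[i] == start + size)
                            size += PAGE_SZ;
                    printf("region pa %#x, size %#x\n", start, size);
            }
            return 0;   /* 12 KB at 0x81000000, then 4 KB at 0x90000000 */
    }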
| 1797 | /* | ||
| 931 | * ======== wait_for_start ======== | 1798 | * ======== wait_for_start ======== |
| 932 | * Wait for the signal from DSP that it has started, or time out. | 1799 | * Wait for the signal from DSP that it has started, or time out.
| 933 | */ | 1800 | */ |
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c index b57a9fd5e757..fb9026e1403c 100644 --- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c +++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c | |||
| @@ -31,6 +31,10 @@ | |||
| 31 | #include <dspbridge/dev.h> | 31 | #include <dspbridge/dev.h> |
| 32 | #include <dspbridge/iodefs.h> | 32 | #include <dspbridge/iodefs.h> |
| 33 | 33 | ||
| 34 | /* ------------------------------------ Hardware Abstraction Layer */ | ||
| 35 | #include <hw_defs.h> | ||
| 36 | #include <hw_mmu.h> | ||
| 37 | |||
| 34 | #include <dspbridge/pwr_sh.h> | 38 | #include <dspbridge/pwr_sh.h> |
| 35 | 39 | ||
| 36 | /* ----------------------------------- Bridge Driver */ | 40 | /* ----------------------------------- Bridge Driver */ |
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c index 66dbf02549e4..ba2961049dad 100644 --- a/drivers/staging/tidspbridge/core/tiomap_io.c +++ b/drivers/staging/tidspbridge/core/tiomap_io.c | |||
| @@ -134,16 +134,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt, | |||
| 134 | 134 | ||
| 135 | if (!status) { | 135 | if (!status) { |
| 136 | ul_tlb_base_virt = | 136 | ul_tlb_base_virt = |
| 137 | dev_context->sh_s.seg0_da * DSPWORDSIZE; | 137 | dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; |
| 138 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); | 138 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); |
| 139 | dw_ext_prog_virt_mem = dev_context->sh_s.seg0_va; | 139 | dw_ext_prog_virt_mem = |
| 140 | dev_context->atlb_entry[0].ul_gpp_va; | ||
| 140 | 141 | ||
| 141 | if (!trace_read) { | 142 | if (!trace_read) { |
| 142 | ul_shm_offset_virt = | 143 | ul_shm_offset_virt = |
| 143 | ul_shm_base_virt - ul_tlb_base_virt; | 144 | ul_shm_base_virt - ul_tlb_base_virt; |
| 144 | ul_shm_offset_virt += | 145 | ul_shm_offset_virt += |
| 145 | PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base + | 146 | PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base + |
| 146 | 1, PAGE_SIZE * 16); | 147 | 1, HW_PAGE_SIZE64KB); |
| 147 | dw_ext_prog_virt_mem -= ul_shm_offset_virt; | 148 | dw_ext_prog_virt_mem -= ul_shm_offset_virt; |
| 148 | dw_ext_prog_virt_mem += | 149 | dw_ext_prog_virt_mem += |
| 149 | (ul_ext_base - ul_dyn_ext_base); | 150 | (ul_ext_base - ul_dyn_ext_base); |
| @@ -317,9 +318,8 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, | |||
| 317 | ret = -EPERM; | 318 | ret = -EPERM; |
| 318 | 319 | ||
| 319 | if (!ret) { | 320 | if (!ret) { |
| 320 | ul_tlb_base_virt = dev_context->sh_s.seg0_da * | 321 | ul_tlb_base_virt = |
| 321 | DSPWORDSIZE; | 322 | dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; |
| 322 | |||
| 323 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); | 323 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); |
| 324 | 324 | ||
| 325 | if (symbols_reloaded) { | 325 | if (symbols_reloaded) { |
| @@ -337,7 +337,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, | |||
| 337 | ul_shm_base_virt - ul_tlb_base_virt; | 337 | ul_shm_base_virt - ul_tlb_base_virt; |
| 338 | if (trace_load) { | 338 | if (trace_load) { |
| 339 | dw_ext_prog_virt_mem = | 339 | dw_ext_prog_virt_mem = |
| 340 | dev_context->sh_s.seg0_va; | 340 | dev_context->atlb_entry[0].ul_gpp_va; |
| 341 | } else { | 341 | } else { |
| 342 | dw_ext_prog_virt_mem = host_res->dw_mem_base[1]; | 342 | dw_ext_prog_virt_mem = host_res->dw_mem_base[1]; |
| 343 | dw_ext_prog_virt_mem += | 343 | dw_ext_prog_virt_mem += |
| @@ -393,6 +393,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val) | |||
| 393 | omap_dspbridge_dev->dev.platform_data; | 393 | omap_dspbridge_dev->dev.platform_data; |
| 394 | struct cfg_hostres *resources = dev_context->resources; | 394 | struct cfg_hostres *resources = dev_context->resources; |
| 395 | int status = 0; | 395 | int status = 0; |
| 396 | u32 temp; | ||
| 396 | 397 | ||
| 397 | if (!dev_context->mbox) | 398 | if (!dev_context->mbox) |
| 398 | return 0; | 399 | return 0; |
| @@ -436,7 +437,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val) | |||
| 436 | omap_mbox_restore_ctx(dev_context->mbox); | 437 | omap_mbox_restore_ctx(dev_context->mbox); |
| 437 | 438 | ||
| 438 | /* Access MMU SYS CONFIG register to generate a short wakeup */ | 439 | /* Access MMU SYS CONFIG register to generate a short wakeup */ |
| 439 | iommu_read_reg(dev_context->dsp_mmu, MMU_SYSCONFIG); | 440 | temp = readl(resources->dw_dmmu_base + 0x10); |
| 440 | 441 | ||
| 441 | dev_context->dw_brd_state = BRD_RUNNING; | 442 | dev_context->dw_brd_state = BRD_RUNNING; |
| 442 | } else if (dev_context->dw_brd_state == BRD_RETENTION) { | 443 | } else if (dev_context->dw_brd_state == BRD_RETENTION) { |
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c index e24ea0c73914..3430418190da 100644 --- a/drivers/staging/tidspbridge/core/ue_deh.c +++ b/drivers/staging/tidspbridge/core/ue_deh.c | |||
| @@ -31,6 +31,57 @@ | |||
| 31 | #include <dspbridge/drv.h> | 31 | #include <dspbridge/drv.h> |
| 32 | #include <dspbridge/wdt.h> | 32 | #include <dspbridge/wdt.h> |
| 33 | 33 | ||
| 34 | static u32 fault_addr; | ||
| 35 | |||
| 36 | static void mmu_fault_dpc(unsigned long data) | ||
| 37 | { | ||
| 38 | struct deh_mgr *deh = (void *)data; | ||
| 39 | |||
| 40 | if (!deh) | ||
| 41 | return; | ||
| 42 | |||
| 43 | bridge_deh_notify(deh, DSP_MMUFAULT, 0); | ||
| 44 | } | ||
| 45 | |||
| 46 | static irqreturn_t mmu_fault_isr(int irq, void *data) | ||
| 47 | { | ||
| 48 | struct deh_mgr *deh = data; | ||
| 49 | struct cfg_hostres *resources; | ||
| 50 | u32 event; | ||
| 51 | |||
| 52 | if (!deh) | ||
| 53 | return IRQ_HANDLED; | ||
| 54 | |||
| 55 | resources = deh->hbridge_context->resources; | ||
| 56 | if (!resources) { | ||
| 57 | dev_dbg(bridge, "%s: Failed to get Host Resources\n", | ||
| 58 | __func__); | ||
| 59 | return IRQ_HANDLED; | ||
| 60 | } | ||
| 61 | |||
| 62 | hw_mmu_event_status(resources->dw_dmmu_base, &event); | ||
| 63 | if (event == HW_MMU_TRANSLATION_FAULT) { | ||
| 64 | hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr); | ||
| 65 | dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__, | ||
| 66 | event, fault_addr); | ||
| 67 | /* | ||
| 68 | * Schedule a DPC directly. In the future, it may be | ||
| 69 | * necessary to check if DSP MMU fault is intended for | ||
| 70 | * Bridge. | ||
| 71 | */ | ||
| 72 | tasklet_schedule(&deh->dpc_tasklet); | ||
| 73 | |||
| 74 | /* Disable the MMU events; otherwise, once the fault is | ||
| 75 | * cleared they will start raising interrupts again */ | ||
| 76 | hw_mmu_event_disable(resources->dw_dmmu_base, | ||
| 77 | HW_MMU_TRANSLATION_FAULT); | ||
| 78 | } else { | ||
| 79 | hw_mmu_event_disable(resources->dw_dmmu_base, | ||
| 80 | HW_MMU_ALL_INTERRUPTS); | ||
| 81 | } | ||
| 82 | return IRQ_HANDLED; | ||
| 83 | } | ||
| 84 | |||
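The fault path above is the classic top-half/bottom-half split: the ISR only latches the fault address and masks the interrupt source, deferring the expensive notification to a tasklet. A minimal standalone sketch of the pattern; read_fault_addr() and mask_fault_irq() are invented placeholders for the device-specific register accesses, everything else is the stock kernel API:

    #include <linux/interrupt.h>
    #include <linux/kernel.h>

    static u32 last_fault;                  /* latched by the ISR */
    static struct tasklet_struct fault_dpc;

    static u32 read_fault_addr(void) { return 0; }  /* placeholder */
    static void mask_fault_irq(void) { }            /* placeholder */

    static void fault_dpc_fn(unsigned long data)
    {
            pr_err("mmu fault at 0x%x\n", last_fault);  /* slow work here */
    }

    static irqreturn_t fault_isr(int irq, void *data)
    {
            last_fault = read_fault_addr();  /* 1. latch minimal state */
            mask_fault_irq();                /* 2. stop the interrupt storm */
            tasklet_schedule(&fault_dpc);    /* 3. defer the heavy work */
            return IRQ_HANDLED;
    }

    /* setup, e.g. in probe():
     *     tasklet_init(&fault_dpc, fault_dpc_fn, 0);
     *     request_irq(irq, fault_isr, 0, "example-mmu-fault", NULL);
     */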
| 34 | int bridge_deh_create(struct deh_mgr **ret_deh, | 85 | int bridge_deh_create(struct deh_mgr **ret_deh, |
| 35 | struct dev_object *hdev_obj) | 86 | struct dev_object *hdev_obj) |
| 36 | { | 87 | { |
| @@ -58,9 +109,18 @@ int bridge_deh_create(struct deh_mgr **ret_deh, | |||
| 58 | } | 109 | } |
| 59 | ntfy_init(deh->ntfy_obj); | 110 | ntfy_init(deh->ntfy_obj); |
| 60 | 111 | ||
| 112 | /* Create an MMU fault DPC */ | ||
| 113 | tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh); | ||
| 114 | |||
| 61 | /* Fill in context structure */ | 115 | /* Fill in context structure */ |
| 62 | deh->hbridge_context = hbridge_context; | 116 | deh->hbridge_context = hbridge_context; |
| 63 | 117 | ||
| 118 | /* Install ISR function for DSP MMU fault */ | ||
| 119 | status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0, | ||
| 120 | "DspBridge\tiommu fault", deh); | ||
| 121 | if (status < 0) | ||
| 122 | goto err; | ||
| 123 | |||
| 64 | *ret_deh = deh; | 124 | *ret_deh = deh; |
| 65 | return 0; | 125 | return 0; |
| 66 | 126 | ||
| @@ -80,6 +140,11 @@ int bridge_deh_destroy(struct deh_mgr *deh) | |||
| 80 | ntfy_delete(deh->ntfy_obj); | 140 | ntfy_delete(deh->ntfy_obj); |
| 81 | kfree(deh->ntfy_obj); | 141 | kfree(deh->ntfy_obj); |
| 82 | } | 142 | } |
| 143 | /* Disable DSP MMU fault */ | ||
| 144 | free_irq(INT_DSP_MMU_IRQ, deh); | ||
| 145 | |||
| 146 | /* Free DPC object */ | ||
| 147 | tasklet_kill(&deh->dpc_tasklet); | ||
| 83 | 148 | ||
| 84 | /* Deallocate the DEH manager object */ | 149 | /* Deallocate the DEH manager object */ |
| 85 | kfree(deh); | 150 | kfree(deh); |
| @@ -101,6 +166,48 @@ int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask, | |||
| 101 | return ntfy_unregister(deh->ntfy_obj, hnotification); | 166 | return ntfy_unregister(deh->ntfy_obj, hnotification); |
| 102 | } | 167 | } |
| 103 | 168 | ||
| 169 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
| 170 | static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) | ||
| 171 | { | ||
| 172 | struct cfg_hostres *resources; | ||
| 173 | struct hw_mmu_map_attrs_t map_attrs = { | ||
| 174 | .endianism = HW_LITTLE_ENDIAN, | ||
| 175 | .element_size = HW_ELEM_SIZE16BIT, | ||
| 176 | .mixed_size = HW_MMU_CPUES, | ||
| 177 | }; | ||
| 178 | void *dummy_va_addr; | ||
| 179 | |||
| 180 | resources = dev_context->resources; | ||
| 181 | dummy_va_addr = (void *)__get_free_page(GFP_ATOMIC); | ||
| 182 | |||
| 183 | /* | ||
| 184 | * Before acking the MMU fault, let's make sure the MMU can only | ||
| 185 | * access entry #0. Then add a new entry so that the DSP OS | ||
| 186 | * can continue in order to dump the stack. | ||
| 187 | */ | ||
| 188 | hw_mmu_twl_disable(resources->dw_dmmu_base); | ||
| 189 | hw_mmu_tlb_flush_all(resources->dw_dmmu_base); | ||
| 190 | |||
| 191 | hw_mmu_tlb_add(resources->dw_dmmu_base, | ||
| 192 | virt_to_phys(dummy_va_addr), fault_addr, | ||
| 193 | HW_PAGE_SIZE4KB, 1, | ||
| 194 | &map_attrs, HW_SET, HW_SET); | ||
| 195 | |||
| 196 | dsp_clk_enable(DSP_CLK_GPT8); | ||
| 197 | |||
| 198 | dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); | ||
| 199 | |||
| 200 | /* Clear MMU interrupt */ | ||
| 201 | hw_mmu_event_ack(resources->dw_dmmu_base, | ||
| 202 | HW_MMU_TRANSLATION_FAULT); | ||
| 203 | dump_dsp_stack(dev_context); | ||
| 204 | dsp_clk_disable(DSP_CLK_GPT8); | ||
| 205 | |||
| 206 | hw_mmu_disable(resources->dw_dmmu_base); | ||
| 207 | free_page((unsigned long)dummy_va_addr); | ||
| 208 | } | ||
| 209 | #endif | ||
| 210 | |||
| 104 | static inline const char *event_to_string(int event) | 211 | static inline const char *event_to_string(int event) |
| 105 | { | 212 | { |
| 106 | switch (event) { | 213 | switch (event) { |
| @@ -133,7 +240,13 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info) | |||
| 133 | #endif | 240 | #endif |
| 134 | break; | 241 | break; |
| 135 | case DSP_MMUFAULT: | 242 | case DSP_MMUFAULT: |
| 136 | dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, info); | 243 | dev_err(bridge, "%s: %s, addr=0x%x", __func__, |
| 244 | str, fault_addr); | ||
| 245 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
| 246 | print_dsp_trace_buffer(dev_context); | ||
| 247 | dump_dl_modules(dev_context); | ||
| 248 | mmu_fault_print_stack(dev_context); | ||
| 249 | #endif | ||
| 137 | break; | 250 | break; |
| 138 | default: | 251 | default: |
| 139 | dev_err(bridge, "%s: %s", __func__, str); | 252 | dev_err(bridge, "%s: %s", __func__, str); |
diff --git a/drivers/staging/tidspbridge/hw/EasiGlobal.h b/drivers/staging/tidspbridge/hw/EasiGlobal.h new file mode 100644 index 000000000000..e48d7f67c60a --- /dev/null +++ b/drivers/staging/tidspbridge/hw/EasiGlobal.h | |||
| @@ -0,0 +1,41 @@ | |||
| 1 | /* | ||
| 2 | * EasiGlobal.h | ||
| 3 | * | ||
| 4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
| 7 | * | ||
| 8 | * This package is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
| 13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
| 14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #ifndef _EASIGLOBAL_H | ||
| 18 | #define _EASIGLOBAL_H | ||
| 19 | #include <linux/types.h> | ||
| 20 | |||
| 21 | /* | ||
| 22 | * DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE | ||
| 23 | * | ||
| 24 | * DESCRIPTION: Defines used to describe register types for EASI-checker tests. | ||
| 25 | */ | ||
| 26 | |||
| 27 | #define READ_ONLY 1 | ||
| 28 | #define WRITE_ONLY 2 | ||
| 29 | #define READ_WRITE 3 | ||
| 30 | |||
| 31 | /* | ||
| 32 | * MACRO: _DEBUG_LEVEL1_EASI | ||
| 33 | * | ||
| 34 | * DESCRIPTION: A macro which can be used to indicate that a particular | ||
| 35 | * register access function was called. | ||
| 36 | * | ||
| 37 | * NOTE: We currently don't use this functionality. | ||
| 38 | */ | ||
| 39 | #define _DEBUG_LEVEL1_EASI(easi_num) ((void)0) | ||
| 40 | |||
| 41 | #endif /* _EASIGLOBAL_H */ | ||
diff --git a/drivers/staging/tidspbridge/hw/MMUAccInt.h b/drivers/staging/tidspbridge/hw/MMUAccInt.h new file mode 100644 index 000000000000..1cefca321d71 --- /dev/null +++ b/drivers/staging/tidspbridge/hw/MMUAccInt.h | |||
| @@ -0,0 +1,76 @@ | |||
| 1 | /* | ||
| 2 | * MMUAccInt.h | ||
| 3 | * | ||
| 4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
| 7 | * | ||
| 8 | * This package is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
| 13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
| 14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #ifndef _MMU_ACC_INT_H | ||
| 18 | #define _MMU_ACC_INT_H | ||
| 19 | |||
| 20 | /* Mappings of level 1 EASI function numbers to function names */ | ||
| 21 | |||
| 22 | #define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3) | ||
| 23 | #define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32 (MMU_BASE_EASIL1 + 17) | ||
| 24 | #define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32 (MMU_BASE_EASIL1 + 39) | ||
| 25 | #define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 51) | ||
| 26 | #define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102) | ||
| 27 | #define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103) | ||
| 28 | #define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156) | ||
| 29 | #define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174) | ||
| 30 | #define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 180) | ||
| 31 | #define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 190) | ||
| 32 | #define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32 (MMU_BASE_EASIL1 + 194) | ||
| 33 | #define EASIL1_MMUMMU_TTB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 198) | ||
| 34 | #define EASIL1_MMUMMU_LOCK_READ_REGISTER32 (MMU_BASE_EASIL1 + 203) | ||
| 35 | #define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 204) | ||
| 36 | #define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32 (MMU_BASE_EASIL1 + 205) | ||
| 37 | #define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209) | ||
| 38 | #define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211) | ||
| 39 | #define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32 (MMU_BASE_EASIL1 + 212) | ||
| 40 | #define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32 (MMU_BASE_EASIL1 + 213) | ||
| 41 | #define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 214) | ||
| 42 | #define EASIL1_MMUMMU_CAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 226) | ||
| 43 | #define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268) | ||
| 44 | #define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 322) | ||
| 45 | |||
| 46 | /* Register offset address definitions */ | ||
| 47 | #define MMU_MMU_SYSCONFIG_OFFSET 0x10 | ||
| 48 | #define MMU_MMU_IRQSTATUS_OFFSET 0x18 | ||
| 49 | #define MMU_MMU_IRQENABLE_OFFSET 0x1c | ||
| 50 | #define MMU_MMU_WALKING_ST_OFFSET 0x40 | ||
| 51 | #define MMU_MMU_CNTL_OFFSET 0x44 | ||
| 52 | #define MMU_MMU_FAULT_AD_OFFSET 0x48 | ||
| 53 | #define MMU_MMU_TTB_OFFSET 0x4c | ||
| 54 | #define MMU_MMU_LOCK_OFFSET 0x50 | ||
| 55 | #define MMU_MMU_LD_TLB_OFFSET 0x54 | ||
| 56 | #define MMU_MMU_CAM_OFFSET 0x58 | ||
| 57 | #define MMU_MMU_RAM_OFFSET 0x5c | ||
| 58 | #define MMU_MMU_GFLUSH_OFFSET 0x60 | ||
| 59 | #define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64 | ||
| 60 | /* Bitfield mask and offset declarations */ | ||
| 61 | #define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK 0x18 | ||
| 62 | #define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET 3 | ||
| 63 | #define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK 0x1 | ||
| 64 | #define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET 0 | ||
| 65 | #define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1 | ||
| 66 | #define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET 0 | ||
| 67 | #define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4 | ||
| 68 | #define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2 | ||
| 69 | #define MMU_MMU_CNTL_MMU_ENABLE_MASK 0x2 | ||
| 70 | #define MMU_MMU_CNTL_MMU_ENABLE_OFFSET 1 | ||
| 71 | #define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00 | ||
| 72 | #define MMU_MMU_LOCK_BASE_VALUE_OFFSET 10 | ||
| 73 | #define MMU_MMU_LOCK_CURRENT_VICTIM_MASK 0x3f0 | ||
| 74 | #define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET 4 | ||
| 75 | |||
| 76 | #endif /* _MMU_ACC_INT_H */ | ||
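Each mask/offset pair above feeds the read-modify-write accessors defined in MMURegAcM.h below: read the register, clear the field through its mask, shift the new value into position, OR it back in, and write the result. The same pattern as a plain function, a generic sketch rather than the driver's API:

    #include <stdint.h>

    /* Read-modify-write one register field given its mask and bit offset. */
    static uint32_t field_write(uint32_t reg, uint32_t mask,
                                uint32_t offset, uint32_t value)
    {
            reg &= ~mask;                        /* clear the field */
            reg |= (value << offset) & mask;     /* insert the new value */
            return reg;
    }

    /*
     * e.g. setting MMU_ENABLE (mask 0x2, offset 1) in a CNTL value of 0x4:
     *     field_write(0x4, 0x2, 1, 1) == 0x6
     */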
diff --git a/drivers/staging/tidspbridge/hw/MMURegAcM.h b/drivers/staging/tidspbridge/hw/MMURegAcM.h new file mode 100644 index 000000000000..ab1a16da731c --- /dev/null +++ b/drivers/staging/tidspbridge/hw/MMURegAcM.h | |||
| @@ -0,0 +1,225 @@ | |||
| 1 | /* | ||
| 2 | * MMURegAcM.h | ||
| 3 | * | ||
| 4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
| 7 | * | ||
| 8 | * This package is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
| 13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
| 14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #ifndef _MMU_REG_ACM_H | ||
| 18 | #define _MMU_REG_ACM_H | ||
| 19 | |||
| 20 | #include <linux/io.h> | ||
| 21 | #include <EasiGlobal.h> | ||
| 22 | |||
| 23 | #include "MMUAccInt.h" | ||
| 24 | |||
| 25 | #if defined(USE_LEVEL_1_MACROS) | ||
| 26 | |||
| 27 | #define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\ | ||
| 28 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\ | ||
| 29 | __raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET)) | ||
| 30 | |||
| 31 | #define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\ | ||
| 32 | {\ | ||
| 33 | const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\ | ||
| 34 | register u32 data = __raw_readl((base_address)+offset);\ | ||
| 35 | register u32 new_value = (value);\ | ||
| 36 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\ | ||
| 37 | data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\ | ||
| 38 | new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\ | ||
| 39 | new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\ | ||
| 40 | new_value |= data;\ | ||
| 41 | __raw_writel(new_value, base_address+offset);\ | ||
| 42 | } | ||
| 43 | |||
| 44 | #define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\ | ||
| 45 | {\ | ||
| 46 | const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\ | ||
| 47 | register u32 data = __raw_readl((base_address)+offset);\ | ||
| 48 | register u32 new_value = (value);\ | ||
| 49 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\ | ||
| 50 | data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\ | ||
| 51 | new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\ | ||
| 52 | new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\ | ||
| 53 | new_value |= data;\ | ||
| 54 | __raw_writel(new_value, base_address+offset);\ | ||
| 55 | } | ||
| 56 | |||
| 57 | #define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\ | ||
| 58 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_READ_REGISTER32),\ | ||
| 59 | __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET)) | ||
| 60 | |||
| 61 | #define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\ | ||
| 62 | {\ | ||
| 63 | const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\ | ||
| 64 | register u32 new_value = (value);\ | ||
| 65 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\ | ||
| 66 | __raw_writel(new_value, (base_address)+offset);\ | ||
| 67 | } | ||
| 68 | |||
| 69 | #define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\ | ||
| 70 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\ | ||
| 71 | __raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET)) | ||
| 72 | |||
| 73 | #define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\ | ||
| 74 | {\ | ||
| 75 | const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\ | ||
| 76 | register u32 new_value = (value);\ | ||
| 77 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\ | ||
| 78 | __raw_writel(new_value, (base_address)+offset);\ | ||
| 79 | } | ||
| 80 | |||
| 81 | #define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\ | ||
| 82 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\ | ||
| 83 | (((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\ | ||
| 84 | & MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\ | ||
| 85 | MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET)) | ||
| 86 | |||
| 87 | #define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\ | ||
| 88 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\ | ||
| 89 | (((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\ | ||
| 90 | MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\ | ||
| 91 | MMU_MMU_CNTL_TWL_ENABLE_OFFSET)) | ||
| 92 | |||
| 93 | #define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\ | ||
| 94 | {\ | ||
| 95 | const u32 offset = MMU_MMU_CNTL_OFFSET;\ | ||
| 96 | register u32 data = __raw_readl((base_address)+offset);\ | ||
| 97 | register u32 new_value = (value);\ | ||
| 98 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\ | ||
| 99 | data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\ | ||
| 100 | new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\ | ||
| 101 | new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\ | ||
| 102 | new_value |= data;\ | ||
| 103 | __raw_writel(new_value, base_address+offset);\ | ||
| 104 | } | ||
| 105 | |||
| 106 | #define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\ | ||
| 107 | {\ | ||
| 108 | const u32 offset = MMU_MMU_CNTL_OFFSET;\ | ||
| 109 | register u32 data = __raw_readl((base_address)+offset);\ | ||
| 110 | register u32 new_value = (value);\ | ||
| 111 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\ | ||
| 112 | data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\ | ||
| 113 | new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\ | ||
| 114 | new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\ | ||
| 115 | new_value |= data;\ | ||
| 116 | __raw_writel(new_value, (base_address)+offset);\ | ||
| 117 | } | ||
| 118 | |||
| 119 | #define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\ | ||
| 120 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\ | ||
| 121 | __raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET)) | ||
| 122 | |||
| 123 | #define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\ | ||
| 124 | {\ | ||
| 125 | const u32 offset = MMU_MMU_TTB_OFFSET;\ | ||
| 126 | register u32 new_value = (value);\ | ||
| 127 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\ | ||
| 128 | __raw_writel(new_value, (base_address)+offset);\ | ||
| 129 | } | ||
| 130 | |||
| 131 | #define MMUMMU_LOCK_READ_REGISTER32(base_address)\ | ||
| 132 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\ | ||
| 133 | __raw_readl((base_address)+MMU_MMU_LOCK_OFFSET)) | ||
| 134 | |||
| 135 | #define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\ | ||
| 136 | {\ | ||
| 137 | const u32 offset = MMU_MMU_LOCK_OFFSET;\ | ||
| 138 | register u32 new_value = (value);\ | ||
| 139 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\ | ||
| 140 | __raw_writel(new_value, (base_address)+offset);\ | ||
| 141 | } | ||
| 142 | |||
| 143 | #define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\ | ||
| 144 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\ | ||
| 145 | (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\ | ||
| 146 | MMU_MMU_LOCK_BASE_VALUE_MASK) >>\ | ||
| 147 | MMU_MMU_LOCK_BASE_VALUE_OFFSET)) | ||
| 148 | |||
| 149 | #define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\ | ||
| 150 | {\ | ||
| 151 | const u32 offset = MMU_MMU_LOCK_OFFSET;\ | ||
| 152 | register u32 data = __raw_readl((base_address)+offset);\ | ||
| 153 | register u32 new_value = (value);\ | ||
| 154 | _DEBUG_LEVEL1_EASI(easil1_mmummu_lock_base_value_write32);\ | ||
| 155 | data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\ | ||
| 156 | new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\ | ||
| 157 | new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\ | ||
| 158 | new_value |= data;\ | ||
| 159 | __raw_writel(new_value, (base_address)+offset);\ | ||
| 160 | } | ||
| 161 | |||
| 162 | #define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\ | ||
| 163 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\ | ||
| 164 | (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\ | ||
| 165 | MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\ | ||
| 166 | MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET)) | ||
| 167 | |||
| 168 | #define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\ | ||
| 169 | {\ | ||
| 170 | const u32 offset = MMU_MMU_LOCK_OFFSET;\ | ||
| 171 | register u32 data = __raw_readl((base_address)+offset);\ | ||
| 172 | register u32 new_value = (value);\ | ||
| 173 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\ | ||
| 174 | data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\ | ||
| 175 | new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\ | ||
| 176 | new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\ | ||
| 177 | new_value |= data;\ | ||
| 178 | __raw_writel(new_value, (base_address)+offset);\ | ||
| 179 | } | ||
| 180 | |||
| 181 | #define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\ | ||
| 182 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\ | ||
| 183 | (((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\ | ||
| 184 | (((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\ | ||
| 185 | MMU_MMU_LOCK_CURRENT_VICTIM_MASK))) | ||
| 186 | |||
| 187 | #define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\ | ||
| 188 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\ | ||
| 189 | __raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET)) | ||
| 190 | |||
| 191 | #define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\ | ||
| 192 | {\ | ||
| 193 | const u32 offset = MMU_MMU_LD_TLB_OFFSET;\ | ||
| 194 | register u32 new_value = (value);\ | ||
| 195 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\ | ||
| 196 | __raw_writel(new_value, (base_address)+offset);\ | ||
| 197 | } | ||
| 198 | |||
| 199 | #define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\ | ||
| 200 | {\ | ||
| 201 | const u32 offset = MMU_MMU_CAM_OFFSET;\ | ||
| 202 | register u32 new_value = (value);\ | ||
| 203 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\ | ||
| 204 | __raw_writel(new_value, (base_address)+offset);\ | ||
| 205 | } | ||
| 206 | |||
| 207 | #define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\ | ||
| 208 | {\ | ||
| 209 | const u32 offset = MMU_MMU_RAM_OFFSET;\ | ||
| 210 | register u32 new_value = (value);\ | ||
| 211 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\ | ||
| 212 | __raw_writel(new_value, (base_address)+offset);\ | ||
| 213 | } | ||
| 214 | |||
| 215 | #define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\ | ||
| 216 | {\ | ||
| 217 | const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\ | ||
| 218 | register u32 new_value = (value);\ | ||
| 219 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\ | ||
| 220 | __raw_writel(new_value, (base_address)+offset);\ | ||
| 221 | } | ||
| 222 | |||
| 223 | #endif /* USE_LEVEL_1_MACROS */ | ||
| 224 | |||
| 225 | #endif /* _MMU_REG_ACM_H */ | ||
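Every MMUMMU_*_WRITE32 field macro above expands to the same read-modify-write shape: read the register, clear the target field with its mask, shift the new value into position, clip it to the field, merge, and write the word back. A minimal standalone sketch of that pattern follows; the mask and offset values are illustrative placeholders, not the real OMAP MMU register layout.

```c
#include <stdint.h>

/* Illustrative field update, mirroring the macro bodies above. */
#define EX_FIELD_MASK   0x00000008u	/* placeholder field mask */
#define EX_FIELD_OFFSET 3		/* placeholder bit offset */

static uint32_t ex_field_write32(uint32_t reg, uint32_t value)
{
	uint32_t data = reg & ~EX_FIELD_MASK;	/* preserve the other bits */

	value <<= EX_FIELD_OFFSET;		/* move value into the field */
	value &= EX_FIELD_MASK;			/* clip to the field width */
	return data | value;			/* merged word to write back */
}
```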
diff --git a/drivers/staging/tidspbridge/hw/hw_defs.h b/drivers/staging/tidspbridge/hw/hw_defs.h new file mode 100644 index 000000000000..d5266d4c163f --- /dev/null +++ b/drivers/staging/tidspbridge/hw/hw_defs.h | |||
| @@ -0,0 +1,58 @@ | |||
| 1 | /* | ||
| 2 | * hw_defs.h | ||
| 3 | * | ||
| 4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
| 5 | * | ||
| 6 | * Global HW definitions | ||
| 7 | * | ||
| 8 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
| 9 | * | ||
| 10 | * This package is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License version 2 as | ||
| 12 | * published by the Free Software Foundation. | ||
| 13 | * | ||
| 14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
| 15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
| 16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #ifndef _HW_DEFS_H | ||
| 20 | #define _HW_DEFS_H | ||
| 21 | |||
| 22 | /* Page size */ | ||
| 23 | #define HW_PAGE_SIZE4KB 0x1000 | ||
| 24 | #define HW_PAGE_SIZE64KB 0x10000 | ||
| 25 | #define HW_PAGE_SIZE1MB 0x100000 | ||
| 26 | #define HW_PAGE_SIZE16MB 0x1000000 | ||
| 27 | |||
| 28 | /* hw_status: return type for HW API */ | ||
| 29 | typedef long hw_status; | ||
| 30 | |||
| 31 | /* Values used to set and clear any bit */ | ||
| 32 | #define HW_CLEAR 0 | ||
| 33 | #define HW_SET 1 | ||
| 34 | |||
| 35 | /* hw_endianism_t: Enumerated Type used to specify the endianism | ||
| 36 | * Do NOT change these values. They are used as bit fields. */ | ||
| 37 | enum hw_endianism_t { | ||
| 38 | HW_LITTLE_ENDIAN, | ||
| 39 | HW_BIG_ENDIAN | ||
| 40 | }; | ||
| 41 | |||
| 42 | /* hw_element_size_t: Enumerated Type used to specify the element size | ||
| 43 | * Do NOT change these values. They are used as bit fields. */ | ||
| 44 | enum hw_element_size_t { | ||
| 45 | HW_ELEM_SIZE8BIT, | ||
| 46 | HW_ELEM_SIZE16BIT, | ||
| 47 | HW_ELEM_SIZE32BIT, | ||
| 48 | HW_ELEM_SIZE64BIT | ||
| 49 | }; | ||
| 50 | |||
| 51 | /* hw_idle_mode_t: Enumerated Type used to specify Idle modes */ | ||
| 52 | enum hw_idle_mode_t { | ||
| 53 | HW_FORCE_IDLE, | ||
| 54 | HW_NO_IDLE, | ||
| 55 | HW_SMART_IDLE | ||
| 56 | }; | ||
| 57 | |||
| 58 | #endif /* _HW_DEFS_H */ | ||
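Later code in hw_mmu.c switches on these HW_PAGE_SIZE* byte counts and rejects anything else with -EINVAL. A small hedged sketch of that validation; is_valid_page_size is a hypothetical helper, not part of the driver.

```c
#include <linux/types.h>
#include <hw_defs.h>

/* Hypothetical helper: accept only the four page sizes the DSP MMU
 * supports, the same set hw_mmu_tlb_add() switches on. */
static int is_valid_page_size(u32 page_sz)
{
	switch (page_sz) {
	case HW_PAGE_SIZE4KB:
	case HW_PAGE_SIZE64KB:
	case HW_PAGE_SIZE1MB:
	case HW_PAGE_SIZE16MB:
		return 1;
	default:
		return 0;
	}
}
```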
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c new file mode 100644 index 000000000000..014f5d5293ae --- /dev/null +++ b/drivers/staging/tidspbridge/hw/hw_mmu.c | |||
| @@ -0,0 +1,562 @@ | |||
| 1 | /* | ||
| 2 | * hw_mmu.c | ||
| 3 | * | ||
| 4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
| 5 | * | ||
| 6 | * API definitions to setup MMU TLB and PTE | ||
| 7 | * | ||
| 8 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
| 9 | * | ||
| 10 | * This package is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License version 2 as | ||
| 12 | * published by the Free Software Foundation. | ||
| 13 | * | ||
| 14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
| 15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
| 16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include <linux/io.h> | ||
| 20 | #include "MMURegAcM.h" | ||
| 21 | #include <hw_defs.h> | ||
| 22 | #include <hw_mmu.h> | ||
| 23 | #include <linux/types.h> | ||
| 24 | #include <linux/err.h> | ||
| 25 | |||
| 26 | #define MMU_BASE_VAL_MASK 0xFC00 | ||
| 27 | #define MMU_PAGE_MAX 3 | ||
| 28 | #define MMU_ELEMENTSIZE_MAX 3 | ||
| 29 | #define MMU_ADDR_MASK 0xFFFFF000 | ||
| 30 | #define MMU_TTB_MASK 0xFFFFC000 | ||
| 31 | #define MMU_SECTION_ADDR_MASK 0xFFF00000 | ||
| 32 | #define MMU_SSECTION_ADDR_MASK 0xFF000000 | ||
| 33 | #define MMU_PAGE_TABLE_MASK 0xFFFFFC00 | ||
| 34 | #define MMU_LARGE_PAGE_MASK 0xFFFF0000 | ||
| 35 | #define MMU_SMALL_PAGE_MASK 0xFFFFF000 | ||
| 36 | |||
| 37 | #define MMU_LOAD_TLB 0x00000001 | ||
| 38 | #define MMU_GFLUSH 0x60 | ||
| 39 | |||
| 40 | /* | ||
| 41 | * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size (SLSS) | ||
| 42 | */ | ||
| 43 | enum hw_mmu_page_size_t { | ||
| 44 | HW_MMU_SECTION, | ||
| 45 | HW_MMU_LARGE_PAGE, | ||
| 46 | HW_MMU_SMALL_PAGE, | ||
| 47 | HW_MMU_SUPERSECTION | ||
| 48 | }; | ||
| 49 | |||
| 50 | /* | ||
| 51 | * FUNCTION : mmu_flush_entry | ||
| 52 | * | ||
| 53 | * INPUTS: | ||
| 54 | * | ||
| 55 | * Identifier : base_address | ||
| 56 | * Type : const u32 | ||
| 57 | * Description : Base Address of instance of MMU module | ||
| 58 | * | ||
| 59 | * RETURNS: | ||
| 60 | * | ||
| 61 | * Type : hw_status | ||
| 62 | * Description : 0 -- No errors occurred | ||
| 63 | * RET_BAD_NULL_PARAM -- A Pointer | ||
| 64 | * Parameter was set to NULL | ||
| 65 | * | ||
| 66 | * PURPOSE: : Flush the TLB entry pointed to by the | ||
| 67 | * lock counter register, | ||
| 68 | * even if this entry is marked protected | ||
| 69 | * | ||
| 70 | * METHOD: : Check the Input parameter and Flush a | ||
| 71 | * single entry in the TLB. | ||
| 72 | */ | ||
| 73 | static hw_status mmu_flush_entry(const void __iomem *base_address); | ||
| 74 | |||
| 75 | /* | ||
| 76 | * FUNCTION : mmu_set_cam_entry | ||
| 77 | * | ||
| 78 | * INPUTS: | ||
| 79 | * | ||
| 80 | * Identifier : base_address | ||
| 81 | * Type : const u32 | ||
| 82 | * Description : Base Address of instance of MMU module | ||
| 83 | * | ||
| 84 | * Identifier : page_sz | ||
| 85 | * Type : const u32 | ||
| 86 | * Description : It indicates the page size | ||
| 87 | * | ||
| 88 | * Identifier : preserved_bit | ||
| 89 | * Type : const u32 | ||
| 90 | * Description : It indicates the TLB entry is preserved entry | ||
| 91 | * or not | ||
| 92 | * | ||
| 93 | * Identifier : valid_bit | ||
| 94 | * Type : const u32 | ||
| 95 | * Description : It indicates the TLB entry is valid entry or not | ||
| 96 | * | ||
| 97 | * | ||
| 98 | * Identifier : virtual_addr_tag | ||
| 99 | * Type : const u32 | ||
| 100 | * Description : virtual Address | ||
| 101 | * | ||
| 102 | * RETURNS: | ||
| 103 | * | ||
| 104 | * Type : hw_status | ||
| 105 | * Description : 0 -- No errors occurred | ||
| 106 | * RET_BAD_NULL_PARAM -- A Pointer Parameter | ||
| 107 | * was set to NULL | ||
| 108 | * RET_PARAM_OUT_OF_RANGE -- Input Parameter out | ||
| 109 | * of Range | ||
| 110 | * | ||
| 111 | * PURPOSE: : Set MMU_CAM reg | ||
| 112 | * | ||
| 113 | * METHOD: : Check the Input parameters and set the CAM entry. | ||
| 114 | */ | ||
| 115 | static hw_status mmu_set_cam_entry(const void __iomem *base_address, | ||
| 116 | const u32 page_sz, | ||
| 117 | const u32 preserved_bit, | ||
| 118 | const u32 valid_bit, | ||
| 119 | const u32 virtual_addr_tag); | ||
| 120 | |||
| 121 | /* | ||
| 122 | * FUNCTION : mmu_set_ram_entry | ||
| 123 | * | ||
| 124 | * INPUTS: | ||
| 125 | * | ||
| 126 | * Identifier : base_address | ||
| 127 | * Type : const u32 | ||
| 128 | * Description : Base Address of instance of MMU module | ||
| 129 | * | ||
| 130 | * Identifier : physical_addr | ||
| 131 | * Type : const u32 | ||
| 132 | * Description : Physical Address to which the corresponding | ||
| 133 | * virtual Address should point | ||
| 134 | * | ||
| 135 | * Identifier : endianism | ||
| 136 | * Type : hw_endianism_t | ||
| 137 | * Description : endianism for the given page | ||
| 138 | * | ||
| 139 | * Identifier : element_size | ||
| 140 | * Type : hw_element_size_t | ||
| 141 | * Description : The element size (8, 16, 32 or 64 bit) | ||
| 142 | * | ||
| 143 | * Identifier : mixed_size | ||
| 144 | * Type : hw_mmu_mixed_size_t | ||
| 145 | * Description : Element Size to follow CPU or TLB | ||
| 146 | * | ||
| 147 | * RETURNS: | ||
| 148 | * | ||
| 149 | * Type : hw_status | ||
| 150 | * Description : 0 -- No errors occurred | ||
| 151 | * RET_BAD_NULL_PARAM -- A Pointer Parameter | ||
| 152 | * was set to NULL | ||
| 153 | * RET_PARAM_OUT_OF_RANGE -- Input Parameter | ||
| 154 | * out of Range | ||
| 155 | * | ||
| 156 | * PURPOSE: : Set MMU_CAM reg | ||
| 157 | * | ||
| 158 | * METHOD: : Check the Input parameters and set the RAM entry. | ||
| 159 | */ | ||
| 160 | static hw_status mmu_set_ram_entry(const void __iomem *base_address, | ||
| 161 | const u32 physical_addr, | ||
| 162 | enum hw_endianism_t endianism, | ||
| 163 | enum hw_element_size_t element_size, | ||
| 164 | enum hw_mmu_mixed_size_t mixed_size); | ||
| 165 | |||
| 166 | /* HW FUNCTIONS */ | ||
| 167 | |||
| 168 | hw_status hw_mmu_enable(const void __iomem *base_address) | ||
| 169 | { | ||
| 170 | hw_status status = 0; | ||
| 171 | |||
| 172 | MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET); | ||
| 173 | |||
| 174 | return status; | ||
| 175 | } | ||
| 176 | |||
| 177 | hw_status hw_mmu_disable(const void __iomem *base_address) | ||
| 178 | { | ||
| 179 | hw_status status = 0; | ||
| 180 | |||
| 181 | MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR); | ||
| 182 | |||
| 183 | return status; | ||
| 184 | } | ||
| 185 | |||
| 186 | hw_status hw_mmu_num_locked_set(const void __iomem *base_address, | ||
| 187 | u32 num_locked_entries) | ||
| 188 | { | ||
| 189 | hw_status status = 0; | ||
| 190 | |||
| 191 | MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries); | ||
| 192 | |||
| 193 | return status; | ||
| 194 | } | ||
| 195 | |||
| 196 | hw_status hw_mmu_victim_num_set(const void __iomem *base_address, | ||
| 197 | u32 victim_entry_num) | ||
| 198 | { | ||
| 199 | hw_status status = 0; | ||
| 200 | |||
| 201 | MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num); | ||
| 202 | |||
| 203 | return status; | ||
| 204 | } | ||
| 205 | |||
| 206 | hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask) | ||
| 207 | { | ||
| 208 | hw_status status = 0; | ||
| 209 | |||
| 210 | MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask); | ||
| 211 | |||
| 212 | return status; | ||
| 213 | } | ||
| 214 | |||
| 215 | hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask) | ||
| 216 | { | ||
| 217 | hw_status status = 0; | ||
| 218 | u32 irq_reg; | ||
| 219 | |||
| 220 | irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address); | ||
| 221 | |||
| 222 | MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask); | ||
| 223 | |||
| 224 | return status; | ||
| 225 | } | ||
| 226 | |||
| 227 | hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask) | ||
| 228 | { | ||
| 229 | hw_status status = 0; | ||
| 230 | u32 irq_reg; | ||
| 231 | |||
| 232 | irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address); | ||
| 233 | |||
| 234 | MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask); | ||
| 235 | |||
| 236 | return status; | ||
| 237 | } | ||
| 238 | |||
| 239 | hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask) | ||
| 240 | { | ||
| 241 | hw_status status = 0; | ||
| 242 | |||
| 243 | *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address); | ||
| 244 | |||
| 245 | return status; | ||
| 246 | } | ||
| 247 | |||
| 248 | hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr) | ||
| 249 | { | ||
| 250 | hw_status status = 0; | ||
| 251 | |||
| 252 | /* read values from register */ | ||
| 253 | *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address); | ||
| 254 | |||
| 255 | return status; | ||
| 256 | } | ||
| 257 | |||
| 258 | hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr) | ||
| 259 | { | ||
| 260 | hw_status status = 0; | ||
| 261 | u32 load_ttb; | ||
| 262 | |||
| 263 | load_ttb = ttb_phys_addr & ~0x7FUL; | ||
| 264 | /* write values to register */ | ||
| 265 | MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb); | ||
| 266 | |||
| 267 | return status; | ||
| 268 | } | ||
| 269 | |||
| 270 | hw_status hw_mmu_twl_enable(const void __iomem *base_address) | ||
| 271 | { | ||
| 272 | hw_status status = 0; | ||
| 273 | |||
| 274 | MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET); | ||
| 275 | |||
| 276 | return status; | ||
| 277 | } | ||
| 278 | |||
| 279 | hw_status hw_mmu_twl_disable(const void __iomem *base_address) | ||
| 280 | { | ||
| 281 | hw_status status = 0; | ||
| 282 | |||
| 283 | MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR); | ||
| 284 | |||
| 285 | return status; | ||
| 286 | } | ||
| 287 | |||
| 288 | hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr, | ||
| 289 | u32 page_sz) | ||
| 290 | { | ||
| 291 | hw_status status = 0; | ||
| 292 | u32 virtual_addr_tag; | ||
| 293 | enum hw_mmu_page_size_t pg_size_bits; | ||
| 294 | |||
| 295 | switch (page_sz) { | ||
| 296 | case HW_PAGE_SIZE4KB: | ||
| 297 | pg_size_bits = HW_MMU_SMALL_PAGE; | ||
| 298 | break; | ||
| 299 | |||
| 300 | case HW_PAGE_SIZE64KB: | ||
| 301 | pg_size_bits = HW_MMU_LARGE_PAGE; | ||
| 302 | break; | ||
| 303 | |||
| 304 | case HW_PAGE_SIZE1MB: | ||
| 305 | pg_size_bits = HW_MMU_SECTION; | ||
| 306 | break; | ||
| 307 | |||
| 308 | case HW_PAGE_SIZE16MB: | ||
| 309 | pg_size_bits = HW_MMU_SUPERSECTION; | ||
| 310 | break; | ||
| 311 | |||
| 312 | default: | ||
| 313 | return -EINVAL; | ||
| 314 | } | ||
| 315 | |||
| 316 | /* Generate the 20-bit tag from virtual address */ | ||
| 317 | virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12); | ||
| 318 | |||
| 319 | mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag); | ||
| 320 | |||
| 321 | mmu_flush_entry(base_address); | ||
| 322 | |||
| 323 | return status; | ||
| 324 | } | ||
| 325 | |||
| 326 | hw_status hw_mmu_tlb_add(const void __iomem *base_address, | ||
| 327 | u32 physical_addr, | ||
| 328 | u32 virtual_addr, | ||
| 329 | u32 page_sz, | ||
| 330 | u32 entry_num, | ||
| 331 | struct hw_mmu_map_attrs_t *map_attrs, | ||
| 332 | s8 preserved_bit, s8 valid_bit) | ||
| 333 | { | ||
| 334 | hw_status status = 0; | ||
| 335 | u32 lock_reg; | ||
| 336 | u32 virtual_addr_tag; | ||
| 337 | enum hw_mmu_page_size_t mmu_pg_size; | ||
| 338 | |||
| 339 | /* Check the input parameters */ | ||
| 340 | switch (page_sz) { | ||
| 341 | case HW_PAGE_SIZE4KB: | ||
| 342 | mmu_pg_size = HW_MMU_SMALL_PAGE; | ||
| 343 | break; | ||
| 344 | |||
| 345 | case HW_PAGE_SIZE64KB: | ||
| 346 | mmu_pg_size = HW_MMU_LARGE_PAGE; | ||
| 347 | break; | ||
| 348 | |||
| 349 | case HW_PAGE_SIZE1MB: | ||
| 350 | mmu_pg_size = HW_MMU_SECTION; | ||
| 351 | break; | ||
| 352 | |||
| 353 | case HW_PAGE_SIZE16MB: | ||
| 354 | mmu_pg_size = HW_MMU_SUPERSECTION; | ||
| 355 | break; | ||
| 356 | |||
| 357 | default: | ||
| 358 | return -EINVAL; | ||
| 359 | } | ||
| 360 | |||
| 361 | lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address); | ||
| 362 | |||
| 363 | /* Generate the 20-bit tag from virtual address */ | ||
| 364 | virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12); | ||
| 365 | |||
| 366 | /* Write the fields in the CAM Entry Register */ | ||
| 367 | mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit, | ||
| 368 | virtual_addr_tag); | ||
| 369 | |||
| 370 | /* Write the different fields of the RAM Entry Register */ | ||
| 371 | /* endianism of the page, element size of the page (8, 16, 32, 64 bit) */ | ||
| 372 | mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism, | ||
| 373 | map_attrs->element_size, map_attrs->mixed_size); | ||
| 374 | |||
| 375 | /* Update the MMU Lock Register */ | ||
| 376 | /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */ | ||
| 377 | MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num); | ||
| 378 | |||
| 379 | /* Enable loading of an entry in TLB by writing 1 | ||
| 380 | * into the LD_TLB register */ | ||
| 381 | MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB); | ||
| 382 | |||
| 383 | MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg); | ||
| 384 | |||
| 385 | return status; | ||
| 386 | } | ||
| 387 | |||
| 388 | hw_status hw_mmu_pte_set(const u32 pg_tbl_va, | ||
| 389 | u32 physical_addr, | ||
| 390 | u32 virtual_addr, | ||
| 391 | u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs) | ||
| 392 | { | ||
| 393 | hw_status status = 0; | ||
| 394 | u32 pte_addr, pte_val; | ||
| 395 | s32 num_entries = 1; | ||
| 396 | |||
| 397 | switch (page_sz) { | ||
| 398 | case HW_PAGE_SIZE4KB: | ||
| 399 | pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, | ||
| 400 | virtual_addr & | ||
| 401 | MMU_SMALL_PAGE_MASK); | ||
| 402 | pte_val = | ||
| 403 | ((physical_addr & MMU_SMALL_PAGE_MASK) | | ||
| 404 | (map_attrs->endianism << 9) | (map_attrs-> | ||
| 405 | element_size << 4) | | ||
| 406 | (map_attrs->mixed_size << 11) | 2); | ||
| 407 | break; | ||
| 408 | |||
| 409 | case HW_PAGE_SIZE64KB: | ||
| 410 | num_entries = 16; | ||
| 411 | pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, | ||
| 412 | virtual_addr & | ||
| 413 | MMU_LARGE_PAGE_MASK); | ||
| 414 | pte_val = | ||
| 415 | ((physical_addr & MMU_LARGE_PAGE_MASK) | | ||
| 416 | (map_attrs->endianism << 9) | (map_attrs-> | ||
| 417 | element_size << 4) | | ||
| 418 | (map_attrs->mixed_size << 11) | 1); | ||
| 419 | break; | ||
| 420 | |||
| 421 | case HW_PAGE_SIZE1MB: | ||
| 422 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
| 423 | virtual_addr & | ||
| 424 | MMU_SECTION_ADDR_MASK); | ||
| 425 | pte_val = | ||
| 426 | ((((physical_addr & MMU_SECTION_ADDR_MASK) | | ||
| 427 | (map_attrs->endianism << 15) | (map_attrs-> | ||
| 428 | element_size << 10) | | ||
| 429 | (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2); | ||
| 430 | break; | ||
| 431 | |||
| 432 | case HW_PAGE_SIZE16MB: | ||
| 433 | num_entries = 16; | ||
| 434 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
| 435 | virtual_addr & | ||
| 436 | MMU_SSECTION_ADDR_MASK); | ||
| 437 | pte_val = | ||
| 438 | (((physical_addr & MMU_SSECTION_ADDR_MASK) | | ||
| 439 | (map_attrs->endianism << 15) | (map_attrs-> | ||
| 440 | element_size << 10) | | ||
| 441 | (map_attrs->mixed_size << 17) | ||
| 442 | ) | 0x40000 | 0x2); | ||
| 443 | break; | ||
| 444 | |||
| 445 | case HW_MMU_COARSE_PAGE_SIZE: | ||
| 446 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
| 447 | virtual_addr & | ||
| 448 | MMU_SECTION_ADDR_MASK); | ||
| 449 | pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1; | ||
| 450 | break; | ||
| 451 | |||
| 452 | default: | ||
| 453 | return -EINVAL; | ||
| 454 | } | ||
| 455 | |||
| 456 | while (--num_entries >= 0) | ||
| 457 | ((u32 *) pte_addr)[num_entries] = pte_val; | ||
| 458 | |||
| 459 | return status; | ||
| 460 | } | ||
| 461 | |||
| 462 | hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size) | ||
| 463 | { | ||
| 464 | hw_status status = 0; | ||
| 465 | u32 pte_addr; | ||
| 466 | s32 num_entries = 1; | ||
| 467 | |||
| 468 | switch (page_size) { | ||
| 469 | case HW_PAGE_SIZE4KB: | ||
| 470 | pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, | ||
| 471 | virtual_addr & | ||
| 472 | MMU_SMALL_PAGE_MASK); | ||
| 473 | break; | ||
| 474 | |||
| 475 | case HW_PAGE_SIZE64KB: | ||
| 476 | num_entries = 16; | ||
| 477 | pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, | ||
| 478 | virtual_addr & | ||
| 479 | MMU_LARGE_PAGE_MASK); | ||
| 480 | break; | ||
| 481 | |||
| 482 | case HW_PAGE_SIZE1MB: | ||
| 483 | case HW_MMU_COARSE_PAGE_SIZE: | ||
| 484 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
| 485 | virtual_addr & | ||
| 486 | MMU_SECTION_ADDR_MASK); | ||
| 487 | break; | ||
| 488 | |||
| 489 | case HW_PAGE_SIZE16MB: | ||
| 490 | num_entries = 16; | ||
| 491 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
| 492 | virtual_addr & | ||
| 493 | MMU_SSECTION_ADDR_MASK); | ||
| 494 | break; | ||
| 495 | |||
| 496 | default: | ||
| 497 | return -EINVAL; | ||
| 498 | } | ||
| 499 | |||
| 500 | while (--num_entries >= 0) | ||
| 501 | ((u32 *) pte_addr)[num_entries] = 0; | ||
| 502 | |||
| 503 | return status; | ||
| 504 | } | ||
| 505 | |||
| 506 | /* mmu_flush_entry */ | ||
| 507 | static hw_status mmu_flush_entry(const void __iomem *base_address) | ||
| 508 | { | ||
| 509 | hw_status status = 0; | ||
| 510 | u32 flush_entry_data = 0x1; | ||
| 511 | |||
| 512 | /* write values to register */ | ||
| 513 | MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data); | ||
| 514 | |||
| 515 | return status; | ||
| 516 | } | ||
| 517 | |||
| 518 | /* mmu_set_cam_entry */ | ||
| 519 | static hw_status mmu_set_cam_entry(const void __iomem *base_address, | ||
| 520 | const u32 page_sz, | ||
| 521 | const u32 preserved_bit, | ||
| 522 | const u32 valid_bit, | ||
| 523 | const u32 virtual_addr_tag) | ||
| 524 | { | ||
| 525 | hw_status status = 0; | ||
| 526 | u32 mmu_cam_reg; | ||
| 527 | |||
| 528 | mmu_cam_reg = (virtual_addr_tag << 12); | ||
| 529 | mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) | | ||
| 530 | (preserved_bit << 3); | ||
| 531 | |||
| 532 | /* write values to register */ | ||
| 533 | MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg); | ||
| 534 | |||
| 535 | return status; | ||
| 536 | } | ||
| 537 | |||
| 538 | /* mmu_set_ram_entry */ | ||
| 539 | static hw_status mmu_set_ram_entry(const void __iomem *base_address, | ||
| 540 | const u32 physical_addr, | ||
| 541 | enum hw_endianism_t endianism, | ||
| 542 | enum hw_element_size_t element_size, | ||
| 543 | enum hw_mmu_mixed_size_t mixed_size) | ||
| 544 | { | ||
| 545 | hw_status status = 0; | ||
| 546 | u32 mmu_ram_reg; | ||
| 547 | |||
| 548 | mmu_ram_reg = (physical_addr & MMU_ADDR_MASK); | ||
| 549 | mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) | | ||
| 550 | (mixed_size << 6)); | ||
| 551 | |||
| 552 | /* write values to register */ | ||
| 553 | MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg); | ||
| 554 | |||
| 555 | return status; | ||
| 556 | |||
| 557 | } | ||
| 558 | |||
| 559 | void hw_mmu_tlb_flush_all(const void __iomem *base) | ||
| 560 | { | ||
| 561 | __raw_writeb(1, base + MMU_GFLUSH); | ||
| 562 | } | ||
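For orientation, a hedged usage sketch of the API this file restores: bring the MMU up, point it at an L1 translation table, and preload one locked, preserved TLB entry. The base pointer, the physical and DSP addresses, and the attribute choices are assumptions for illustration only.

```c
#include <hw_defs.h>
#include <hw_mmu.h>

static void example_preload_tlb(void __iomem *dsp_mmu_base)
{
	struct hw_mmu_map_attrs_t attrs = {
		.endianism    = HW_LITTLE_ENDIAN,
		.element_size = HW_ELEM_SIZE16BIT,
		.mixed_size   = HW_MMU_CPUES,
	};

	hw_mmu_enable(dsp_mmu_base);
	hw_mmu_ttb_set(dsp_mmu_base, 0x9c000000);	/* assumed L1 table PA */

	/* 1 MB section: PA 0x9d000000 -> DSP VA 0x20000000 in TLB entry 0,
	 * marked preserved and valid so eviction never reclaims it. */
	hw_mmu_tlb_add(dsp_mmu_base, 0x9d000000, 0x20000000,
		       HW_PAGE_SIZE1MB, 0, &attrs, 1, 1);
}
```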
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.h b/drivers/staging/tidspbridge/hw/hw_mmu.h new file mode 100644 index 000000000000..1458a2c6027b --- /dev/null +++ b/drivers/staging/tidspbridge/hw/hw_mmu.h | |||
| @@ -0,0 +1,163 @@ | |||
| 1 | /* | ||
| 2 | * hw_mmu.h | ||
| 3 | * | ||
| 4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
| 5 | * | ||
| 6 | * MMU types and API declarations | ||
| 7 | * | ||
| 8 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
| 9 | * | ||
| 10 | * This package is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License version 2 as | ||
| 12 | * published by the Free Software Foundation. | ||
| 13 | * | ||
| 14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
| 15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
| 16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #ifndef _HW_MMU_H | ||
| 20 | #define _HW_MMU_H | ||
| 21 | |||
| 22 | #include <linux/types.h> | ||
| 23 | |||
| 24 | /* Bitmasks for interrupt sources */ | ||
| 25 | #define HW_MMU_TRANSLATION_FAULT 0x2 | ||
| 26 | #define HW_MMU_ALL_INTERRUPTS 0x1F | ||
| 27 | |||
| 28 | #define HW_MMU_COARSE_PAGE_SIZE 0x400 | ||
| 29 | |||
| 30 | /* hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow | ||
| 31 | CPU/TLB Element size */ | ||
| 32 | enum hw_mmu_mixed_size_t { | ||
| 33 | HW_MMU_TLBES, | ||
| 34 | HW_MMU_CPUES | ||
| 35 | }; | ||
| 36 | |||
| 37 | /* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */ | ||
| 38 | struct hw_mmu_map_attrs_t { | ||
| 39 | enum hw_endianism_t endianism; | ||
| 40 | enum hw_element_size_t element_size; | ||
| 41 | enum hw_mmu_mixed_size_t mixed_size; | ||
| 42 | bool donotlockmpupage; | ||
| 43 | }; | ||
| 44 | |||
| 45 | extern hw_status hw_mmu_enable(const void __iomem *base_address); | ||
| 46 | |||
| 47 | extern hw_status hw_mmu_disable(const void __iomem *base_address); | ||
| 48 | |||
| 49 | extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address, | ||
| 50 | u32 num_locked_entries); | ||
| 51 | |||
| 52 | extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address, | ||
| 53 | u32 victim_entry_num); | ||
| 54 | |||
| 55 | /* For MMU faults */ | ||
| 56 | extern hw_status hw_mmu_event_ack(const void __iomem *base_address, | ||
| 57 | u32 irq_mask); | ||
| 58 | |||
| 59 | extern hw_status hw_mmu_event_disable(const void __iomem *base_address, | ||
| 60 | u32 irq_mask); | ||
| 61 | |||
| 62 | extern hw_status hw_mmu_event_enable(const void __iomem *base_address, | ||
| 63 | u32 irq_mask); | ||
| 64 | |||
| 65 | extern hw_status hw_mmu_event_status(const void __iomem *base_address, | ||
| 66 | u32 *irq_mask); | ||
| 67 | |||
| 68 | extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, | ||
| 69 | u32 *addr); | ||
| 70 | |||
| 71 | /* Set the TT base address */ | ||
| 72 | extern hw_status hw_mmu_ttb_set(const void __iomem *base_address, | ||
| 73 | u32 ttb_phys_addr); | ||
| 74 | |||
| 75 | extern hw_status hw_mmu_twl_enable(const void __iomem *base_address); | ||
| 76 | |||
| 77 | extern hw_status hw_mmu_twl_disable(const void __iomem *base_address); | ||
| 78 | |||
| 79 | extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address, | ||
| 80 | u32 virtual_addr, u32 page_sz); | ||
| 81 | |||
| 82 | extern hw_status hw_mmu_tlb_add(const void __iomem *base_address, | ||
| 83 | u32 physical_addr, | ||
| 84 | u32 virtual_addr, | ||
| 85 | u32 page_sz, | ||
| 86 | u32 entry_num, | ||
| 87 | struct hw_mmu_map_attrs_t *map_attrs, | ||
| 88 | s8 preserved_bit, s8 valid_bit); | ||
| 89 | |||
| 90 | /* For PTEs */ | ||
| 91 | extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va, | ||
| 92 | u32 physical_addr, | ||
| 93 | u32 virtual_addr, | ||
| 94 | u32 page_sz, | ||
| 95 | struct hw_mmu_map_attrs_t *map_attrs); | ||
| 96 | |||
| 97 | extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, | ||
| 98 | u32 virtual_addr, u32 page_size); | ||
| 99 | |||
| 100 | void hw_mmu_tlb_flush_all(const void __iomem *base); | ||
| 101 | |||
| 102 | static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va) | ||
| 103 | { | ||
| 104 | u32 pte_addr; | ||
| 105 | u32 va31_to20; | ||
| 106 | |||
| 107 | va31_to20 = va >> (20 - 2); /* (va >> 20) << 2, in one shift */ | ||
| 108 | va31_to20 &= 0xFFFFFFFCUL; | ||
| 109 | pte_addr = l1_base + va31_to20; | ||
| 110 | |||
| 111 | return pte_addr; | ||
| 112 | } | ||
| 113 | |||
| 114 | static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va) | ||
| 115 | { | ||
| 116 | u32 pte_addr; | ||
| 117 | |||
| 118 | pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC); | ||
| 119 | |||
| 120 | return pte_addr; | ||
| 121 | } | ||
| 122 | |||
| 123 | static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val) | ||
| 124 | { | ||
| 125 | u32 pte_coarse; | ||
| 126 | |||
| 127 | pte_coarse = pte_val & 0xFFFFFC00; | ||
| 128 | |||
| 129 | return pte_coarse; | ||
| 130 | } | ||
| 131 | |||
| 132 | static inline u32 hw_mmu_pte_size_l1(u32 pte_val) | ||
| 133 | { | ||
| 134 | u32 pte_size = 0; | ||
| 135 | |||
| 136 | if ((pte_val & 0x3) == 0x1) { | ||
| 137 | /* Points to L2 PT */ | ||
| 138 | pte_size = HW_MMU_COARSE_PAGE_SIZE; | ||
| 139 | } | ||
| 140 | |||
| 141 | if ((pte_val & 0x3) == 0x2) { | ||
| 142 | if (pte_val & (1 << 18)) | ||
| 143 | pte_size = HW_PAGE_SIZE16MB; | ||
| 144 | else | ||
| 145 | pte_size = HW_PAGE_SIZE1MB; | ||
| 146 | } | ||
| 147 | |||
| 148 | return pte_size; | ||
| 149 | } | ||
| 150 | |||
| 151 | static inline u32 hw_mmu_pte_size_l2(u32 pte_val) | ||
| 152 | { | ||
| 153 | u32 pte_size = 0; | ||
| 154 | |||
| 155 | if (pte_val & 0x2) | ||
| 156 | pte_size = HW_PAGE_SIZE4KB; | ||
| 157 | else if (pte_val & 0x1) | ||
| 158 | pte_size = HW_PAGE_SIZE64KB; | ||
| 159 | |||
| 160 | return pte_size; | ||
| 161 | } | ||
| 162 | |||
| 163 | #endif /* _HW_MMU_H */ | ||
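A worked instance of the L1 helper above makes the shift trick concrete: `va >> (20 - 2)` keeps VA bits 31:20 already multiplied by the 4-byte descriptor size. The base address and VA below are illustrative values, not driver constants.

```c
/* For l1_base = 0x9c000000 and va = 0x20100000:
 * va >> 18 = 0x804 (L1 index 0x201, pre-scaled by 4),
 * so pte_addr = 0x9c000000 + 0x804 = 0x9c000804. */
static u32 example_l1_pte_addr(void)
{
	return hw_mmu_pte_addr_l1(0x9c000000, 0x20100000);
}
```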
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h index dfb55cca34c7..38122dbf877a 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h +++ b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h | |||
| @@ -68,6 +68,7 @@ struct cfg_hostres { | |||
| 68 | void __iomem *dw_per_base; | 68 | void __iomem *dw_per_base; |
| 69 | u32 dw_per_pm_base; | 69 | u32 dw_per_pm_base; |
| 70 | u32 dw_core_pm_base; | 70 | u32 dw_core_pm_base; |
| 71 | void __iomem *dw_dmmu_base; | ||
| 71 | void __iomem *dw_sys_ctrl_base; | 72 | void __iomem *dw_sys_ctrl_base; |
| 72 | }; | 73 | }; |
| 73 | 74 | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h index 9bdd48f57429..357458fadd2a 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/dev.h +++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <dspbridge/nodedefs.h> | 27 | #include <dspbridge/nodedefs.h> |
| 28 | #include <dspbridge/dispdefs.h> | 28 | #include <dspbridge/dispdefs.h> |
| 29 | #include <dspbridge/dspdefs.h> | 29 | #include <dspbridge/dspdefs.h> |
| 30 | #include <dspbridge/dmm.h> | ||
| 30 | #include <dspbridge/host_os.h> | 31 | #include <dspbridge/host_os.h> |
| 31 | 32 | ||
| 32 | /* ----------------------------------- This */ | 33 | /* ----------------------------------- This */ |
| @@ -233,6 +234,29 @@ extern int dev_get_cmm_mgr(struct dev_object *hdev_obj, | |||
| 233 | struct cmm_object **mgr); | 234 | struct cmm_object **mgr); |
| 234 | 235 | ||
| 235 | /* | 236 | /* |
| 237 | * ======== dev_get_dmm_mgr ======== | ||
| 238 | * Purpose: | ||
| 239 | * Retrieve the handle to the dynamic memory manager created for this | ||
| 240 | * device. | ||
| 241 | * Parameters: | ||
| 242 | * hdev_obj: Handle to device object created with | ||
| 243 | * dev_create_device(). | ||
| 244 | * *mgr: Ptr to location to store handle. | ||
| 245 | * Returns: | ||
| 246 | * 0: Success. | ||
| 247 | * -EFAULT: Invalid hdev_obj. | ||
| 248 | * Requires: | ||
| 249 | * mgr != NULL. | ||
| 250 | * DEV Initialized. | ||
| 251 | * Ensures: | ||
| 252 | * 0: *mgr contains a handle to a dynamic memory manager object, | ||
| 253 | * or NULL. | ||
| 254 | * else: *mgr is NULL. | ||
| 255 | */ | ||
| 256 | extern int dev_get_dmm_mgr(struct dev_object *hdev_obj, | ||
| 257 | struct dmm_object **mgr); | ||
| 258 | |||
| 259 | /* | ||
| 236 | * ======== dev_get_cod_mgr ======== | 260 | * ======== dev_get_cod_mgr ======== |
| 237 | * Purpose: | 261 | * Purpose: |
| 238 | * Retrieve the COD manager create for this device. | 262 | * Retrieve the COD manager create for this device. |
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h new file mode 100644 index 000000000000..6c58335c5f60 --- /dev/null +++ b/drivers/staging/tidspbridge/include/dspbridge/dmm.h | |||
| @@ -0,0 +1,75 @@ | |||
| 1 | /* | ||
| 2 | * dmm.h | ||
| 3 | * | ||
| 4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
| 5 | * | ||
| 6 | * The Dynamic Memory Mapping (DMM) module manages the DSP Virtual address | ||
| 7 | * space that can be directly mapped to any MPU buffer or memory region. | ||
| 8 | * | ||
| 9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
| 10 | * | ||
| 11 | * This package is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of the GNU General Public License version 2 as | ||
| 13 | * published by the Free Software Foundation. | ||
| 14 | * | ||
| 15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
| 16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
| 17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
| 18 | */ | ||
| 19 | |||
| 20 | #ifndef DMM_ | ||
| 21 | #define DMM_ | ||
| 22 | |||
| 23 | #include <dspbridge/dbdefs.h> | ||
| 24 | |||
| 25 | struct dmm_object; | ||
| 26 | |||
| 27 | /* DMM attributes used in dmm_create() */ | ||
| 28 | struct dmm_mgrattrs { | ||
| 29 | u32 reserved; | ||
| 30 | }; | ||
| 31 | |||
| 32 | #define DMMPOOLSIZE 0x4000000 | ||
| 33 | |||
| 34 | /* | ||
| 35 | * ======== dmm_get_handle ======== | ||
| 36 | * Purpose: | ||
| 37 | * Return the dynamic memory manager object for this device. | ||
| 38 | * This is typically called from the client process. | ||
| 39 | */ | ||
| 40 | |||
| 41 | extern int dmm_get_handle(void *hprocessor, | ||
| 42 | struct dmm_object **dmm_manager); | ||
| 43 | |||
| 44 | extern int dmm_reserve_memory(struct dmm_object *dmm_mgr, | ||
| 45 | u32 size, u32 *prsv_addr); | ||
| 46 | |||
| 47 | extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, | ||
| 48 | u32 rsv_addr); | ||
| 49 | |||
| 50 | extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, | ||
| 51 | u32 size); | ||
| 52 | |||
| 53 | extern int dmm_un_map_memory(struct dmm_object *dmm_mgr, | ||
| 54 | u32 addr, u32 *psize); | ||
| 55 | |||
| 56 | extern int dmm_destroy(struct dmm_object *dmm_mgr); | ||
| 57 | |||
| 58 | extern int dmm_delete_tables(struct dmm_object *dmm_mgr); | ||
| 59 | |||
| 60 | extern int dmm_create(struct dmm_object **dmm_manager, | ||
| 61 | struct dev_object *hdev_obj, | ||
| 62 | const struct dmm_mgrattrs *mgr_attrts); | ||
| 63 | |||
| 64 | extern bool dmm_init(void); | ||
| 65 | |||
| 66 | extern void dmm_exit(void); | ||
| 67 | |||
| 68 | extern int dmm_create_tables(struct dmm_object *dmm_mgr, | ||
| 69 | u32 addr, u32 size); | ||
| 70 | |||
| 71 | #ifdef DSP_DMM_DEBUG | ||
| 72 | u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr); | ||
| 73 | #endif | ||
| 74 | |||
| 75 | #endif /* DMM_ */ | ||
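A hedged sketch of the lifecycle this header implies: reserve a chunk of DSP virtual address space, map it, unmap it, and release the reservation. hprocessor comes from the caller, and the 64 KB size is a placeholder.

```c
#include <dspbridge/dmm.h>

static int example_dmm_cycle(void *hprocessor)
{
	struct dmm_object *dmm_mgr;
	u32 rsv_addr, unmap_size;
	int status;

	status = dmm_get_handle(hprocessor, &dmm_mgr);
	if (status)
		return status;

	status = dmm_reserve_memory(dmm_mgr, 0x10000, &rsv_addr);
	if (status)
		return status;

	status = dmm_map_memory(dmm_mgr, rsv_addr, 0x10000);
	if (!status)
		dmm_un_map_memory(dmm_mgr, rsv_addr, &unmap_size);

	return dmm_un_reserve_memory(dmm_mgr, rsv_addr);
}
```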
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h index 75a2c9b5c6f2..c1f363ec9afa 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/drv.h +++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h | |||
| @@ -108,6 +108,12 @@ struct dmm_map_object { | |||
| 108 | struct bridge_dma_map_info dma_info; | 108 | struct bridge_dma_map_info dma_info; |
| 109 | }; | 109 | }; |
| 110 | 110 | ||
| 111 | /* Used for DMM reserved memory accounting */ | ||
| 112 | struct dmm_rsv_object { | ||
| 113 | struct list_head link; | ||
| 114 | u32 dsp_reserved_addr; | ||
| 115 | }; | ||
| 116 | |||
| 111 | /* New structure (member of process context) abstracts DMM resource info */ | 117 | /* New structure (member of process context) abstracts DMM resource info */ |
| 112 | struct dspheap_res_object { | 118 | struct dspheap_res_object { |
| 113 | s32 heap_allocated; /* DMM status */ | 119 | s32 heap_allocated; /* DMM status */ |
| @@ -159,6 +165,10 @@ struct process_context { | |||
| 159 | struct list_head dmm_map_list; | 165 | struct list_head dmm_map_list; |
| 160 | spinlock_t dmm_map_lock; | 166 | spinlock_t dmm_map_lock; |
| 161 | 167 | ||
| 168 | /* DMM reserved memory resources */ | ||
| 169 | struct list_head dmm_rsv_list; | ||
| 170 | spinlock_t dmm_rsv_lock; | ||
| 171 | |||
| 162 | /* DSP Heap resources */ | 172 | /* DSP Heap resources */ |
| 163 | struct dspheap_res_object *pdspheap_list; | 173 | struct dspheap_res_object *pdspheap_list; |
| 164 | 174 | ||
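The new dmm_rsv_list/dmm_rsv_lock pair exists so per-process reservations can be accounted and torn down when the process exits. A sketch, assumed rather than taken from this patch, of recording one reservation under the lock:

```c
#include <linux/slab.h>
#include <dspbridge/drv.h>

/* Hypothetical helper: remember one reserved DSP address in the
 * process context so cleanup can unreserve it later. */
static int track_reservation(struct process_context *ctx, u32 dsp_addr)
{
	struct dmm_rsv_object *rsv = kmalloc(sizeof(*rsv), GFP_KERNEL);

	if (!rsv)
		return -ENOMEM;

	rsv->dsp_reserved_addr = dsp_addr;
	spin_lock(&ctx->dmm_rsv_lock);
	list_add(&rsv->link, &ctx->dmm_rsv_list);
	spin_unlock(&ctx->dmm_rsv_lock);
	return 0;
}
```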
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h b/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h deleted file mode 100644 index cb38d4cc0734..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h +++ /dev/null | |||
| @@ -1,67 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * dsp-mmu.h | ||
| 3 | * | ||
| 4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
| 5 | * | ||
| 6 | * DSP iommu. | ||
| 7 | * | ||
| 8 | * Copyright (C) 2005-2010 Texas Instruments, Inc. | ||
| 9 | * | ||
| 10 | * This package is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License version 2 as | ||
| 12 | * published by the Free Software Foundation. | ||
| 13 | * | ||
| 14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
| 15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
| 16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #ifndef _DSP_MMU_ | ||
| 20 | #define _DSP_MMU_ | ||
| 21 | |||
| 22 | #include <plat/iommu.h> | ||
| 23 | #include <plat/iovmm.h> | ||
| 24 | |||
| 25 | /** | ||
| 26 | * dsp_mmu_init() - initialize dsp_mmu module and returns a handle | ||
| 27 | * | ||
| 28 | * This function initialize dsp mmu module and returns a struct iommu | ||
| 29 | * handle to use it for dsp maps. | ||
| 30 | * | ||
| 31 | */ | ||
| 32 | struct iommu *dsp_mmu_init(void); | ||
| 33 | |||
| 34 | /** | ||
| 35 | * dsp_mmu_exit() - destroy dsp mmu module | ||
| 36 | * @mmu: Pointer to iommu handle. | ||
| 37 | * | ||
| 38 | * This function destroys dsp mmu module. | ||
| 39 | * | ||
| 40 | */ | ||
| 41 | void dsp_mmu_exit(struct iommu *mmu); | ||
| 42 | |||
| 43 | /** | ||
| 44 | * user_to_dsp_map() - maps user to dsp virtual address | ||
| 45 | * @mmu: Pointer to iommu handle. | ||
| 46 | * @uva: Virtual user space address. | ||
| 47 | * @da DSP address | ||
| 48 | * @size Buffer size to map. | ||
| 49 | * @usr_pgs struct page array pointer where the user pages will be stored | ||
| 50 | * | ||
| 51 | * This function maps a user space buffer into DSP virtual address. | ||
| 52 | * | ||
| 53 | */ | ||
| 54 | u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size, | ||
| 55 | struct page **usr_pgs); | ||
| 56 | |||
| 57 | /** | ||
| 58 | * user_to_dsp_unmap() - unmaps DSP virtual buffer. | ||
| 59 | * @mmu: Pointer to iommu handle. | ||
| 60 | * @da DSP address | ||
| 61 | * | ||
| 62 | * This function unmaps a user space buffer into DSP virtual address. | ||
| 63 | * | ||
| 64 | */ | ||
| 65 | int user_to_dsp_unmap(struct iommu *mmu, u32 da); | ||
| 66 | |||
| 67 | #endif | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h index 615363474810..0ae7d1646a1b 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h +++ b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h | |||
| @@ -162,6 +162,48 @@ typedef int(*fxn_brd_memwrite) (struct bridge_dev_context | |||
| 162 | u32 mem_type); | 162 | u32 mem_type); |
| 163 | 163 | ||
| 164 | /* | 164 | /* |
| 165 | * ======== bridge_brd_mem_map ======== | ||
| 166 | * Purpose: | ||
| 167 | * Map a MPU memory region to a DSP/IVA memory space | ||
| 168 | * Parameters: | ||
| 169 | * dev_ctxt: Handle to Bridge driver defined device info. | ||
| 170 | * ul_mpu_addr: MPU memory region start address. | ||
| 171 | * virt_addr: DSP/IVA memory region u8 address. | ||
| 172 | * ul_num_bytes: Number of bytes to map. | ||
| 173 | * map_attrs: Mapping attributes (e.g. endianness). | ||
| 174 | * Returns: | ||
| 175 | * 0: Success. | ||
| 176 | * -EPERM: Other, unspecified error. | ||
| 177 | * Requires: | ||
| 178 | * dev_ctxt != NULL; | ||
| 179 | * Ensures: | ||
| 180 | */ | ||
| 181 | typedef int(*fxn_brd_memmap) (struct bridge_dev_context | ||
| 182 | * dev_ctxt, u32 ul_mpu_addr, | ||
| 183 | u32 virt_addr, u32 ul_num_bytes, | ||
| 184 | u32 map_attr, | ||
| 185 | struct page **mapped_pages); | ||
| 186 | |||
| 187 | /* | ||
| 188 | * ======== bridge_brd_mem_un_map ======== | ||
| 189 | * Purpose: | ||
| 190 | * UnMap an MPU memory region from DSP/IVA memory space | ||
| 191 | * Parameters: | ||
| 192 | * dev_ctxt: Handle to Bridge driver defined device info. | ||
| 193 | * virt_addr: DSP/IVA memory region u8 address. | ||
| 194 | * ul_num_bytes: Number of bytes to unmap. | ||
| 195 | * Returns: | ||
| 196 | * 0: Success. | ||
| 197 | * -EPERM: Other, unspecified error. | ||
| 198 | * Requires: | ||
| 199 | * dev_ctxt != NULL; | ||
| 200 | * Ensures: | ||
| 201 | */ | ||
| 202 | typedef int(*fxn_brd_memunmap) (struct bridge_dev_context | ||
| 203 | * dev_ctxt, | ||
| 204 | u32 virt_addr, u32 ul_num_bytes); | ||
| 205 | |||
| 206 | /* | ||
| 165 | * ======== bridge_brd_stop ======== | 207 | * ======== bridge_brd_stop ======== |
| 166 | * Purpose: | 208 | * Purpose: |
| 167 | * Bring board to the BRD_STOPPED state. | 209 | * Bring board to the BRD_STOPPED state. |
| @@ -951,6 +993,8 @@ struct bridge_drv_interface { | |||
| 951 | fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */ | 993 | fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */ |
| 952 | fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */ | 994 | fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */ |
| 953 | fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */ | 995 | fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */ |
| 996 | fxn_brd_memmap pfn_brd_mem_map; /* Maps MPU mem to DSP mem */ | ||
| 997 | fxn_brd_memunmap pfn_brd_mem_un_map; /* Unmaps MPU mem from DSP mem */ | ||
| 954 | fxn_chnl_create pfn_chnl_create; /* Create channel manager. */ | 998 | fxn_chnl_create pfn_chnl_create; /* Create channel manager. */ |
| 955 | fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */ | 999 | fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */ |
| 956 | fxn_chnl_open pfn_chnl_open; /* Create a new channel. */ | 1000 | fxn_chnl_open pfn_chnl_open; /* Create a new channel. */ |
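The two new typedefs plug into struct bridge_drv_interface alongside the existing entries. A hypothetical fragment of a driver's interface table; the two implementation functions are assumed to exist elsewhere in that driver.

```c
static struct bridge_drv_interface drv_interface_fxns = {
	/* ...earlier entries elided... */
	.pfn_brd_mem_map    = bridge_brd_mem_map,	/* assumed impl */
	.pfn_brd_mem_un_map = bridge_brd_mem_un_map,	/* assumed impl */
	/* ...remaining entries elided... */
};
```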
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h index bad180108ada..41e0594dff34 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h +++ b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h | |||
| @@ -19,6 +19,10 @@ | |||
| 19 | #ifndef DSPIOCTL_ | 19 | #ifndef DSPIOCTL_ |
| 20 | #define DSPIOCTL_ | 20 | #define DSPIOCTL_ |
| 21 | 21 | ||
| 22 | /* ------------------------------------ Hardware Abstraction Layer */ | ||
| 23 | #include <hw_defs.h> | ||
| 24 | #include <hw_mmu.h> | ||
| 25 | |||
| 22 | /* | 26 | /* |
| 23 | * Any IOCTLS at or above this value are reserved for standard Bridge driver | 27 | * Any IOCTLS at or above this value are reserved for standard Bridge driver |
| 24 | * interfaces. | 28 | * interfaces. |
| @@ -61,6 +65,9 @@ struct bridge_ioctl_extproc { | |||
| 61 | /* GPP virtual address. __va does not work for ioremapped addresses */ | 65 | /* GPP virtual address. __va does not work for ioremapped addresses */ |
| 62 | u32 ul_gpp_va; | 66 | u32 ul_gpp_va; |
| 63 | u32 ul_size; /* Size of the mapped memory in bytes */ | 67 | u32 ul_size; /* Size of the mapped memory in bytes */ |
| 68 | enum hw_endianism_t endianism; | ||
| 69 | enum hw_mmu_mixed_size_t mixed_mode; | ||
| 70 | enum hw_element_size_t elem_size; | ||
| 64 | }; | 71 | }; |
| 65 | 72 | ||
| 66 | #endif /* DSPIOCTL_ */ | 73 | #endif /* DSPIOCTL_ */ |
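Illustrative only: a TLB setup entry carrying the three MMU attribute fields this hunk restores. The address and size are placeholders, and the struct's earlier fields are left to their defaults.

```c
struct bridge_ioctl_extproc entry = {
	.ul_gpp_va  = 0xd0000000,		/* placeholder GPP VA */
	.ul_size    = 0x100000,			/* 1 MB */
	.endianism  = HW_LITTLE_ENDIAN,
	.mixed_mode = HW_MMU_CPUES,
	.elem_size  = HW_ELEM_SIZE16BIT,
};
```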
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h index 2d12aab6b5bf..5e09fd165d9d 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/proc.h +++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h | |||
| @@ -551,6 +551,29 @@ extern int proc_map(void *hprocessor, | |||
| 551 | struct process_context *pr_ctxt); | 551 | struct process_context *pr_ctxt); |
| 552 | 552 | ||
| 553 | /* | 553 | /* |
| 554 | * ======== proc_reserve_memory ======== | ||
| 555 | * Purpose: | ||
| 556 | * Reserve a virtually contiguous region of DSP address space. | ||
| 557 | * Parameters: | ||
| 558 | * hprocessor : The processor handle. | ||
| 559 | * ul_size : Size of the address space to reserve. | ||
| 560 | * pp_rsv_addr : Ptr to DSP side reserved u8 address. | ||
| 561 | * Returns: | ||
| 562 | * 0 : Success. | ||
| 563 | * -EFAULT : Invalid processor handle. | ||
| 564 | * -EPERM : General failure. | ||
| 565 | * -ENOMEM : Cannot reserve chunk of this size. | ||
| 566 | * Requires: | ||
| 567 | * pp_rsv_addr is not NULL | ||
| 568 | * PROC Initialized. | ||
| 569 | * Ensures: | ||
| 570 | * Details: | ||
| 571 | */ | ||
| 572 | extern int proc_reserve_memory(void *hprocessor, | ||
| 573 | u32 ul_size, void **pp_rsv_addr, | ||
| 574 | struct process_context *pr_ctxt); | ||
| 575 | |||
| 576 | /* | ||
| 554 | * ======== proc_un_map ======== | 577 | * ======== proc_un_map ======== |
| 555 | * Purpose: | 578 | * Purpose: |
| 556 | * Removes a MPU buffer mapping from the DSP address space. | 579 | * Removes a MPU buffer mapping from the DSP address space. |
| @@ -572,4 +595,27 @@ extern int proc_map(void *hprocessor, | |||
| 572 | extern int proc_un_map(void *hprocessor, void *map_addr, | 595 | extern int proc_un_map(void *hprocessor, void *map_addr, |
| 573 | struct process_context *pr_ctxt); | 596 | struct process_context *pr_ctxt); |
| 574 | 597 | ||
| 598 | /* | ||
| 599 | * ======== proc_un_reserve_memory ======== | ||
| 600 | * Purpose: | ||
| 601 | * Frees a previously reserved region of DSP address space. | ||
| 602 | * Parameters: | ||
| 603 | * hprocessor : The processor handle. | ||
| 604 | * prsv_addr : Ptr to DSP side reserved byte address. | ||
| 605 | * Returns: | ||
| 606 | * 0 : Success. | ||
| 607 | * -EFAULT : Invalid processor handle. | ||
| 608 | * -EPERM : General failure. | ||
| 609 | * -ENOENT : Cannot find a reserved region starting with this | ||
| 610 | * : address. | ||
| 611 | * Requires: | ||
| 612 | * prsv_addr is not NULL | ||
| 613 | * PROC Initialized. | ||
| 614 | * Ensures: | ||
| 615 | * Details: | ||
| 616 | */ | ||
| 617 | extern int proc_un_reserve_memory(void *hprocessor, | ||
| 618 | void *prsv_addr, | ||
| 619 | struct process_context *pr_ctxt); | ||
| 620 | |||
| 575 | #endif /* PROC_ */ | 621 | #endif /* PROC_ */ |
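A hedged sketch pairing the two new proc-level calls; hprocessor and pr_ctxt come from the ioctl path, and the size is a placeholder.

```c
#include <dspbridge/proc.h>

static int example_reserve_unreserve(void *hprocessor,
				     struct process_context *pr_ctxt)
{
	void *rsv_addr;
	int status;

	status = proc_reserve_memory(hprocessor, 0x10000, &rsv_addr, pr_ctxt);
	if (status)
		return status;

	/* ...proc_map()/proc_un_map() against the reserved region... */

	return proc_un_reserve_memory(hprocessor, rsv_addr, pr_ctxt);
}
```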
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c index 7b30267ef0e2..132e960967b9 100644 --- a/drivers/staging/tidspbridge/pmgr/dev.c +++ b/drivers/staging/tidspbridge/pmgr/dev.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <dspbridge/cod.h> | 34 | #include <dspbridge/cod.h> |
| 35 | #include <dspbridge/drv.h> | 35 | #include <dspbridge/drv.h> |
| 36 | #include <dspbridge/proc.h> | 36 | #include <dspbridge/proc.h> |
| 37 | #include <dspbridge/dmm.h> | ||
| 37 | 38 | ||
| 38 | /* ----------------------------------- Resource Manager */ | 39 | /* ----------------------------------- Resource Manager */ |
| 39 | #include <dspbridge/mgr.h> | 40 | #include <dspbridge/mgr.h> |
| @@ -74,6 +75,7 @@ struct dev_object { | |||
| 74 | struct msg_mgr *hmsg_mgr; /* Message manager. */ | 75 | struct msg_mgr *hmsg_mgr; /* Message manager. */ |
| 75 | struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */ | 76 | struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */ |
| 76 | struct cmm_object *hcmm_mgr; /* SM memory manager. */ | 77 | struct cmm_object *hcmm_mgr; /* SM memory manager. */ |
| 78 | struct dmm_object *dmm_mgr; /* Dynamic memory manager. */ | ||
| 77 | struct ldr_module *module_obj; /* Bridge Module handle. */ | 79 | struct ldr_module *module_obj; /* Bridge Module handle. */ |
| 78 | u32 word_size; /* DSP word size: quick access. */ | 80 | u32 word_size; /* DSP word size: quick access. */ |
| 79 | struct drv_object *hdrv_obj; /* Driver Object */ | 81 | struct drv_object *hdrv_obj; /* Driver Object */ |
| @@ -248,6 +250,9 @@ int dev_create_device(struct dev_object **device_obj, | |||
| 248 | /* Instantiate the DEH module */ | 250 | /* Instantiate the DEH module */ |
| 249 | status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj); | 251 | status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj); |
| 250 | } | 252 | } |
| 253 | /* Create DMM mgr. */ | ||
| 254 | status = dmm_create(&dev_obj->dmm_mgr, | ||
| 255 | (struct dev_object *)dev_obj, NULL); | ||
| 251 | } | 256 | } |
| 252 | /* Add the new DEV_Object to the global list: */ | 257 | /* Add the new DEV_Object to the global list: */ |
| 253 | if (!status) { | 258 | if (!status) { |
| @@ -273,6 +278,8 @@ leave: | |||
| 273 | kfree(dev_obj->proc_list); | 278 | kfree(dev_obj->proc_list); |
| 274 | if (dev_obj->cod_mgr) | 279 | if (dev_obj->cod_mgr) |
| 275 | cod_delete(dev_obj->cod_mgr); | 280 | cod_delete(dev_obj->cod_mgr); |
| 281 | if (dev_obj->dmm_mgr) | ||
| 282 | dmm_destroy(dev_obj->dmm_mgr); | ||
| 276 | kfree(dev_obj); | 283 | kfree(dev_obj); |
| 277 | } | 284 | } |
| 278 | 285 | ||
| @@ -382,6 +389,11 @@ int dev_destroy_device(struct dev_object *hdev_obj) | |||
| 382 | dev_obj->hcmm_mgr = NULL; | 389 | dev_obj->hcmm_mgr = NULL; |
| 383 | } | 390 | } |
| 384 | 391 | ||
| 392 | if (dev_obj->dmm_mgr) { | ||
| 393 | dmm_destroy(dev_obj->dmm_mgr); | ||
| 394 | dev_obj->dmm_mgr = NULL; | ||
| 395 | } | ||
| 396 | |||
| 385 | /* Call the driver's bridge_dev_destroy() function: */ | 397 | /* Call the driver's bridge_dev_destroy() function: */ |
| 386 | /* Require of DevDestroy */ | 398 | /* Require of DevDestroy */ |
| 387 | if (dev_obj->hbridge_context) { | 399 | if (dev_obj->hbridge_context) { |
| @@ -462,6 +474,32 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj, | |||
| 462 | } | 474 | } |
| 463 | 475 | ||
| 464 | /* | 476 | /* |
| 477 | * ======== dev_get_dmm_mgr ======== | ||
| 478 | * Purpose: | ||
| 479 | * Retrieve the handle to the dynamic memory manager created for this | ||
| 480 | * device. | ||
| 481 | */ | ||
| 482 | int dev_get_dmm_mgr(struct dev_object *hdev_obj, | ||
| 483 | struct dmm_object **mgr) | ||
| 484 | { | ||
| 485 | int status = 0; | ||
| 486 | struct dev_object *dev_obj = hdev_obj; | ||
| 487 | |||
| 488 | DBC_REQUIRE(refs > 0); | ||
| 489 | DBC_REQUIRE(mgr != NULL); | ||
| 490 | |||
| 491 | if (hdev_obj) { | ||
| 492 | *mgr = dev_obj->dmm_mgr; | ||
| 493 | } else { | ||
| 494 | *mgr = NULL; | ||
| 495 | status = -EFAULT; | ||
| 496 | } | ||
| 497 | |||
| 498 | DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL)); | ||
| 499 | return status; | ||
| 500 | } | ||
| 501 | |||
| 502 | /* | ||
| 465 | * ======== dev_get_cod_mgr ======== | 503 | * ======== dev_get_cod_mgr ======== |
| 466 | * Purpose: | 504 | * Purpose: |
| 467 | * Retrieve the COD manager create for this device. | 505 | * Retrieve the COD manager create for this device. |
| @@ -713,8 +751,10 @@ void dev_exit(void) | |||
| 713 | 751 | ||
| 714 | refs--; | 752 | refs--; |
| 715 | 753 | ||
| 716 | if (refs == 0) | 754 | if (refs == 0) { |
| 717 | cmm_exit(); | 755 | cmm_exit(); |
| 756 | dmm_exit(); | ||
| 757 | } | ||
| 718 | 758 | ||
| 719 | DBC_ENSURE(refs >= 0); | 759 | DBC_ENSURE(refs >= 0); |
| 720 | } | 760 | } |
| @@ -726,12 +766,25 @@ void dev_exit(void) | |||
| 726 | */ | 766 | */ |
| 727 | bool dev_init(void) | 767 | bool dev_init(void) |
| 728 | { | 768 | { |
| 729 | bool ret = true; | 769 | bool cmm_ret, dmm_ret, ret = true; |
| 730 | 770 | ||
| 731 | DBC_REQUIRE(refs >= 0); | 771 | DBC_REQUIRE(refs >= 0); |
| 732 | 772 | ||
| 733 | if (refs == 0) | 773 | if (refs == 0) { |
| 734 | ret = cmm_init(); | 774 | cmm_ret = cmm_init(); |
| 775 | dmm_ret = dmm_init(); | ||
| 776 | |||
| 777 | ret = cmm_ret && dmm_ret; | ||
| 778 | |||
| 779 | if (!ret) { | ||
| 780 | if (cmm_ret) | ||
| 781 | cmm_exit(); | ||
| 782 | |||
| 783 | if (dmm_ret) | ||
| 784 | dmm_exit(); | ||
| 785 | |||
| 786 | } | ||
| 787 | } | ||
| 735 | 788 | ||
| 736 | if (ret) | 789 | if (ret) |
| 737 | refs++; | 790 | refs++; |
| @@ -1065,6 +1118,8 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns, | |||
| 1065 | STORE_FXN(fxn_brd_setstate, pfn_brd_set_state); | 1118 | STORE_FXN(fxn_brd_setstate, pfn_brd_set_state); |
| 1066 | STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy); | 1119 | STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy); |
| 1067 | STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write); | 1120 | STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write); |
| 1121 | STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map); | ||
| 1122 | STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map); | ||
| 1068 | STORE_FXN(fxn_chnl_create, pfn_chnl_create); | 1123 | STORE_FXN(fxn_chnl_create, pfn_chnl_create); |
| 1069 | STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy); | 1124 | STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy); |
| 1070 | STORE_FXN(fxn_chnl_open, pfn_chnl_open); | 1125 | STORE_FXN(fxn_chnl_open, pfn_chnl_open); |
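
The dev_init() change above is worth pausing on: when two sub-modules are brought up together, a failure in either must roll back whichever one succeeded, or a later dev_exit() would tear down state that was never created. A minimal standalone sketch of the idiom (the sub_a/sub_b names are illustrative stand-ins, not driver symbols):

```c
#include <stdbool.h>

/* Stand-ins for the two sub-module init/exit pairs; the names are
 * illustrative, not driver symbols. */
static bool sub_a_init(void) { return true; }
static void sub_a_exit(void) { }
static bool sub_b_init(void) { return true; }
static void sub_b_exit(void) { }

static int refs;

/* Bring up both sub-modules on the first reference; if either init
 * fails, roll back the one that succeeded so no half-initialized
 * state is left behind. */
bool module_init_paired(void)
{
	bool a_ok, b_ok, ok = true;

	if (refs == 0) {
		a_ok = sub_a_init();
		b_ok = sub_b_init();
		ok = a_ok && b_ok;
		if (!ok) {
			if (a_ok)
				sub_a_exit();
			if (b_ok)
				sub_b_exit();
		}
	}
	if (ok)
		refs++;
	return ok;
}
```

dev_exit() mirrors this by calling both exit routines only once the reference count drops to zero, as in the hunk above.
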
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c new file mode 100644 index 000000000000..8685233d7627 --- /dev/null +++ b/drivers/staging/tidspbridge/pmgr/dmm.c | |||
| @@ -0,0 +1,533 @@ | |||
| 1 | /* | ||
| 2 | * dmm.c | ||
| 3 | * | ||
| 4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
| 5 | * | ||
| 6 | * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address | ||
| 7 | * space that can be directly mapped to any MPU buffer or memory region | ||
| 8 | * | ||
| 9 | * Notes: | ||
| 10 | * Region: Generic memory entity having a start address and a size | ||
| 11 | * Chunk: Reserved region | ||
| 12 | * | ||
| 13 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
| 14 | * | ||
| 15 | * This package is free software; you can redistribute it and/or modify | ||
| 16 | * it under the terms of the GNU General Public License version 2 as | ||
| 17 | * published by the Free Software Foundation. | ||
| 18 | * | ||
| 19 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
| 20 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
| 21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
| 22 | */ | ||
| 23 | #include <linux/types.h> | ||
| 24 | |||
| 25 | /* ----------------------------------- Host OS */ | ||
| 26 | #include <dspbridge/host_os.h> | ||
| 27 | |||
| 28 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
| 29 | #include <dspbridge/dbdefs.h> | ||
| 30 | |||
| 31 | /* ----------------------------------- Trace & Debug */ | ||
| 32 | #include <dspbridge/dbc.h> | ||
| 33 | |||
| 34 | /* ----------------------------------- OS Adaptation Layer */ | ||
| 35 | #include <dspbridge/sync.h> | ||
| 36 | |||
| 37 | /* ----------------------------------- Platform Manager */ | ||
| 38 | #include <dspbridge/dev.h> | ||
| 39 | #include <dspbridge/proc.h> | ||
| 40 | |||
| 41 | /* ----------------------------------- This */ | ||
| 42 | #include <dspbridge/dmm.h> | ||
| 43 | |||
| 44 | /* ----------------------------------- Defines, Data Structures, Typedefs */ | ||
| 45 | #define DMM_ADDR_VIRTUAL(a) \ | ||
| 46 | (((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\ | ||
| 47 | dyn_mem_map_beg) | ||
| 48 | #define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K) | ||
| 49 | |||
| 50 | /* DMM Mgr */ | ||
| 51 | struct dmm_object { | ||
| 52 | /* The DMM lock is used to serialize access to the memory | ||
| 53 | * manager from multiple threads. */ | ||
| 54 | spinlock_t dmm_lock; /* Lock to access dmm mgr */ | ||
| 55 | }; | ||
| 56 | |||
| 57 | /* ----------------------------------- Globals */ | ||
| 58 | static u32 refs; /* module reference count */ | ||
| 59 | struct map_page { | ||
| 60 | u32 region_size:15; | ||
| 61 | u32 mapped_size:15; | ||
| 62 | u32 reserved:1; | ||
| 63 | u32 mapped:1; | ||
| 64 | }; | ||
| 65 | |||
| 66 | /* Create the free list */ | ||
| 67 | static struct map_page *virtual_mapping_table; | ||
| 68 | static u32 free_region; /* The index of free region */ | ||
| 69 | static u32 free_size; | ||
| 70 | static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */ | ||
| 71 | static u32 table_size; /* The size of the virtual and physical page tables */ | ||
| 72 | |||
| 73 | /* ----------------------------------- Function Prototypes */ | ||
| 74 | static struct map_page *get_region(u32 addr); | ||
| 75 | static struct map_page *get_free_region(u32 len); | ||
| 76 | static struct map_page *get_mapped_region(u32 addrs); | ||
| 77 | |||
| 78 | /* ======== dmm_create_tables ======== | ||
| 79 | * Purpose: | ||
| 80 | * Create a table to hold the information of the physical | ||
| 81 | * addresses of the buffer pages passed by the user, and a | ||
| 82 | * table to hold the information of the virtual memory that | ||
| 83 | * is reserved for the DSP. | ||
| 84 | */ | ||
| 85 | int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size) | ||
| 86 | { | ||
| 87 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
| 88 | int status = 0; | ||
| 89 | |||
| 90 | status = dmm_delete_tables(dmm_obj); | ||
| 91 | if (!status) { | ||
| 92 | dyn_mem_map_beg = addr; | ||
| 93 | table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K; | ||
| 94 | /* Create the free list */ | ||
| 95 | virtual_mapping_table = __vmalloc(table_size * | ||
| 96 | sizeof(struct map_page), GFP_KERNEL | | ||
| 97 | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); | ||
| 98 | if (virtual_mapping_table == NULL) | ||
| 99 | status = -ENOMEM; | ||
| 100 | else { | ||
| 101 | /* On successful allocation, | ||
| 102 | * all entries are zero ('free') */ | ||
| 103 | free_region = 0; | ||
| 104 | free_size = table_size * PG_SIZE4K; | ||
| 105 | virtual_mapping_table[0].region_size = table_size; | ||
| 106 | } | ||
| 107 | } | ||
| 108 | |||
| 109 | if (status) | ||
| 110 | pr_err("%s: failure, status 0x%x\n", __func__, status); | ||
| 111 | |||
| 112 | return status; | ||
| 113 | } | ||
| 114 | |||
| 115 | /* | ||
| 116 | * ======== dmm_create ======== | ||
| 117 | * Purpose: | ||
| 118 | * Create a dynamic memory manager object. | ||
| 119 | */ | ||
| 120 | int dmm_create(struct dmm_object **dmm_manager, | ||
| 121 | struct dev_object *hdev_obj, | ||
| 122 | const struct dmm_mgrattrs *mgr_attrts) | ||
| 123 | { | ||
| 124 | struct dmm_object *dmm_obj = NULL; | ||
| 125 | int status = 0; | ||
| 126 | DBC_REQUIRE(refs > 0); | ||
| 127 | DBC_REQUIRE(dmm_manager != NULL); | ||
| 128 | |||
| 129 | *dmm_manager = NULL; | ||
| 130 | /* create, zero, and tag a dmm mgr object */ | ||
| 131 | dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL); | ||
| 132 | if (dmm_obj != NULL) { | ||
| 133 | spin_lock_init(&dmm_obj->dmm_lock); | ||
| 134 | *dmm_manager = dmm_obj; | ||
| 135 | } else { | ||
| 136 | status = -ENOMEM; | ||
| 137 | } | ||
| 138 | |||
| 139 | return status; | ||
| 140 | } | ||
| 141 | |||
| 142 | /* | ||
| 143 | * ======== dmm_destroy ======== | ||
| 144 | * Purpose: | ||
| 145 | * Release the dynamic memory manager resources. | ||
| 146 | */ | ||
| 147 | int dmm_destroy(struct dmm_object *dmm_mgr) | ||
| 148 | { | ||
| 149 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
| 150 | int status = 0; | ||
| 151 | |||
| 152 | DBC_REQUIRE(refs > 0); | ||
| 153 | if (dmm_mgr) { | ||
| 154 | status = dmm_delete_tables(dmm_obj); | ||
| 155 | if (!status) | ||
| 156 | kfree(dmm_obj); | ||
| 157 | } else | ||
| 158 | status = -EFAULT; | ||
| 159 | |||
| 160 | return status; | ||
| 161 | } | ||
| 162 | |||
| 163 | /* | ||
| 164 | * ======== dmm_delete_tables ======== | ||
| 165 | * Purpose: | ||
| 166 | * Delete DMM Tables. | ||
| 167 | */ | ||
| 168 | int dmm_delete_tables(struct dmm_object *dmm_mgr) | ||
| 169 | { | ||
| 170 | int status = 0; | ||
| 171 | |||
| 172 | DBC_REQUIRE(refs > 0); | ||
| 173 | /* Delete all DMM tables */ | ||
| 174 | if (dmm_mgr) | ||
| 175 | vfree(virtual_mapping_table); | ||
| 176 | else | ||
| 177 | status = -EFAULT; | ||
| 178 | return status; | ||
| 179 | } | ||
| 180 | |||
| 181 | /* | ||
| 182 | * ======== dmm_exit ======== | ||
| 183 | * Purpose: | ||
| 184 | * Discontinue usage of module; free resources when reference count | ||
| 185 | * reaches 0. | ||
| 186 | */ | ||
| 187 | void dmm_exit(void) | ||
| 188 | { | ||
| 189 | DBC_REQUIRE(refs > 0); | ||
| 190 | |||
| 191 | refs--; | ||
| 192 | } | ||
| 193 | |||
| 194 | /* | ||
| 195 | * ======== dmm_get_handle ======== | ||
| 196 | * Purpose: | ||
| 197 | * Return the dynamic memory manager object for this device. | ||
| 198 | * This is typically called from the client process. | ||
| 199 | */ | ||
| 200 | int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager) | ||
| 201 | { | ||
| 202 | int status = 0; | ||
| 203 | struct dev_object *hdev_obj; | ||
| 204 | |||
| 205 | DBC_REQUIRE(refs > 0); | ||
| 206 | DBC_REQUIRE(dmm_manager != NULL); | ||
| 207 | if (hprocessor != NULL) | ||
| 208 | status = proc_get_dev_object(hprocessor, &hdev_obj); | ||
| 209 | else | ||
| 210 | hdev_obj = dev_get_first(); /* default */ | ||
| 211 | |||
| 212 | if (!status) | ||
| 213 | status = dev_get_dmm_mgr(hdev_obj, dmm_manager); | ||
| 214 | |||
| 215 | return status; | ||
| 216 | } | ||
| 217 | |||
| 218 | /* | ||
| 219 | * ======== dmm_init ======== | ||
| 220 | * Purpose: | ||
| 221 | * Initializes private state of DMM module. | ||
| 222 | */ | ||
| 223 | bool dmm_init(void) | ||
| 224 | { | ||
| 225 | bool ret = true; | ||
| 226 | |||
| 227 | DBC_REQUIRE(refs >= 0); | ||
| 228 | |||
| 229 | if (ret) | ||
| 230 | refs++; | ||
| 231 | |||
| 232 | DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0))); | ||
| 233 | |||
| 234 | virtual_mapping_table = NULL; | ||
| 235 | table_size = 0; | ||
| 236 | |||
| 237 | return ret; | ||
| 238 | } | ||
| 239 | |||
| 240 | /* | ||
| 241 | * ======== dmm_map_memory ======== | ||
| 242 | * Purpose: | ||
| 243 | * Add a mapping block to the reserved chunk. DMM assumes that this block | ||
| 244 | * will be mapped in the DSP/IVA's address space. DMM returns an error if a | ||
| 245 | * mapping overlaps another one. This function stores the info that will be | ||
| 246 | * required later while unmapping the block. | ||
| 247 | */ | ||
| 248 | int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size) | ||
| 249 | { | ||
| 250 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
| 251 | struct map_page *chunk; | ||
| 252 | int status = 0; | ||
| 253 | |||
| 254 | spin_lock(&dmm_obj->dmm_lock); | ||
| 255 | /* Find the Reserved memory chunk containing the DSP block to | ||
| 256 | * be mapped */ | ||
| 257 | chunk = (struct map_page *)get_region(addr); | ||
| 258 | if (chunk != NULL) { | ||
| 259 | /* Mark the region 'mapped', leave the 'reserved' info as-is */ | ||
| 260 | chunk->mapped = true; | ||
| 261 | chunk->mapped_size = (size / PG_SIZE4K); | ||
| 262 | } else | ||
| 263 | status = -ENOENT; | ||
| 264 | spin_unlock(&dmm_obj->dmm_lock); | ||
| 265 | |||
| 266 | dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, " | ||
| 267 | "chunk %p", __func__, dmm_mgr, addr, size, status, chunk); | ||
| 268 | |||
| 269 | return status; | ||
| 270 | } | ||
| 271 | |||
| 272 | /* | ||
| 273 | * ======== dmm_reserve_memory ======== | ||
| 274 | * Purpose: | ||
| 275 | * Reserve a chunk of virtually contiguous DSP/IVA address space. | ||
| 276 | */ | ||
| 277 | int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size, | ||
| 278 | u32 *prsv_addr) | ||
| 279 | { | ||
| 280 | int status = 0; | ||
| 281 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
| 282 | struct map_page *node; | ||
| 283 | u32 rsv_addr = 0; | ||
| 284 | u32 rsv_size = 0; | ||
| 285 | |||
| 286 | spin_lock(&dmm_obj->dmm_lock); | ||
| 287 | |||
| 288 | /* Try to get a DSP chunk from the free list */ | ||
| 289 | node = get_free_region(size); | ||
| 290 | if (node != NULL) { | ||
| 291 | /* DSP chunk of given size is available. */ | ||
| 292 | rsv_addr = DMM_ADDR_VIRTUAL(node); | ||
| 293 | /* Calculate the number of entries to use */ | ||
| 294 | rsv_size = size / PG_SIZE4K; | ||
| 295 | if (rsv_size < node->region_size) { | ||
| 296 | /* Mark remainder of free region */ | ||
| 297 | node[rsv_size].mapped = false; | ||
| 298 | node[rsv_size].reserved = false; | ||
| 299 | node[rsv_size].region_size = | ||
| 300 | node->region_size - rsv_size; | ||
| 301 | node[rsv_size].mapped_size = 0; | ||
| 302 | } | ||
| 303 | /* get_region will return the first-fit chunk, but we only | ||
| 304 | * use what is requested. */ | ||
| 305 | node->mapped = false; | ||
| 306 | node->reserved = true; | ||
| 307 | node->region_size = rsv_size; | ||
| 308 | node->mapped_size = 0; | ||
| 309 | /* Return the chunk's starting address */ | ||
| 310 | *prsv_addr = rsv_addr; | ||
| 311 | } else | ||
| 312 | /* DSP chunk of given size is not available */ | ||
| 313 | status = -ENOMEM; | ||
| 314 | |||
| 315 | spin_unlock(&dmm_obj->dmm_lock); | ||
| 316 | |||
| 317 | dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, " | ||
| 318 | "rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size, | ||
| 319 | prsv_addr, status, rsv_addr, rsv_size); | ||
| 320 | |||
| 321 | return status; | ||
| 322 | } | ||
| 323 | |||
| 324 | /* | ||
| 325 | * ======== dmm_un_map_memory ======== | ||
| 326 | * Purpose: | ||
| 327 | * Remove the mapped block from the reserved chunk. | ||
| 328 | */ | ||
| 329 | int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize) | ||
| 330 | { | ||
| 331 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
| 332 | struct map_page *chunk; | ||
| 333 | int status = 0; | ||
| 334 | |||
| 335 | spin_lock(&dmm_obj->dmm_lock); | ||
| 336 | chunk = get_mapped_region(addr); | ||
| 337 | if (chunk == NULL) | ||
| 338 | status = -ENOENT; | ||
| 339 | |||
| 340 | if (!status) { | ||
| 341 | /* Unmap the region */ | ||
| 342 | *psize = chunk->mapped_size * PG_SIZE4K; | ||
| 343 | chunk->mapped = false; | ||
| 344 | chunk->mapped_size = 0; | ||
| 345 | } | ||
| 346 | spin_unlock(&dmm_obj->dmm_lock); | ||
| 347 | |||
| 348 | dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, " | ||
| 349 | "chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk); | ||
| 350 | |||
| 351 | return status; | ||
| 352 | } | ||
| 353 | |||
| 354 | /* | ||
| 355 | * ======== dmm_un_reserve_memory ======== | ||
| 356 | * Purpose: | ||
| 357 | * Free a chunk of reserved DSP/IVA address space. | ||
| 358 | */ | ||
| 359 | int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr) | ||
| 360 | { | ||
| 361 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
| 362 | struct map_page *chunk; | ||
| 363 | u32 i; | ||
| 364 | int status = 0; | ||
| 365 | u32 chunk_size; | ||
| 366 | |||
| 367 | spin_lock(&dmm_obj->dmm_lock); | ||
| 368 | |||
| 369 | /* Find the chunk containing the reserved address */ | ||
| 370 | chunk = get_mapped_region(rsv_addr); | ||
| 371 | if (chunk == NULL) | ||
| 372 | status = -ENOENT; | ||
| 373 | |||
| 374 | if (!status) { | ||
| 375 | /* Free all the mapped pages for this reserved region */ | ||
| 376 | i = 0; | ||
| 377 | while (i < chunk->region_size) { | ||
| 378 | if (chunk[i].mapped) { | ||
| 379 | /* Remove mapping from the page tables. */ | ||
| 380 | chunk_size = chunk[i].mapped_size; | ||
| 381 | /* Clear the mapping flags */ | ||
| 382 | chunk[i].mapped = false; | ||
| 383 | chunk[i].mapped_size = 0; | ||
| 384 | i += chunk_size; | ||
| 385 | } else | ||
| 386 | i++; | ||
| 387 | } | ||
| 388 | /* Clear the flags (mark the region 'free') */ | ||
| 389 | chunk->reserved = false; | ||
| 390 | /* NOTE: We do NOT coalesce free regions here. | ||
| 391 | * Free regions are coalesced in get_free_region(), as it | ||
| 392 | * traverses the whole mapping table. | ||
| 393 | */ | ||
| 394 | } | ||
| 395 | spin_unlock(&dmm_obj->dmm_lock); | ||
| 396 | |||
| 397 | dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p", | ||
| 398 | __func__, dmm_mgr, rsv_addr, status, chunk); | ||
| 399 | |||
| 400 | return status; | ||
| 401 | } | ||
| 402 | |||
| 403 | /* | ||
| 404 | * ======== get_region ======== | ||
| 405 | * Purpose: | ||
| 406 | * Returns the region containing the specified address | ||
| 407 | */ | ||
| 408 | static struct map_page *get_region(u32 addr) | ||
| 409 | { | ||
| 410 | struct map_page *curr_region = NULL; | ||
| 411 | u32 i = 0; | ||
| 412 | |||
| 413 | if (virtual_mapping_table != NULL) { | ||
| 414 | /* find page mapped by this address */ | ||
| 415 | i = DMM_ADDR_TO_INDEX(addr); | ||
| 416 | if (i < table_size) | ||
| 417 | curr_region = virtual_mapping_table + i; | ||
| 418 | } | ||
| 419 | |||
| 420 | dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n", | ||
| 421 | __func__, curr_region, free_region, free_size); | ||
| 422 | return curr_region; | ||
| 423 | } | ||
| 424 | |||
| 425 | /* | ||
| 426 | * ======== get_free_region ======== | ||
| 427 | * Purpose: | ||
| 428 | * Returns the requested free region | ||
| 429 | */ | ||
| 430 | static struct map_page *get_free_region(u32 len) | ||
| 431 | { | ||
| 432 | struct map_page *curr_region = NULL; | ||
| 433 | u32 i = 0; | ||
| 434 | u32 region_size = 0; | ||
| 435 | u32 next_i = 0; | ||
| 436 | |||
| 437 | if (virtual_mapping_table == NULL) | ||
| 438 | return curr_region; | ||
| 439 | if (len > free_size) { | ||
| 440 | /* Find the largest free region | ||
| 441 | * (coalesce during the traversal) */ | ||
| 442 | while (i < table_size) { | ||
| 443 | region_size = virtual_mapping_table[i].region_size; | ||
| 444 | next_i = i + region_size; | ||
| 445 | if (virtual_mapping_table[i].reserved == false) { | ||
| 446 | /* Coalesce, if possible */ | ||
| 447 | if (next_i < table_size && | ||
| 448 | virtual_mapping_table[next_i].reserved | ||
| 449 | == false) { | ||
| 450 | virtual_mapping_table[i].region_size += | ||
| 451 | virtual_mapping_table | ||
| 452 | [next_i].region_size; | ||
| 453 | continue; | ||
| 454 | } | ||
| 455 | region_size *= PG_SIZE4K; | ||
| 456 | if (region_size > free_size) { | ||
| 457 | free_region = i; | ||
| 458 | free_size = region_size; | ||
| 459 | } | ||
| 460 | } | ||
| 461 | i = next_i; | ||
| 462 | } | ||
| 463 | } | ||
| 464 | if (len <= free_size) { | ||
| 465 | curr_region = virtual_mapping_table + free_region; | ||
| 466 | free_region += (len / PG_SIZE4K); | ||
| 467 | free_size -= len; | ||
| 468 | } | ||
| 469 | return curr_region; | ||
| 470 | } | ||
| 471 | |||
| 472 | /* | ||
| 473 | * ======== get_mapped_region ======== | ||
| 474 | * Purpose: | ||
| 475 | * Returns the requested mapped region | ||
| 476 | */ | ||
| 477 | static struct map_page *get_mapped_region(u32 addrs) | ||
| 478 | { | ||
| 479 | u32 i = 0; | ||
| 480 | struct map_page *curr_region = NULL; | ||
| 481 | |||
| 482 | if (virtual_mapping_table == NULL) | ||
| 483 | return curr_region; | ||
| 484 | |||
| 485 | i = DMM_ADDR_TO_INDEX(addrs); | ||
| 486 | if (i < table_size && (virtual_mapping_table[i].mapped || | ||
| 487 | virtual_mapping_table[i].reserved)) | ||
| 488 | curr_region = virtual_mapping_table + i; | ||
| 489 | return curr_region; | ||
| 490 | } | ||
| 491 | |||
| 492 | #ifdef DSP_DMM_DEBUG | ||
| 493 | u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr) | ||
| 494 | { | ||
| 495 | struct map_page *curr_node = NULL; | ||
| 496 | u32 i; | ||
| 497 | u32 freemem = 0; | ||
| 498 | u32 bigsize = 0; | ||
| 499 | |||
| 500 | spin_lock(&dmm_mgr->dmm_lock); | ||
| 501 | |||
| 502 | if (virtual_mapping_table != NULL) { | ||
| 503 | for (i = 0; i < table_size; i += | ||
| 504 | virtual_mapping_table[i].region_size) { | ||
| 505 | curr_node = virtual_mapping_table + i; | ||
| 506 | if (curr_node->reserved) { | ||
| 507 | /*printk("RESERVED size = 0x%x, " | ||
| 508 | "Map size = 0x%x\n", | ||
| 509 | (curr_node->region_size * PG_SIZE4K), | ||
| 510 | (curr_node->mapped == false) ? 0 : | ||
| 511 | (curr_node->mapped_size * PG_SIZE4K)); | ||
| 512 | */ | ||
| 513 | } else { | ||
| 514 | /* printk("UNRESERVED size = 0x%x\n", | ||
| 515 | (curr_node->region_size * PG_SIZE4K)); | ||
| 516 | */ | ||
| 517 | freemem += (curr_node->region_size * PG_SIZE4K); | ||
| 518 | if (curr_node->region_size > bigsize) | ||
| 519 | bigsize = curr_node->region_size; | ||
| 520 | } | ||
| 521 | } | ||
| 522 | } | ||
| 523 | spin_unlock(&dmm_mgr->dmm_lock); | ||
| 524 | printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n", | ||
| 525 | freemem / (1024 * 1024)); | ||
| 526 | printk(KERN_INFO "Total DSP VA USED memory = %d Mbytes\n", | ||
| 527 | (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024)); | ||
| 528 | printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes\n\n", | ||
| 529 | (bigsize * PG_SIZE4K / (1024 * 1024))); | ||
| 530 | |||
| 531 | return 0; | ||
| 532 | } | ||
| 533 | #endif | ||
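
The new pmgr/dmm.c above splits address management into two steps: reserving a chunk of DSP virtual address space, then marking sub-ranges of it mapped. A hedged sketch of the full lifecycle from a caller's point of view, assuming the bridge headers are on the include path; dev_obj, the pool base, and the sizes are assumptions for the example, not values from the driver:

```c
#include <dspbridge/dmm.h>

/* Illustrative walk through the DMM lifecycle; not driver code. */
static int dmm_lifecycle_demo(struct dev_object *dev_obj)
{
	struct dmm_object *dmm;
	u32 rsv_addr, unmap_size;
	int status;

	status = dmm_create(&dmm, dev_obj, NULL);
	if (status)
		return status;

	/* Seed the allocator: a 16 MB DSP VA pool at 0x20000000. */
	status = dmm_create_tables(dmm, 0x20000000, 0x1000000);
	if (status)
		goto out;

	/* Reserve a 64 KB virtually contiguous DSP chunk... */
	status = dmm_reserve_memory(dmm, 0x10000, &rsv_addr);
	if (status)
		goto out;

	/* ...mark 16 KB of it mapped, then tear everything down. */
	status = dmm_map_memory(dmm, rsv_addr, 0x4000);
	if (!status)
		status = dmm_un_map_memory(dmm, rsv_addr, &unmap_size);

	dmm_un_reserve_memory(dmm, rsv_addr);
out:
	dmm_destroy(dmm);
	return status;
}
```

Note that unmapping does not release the reservation: the reserved flag survives dmm_un_map_memory(), which is why mapping and reservation remain separate teardown operations in the proc.c hunks further down.
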
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c index 981551ce4d78..86ca785f1913 100644 --- a/drivers/staging/tidspbridge/pmgr/dspapi.c +++ b/drivers/staging/tidspbridge/pmgr/dspapi.c | |||
| @@ -993,10 +993,27 @@ u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt) | |||
| 993 | /* | 993 | /* |
| 994 | * ======== procwrap_reserve_memory ======== | 994 | * ======== procwrap_reserve_memory ======== |
| 995 | */ | 995 | */ |
| 996 | u32 __deprecated procwrap_reserve_memory(union trapped_args *args, | 996 | u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt) |
| 997 | void *pr_ctxt) | ||
| 998 | { | 997 | { |
| 999 | return 0; | 998 | int status; |
| 999 | void *prsv_addr; | ||
| 1000 | void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; | ||
| 1001 | |||
| 1002 | if ((args->args_proc_rsvmem.ul_size <= 0) || | ||
| 1003 | (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0) | ||
| 1004 | return -EINVAL; | ||
| 1005 | |||
| 1006 | status = proc_reserve_memory(hprocessor, | ||
| 1007 | args->args_proc_rsvmem.ul_size, &prsv_addr, | ||
| 1008 | pr_ctxt); | ||
| 1009 | if (!status) { | ||
| 1010 | if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) { | ||
| 1011 | status = -EINVAL; | ||
| 1012 | proc_un_reserve_memory(args->args_proc_rsvmem. | ||
| 1013 | hprocessor, prsv_addr, pr_ctxt); | ||
| 1014 | } | ||
| 1015 | } | ||
| 1016 | return status; | ||
| 1000 | } | 1017 | } |
| 1001 | 1018 | ||
| 1002 | /* | 1019 | /* |
| @@ -1025,10 +1042,15 @@ u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt) | |||
| 1025 | /* | 1042 | /* |
| 1026 | * ======== procwrap_un_reserve_memory ======== | 1043 | * ======== procwrap_un_reserve_memory ======== |
| 1027 | */ | 1044 | */ |
| 1028 | u32 __deprecated procwrap_un_reserve_memory(union trapped_args *args, | 1045 | u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt) |
| 1029 | void *pr_ctxt) | ||
| 1030 | { | 1046 | { |
| 1031 | return 0; | 1047 | int status; |
| 1048 | void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; | ||
| 1049 | |||
| 1050 | status = proc_un_reserve_memory(hprocessor, | ||
| 1051 | args->args_proc_unrsvmem.prsv_addr, | ||
| 1052 | pr_ctxt); | ||
| 1053 | return status; | ||
| 1032 | } | 1054 | } |
| 1033 | 1055 | ||
| 1034 | /* | 1056 | /* |
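
procwrap_reserve_memory() above now rejects requests that are zero-sized or not a multiple of the 4 KB page size before touching the DMM. The predicate it applies reduces to a simple mask test; a small standalone sketch (PG_SIZE4K here mirrors the driver's constant):

```c
#include <stdbool.h>
#include <stdint.h>

#define PG_SIZE4K 4096u	/* mirrors the driver's 4 KB page constant */

/* A reservation size is acceptable only if it is nonzero and a whole
 * number of 4 KB pages, as procwrap_reserve_memory() now checks. */
static bool rsv_size_ok(uint32_t size)
{
	return size != 0 && (size & (PG_SIZE4K - 1)) == 0;
}
```

So rsv_size_ok(0x4000) passes, while rsv_size_ok(0x4001) fails because 0x4001 & 0xfff is nonzero.
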
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c index 91cc168516e5..81b1b9013550 100644 --- a/drivers/staging/tidspbridge/rmgr/drv.c +++ b/drivers/staging/tidspbridge/rmgr/drv.c | |||
| @@ -146,6 +146,7 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt) | |||
| 146 | struct process_context *ctxt = (struct process_context *)process_ctxt; | 146 | struct process_context *ctxt = (struct process_context *)process_ctxt; |
| 147 | int status = 0; | 147 | int status = 0; |
| 148 | struct dmm_map_object *temp_map, *map_obj; | 148 | struct dmm_map_object *temp_map, *map_obj; |
| 149 | struct dmm_rsv_object *temp_rsv, *rsv_obj; | ||
| 149 | 150 | ||
| 150 | /* Free DMM mapped memory resources */ | 151 | /* Free DMM mapped memory resources */ |
| 151 | list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) { | 152 | list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) { |
| @@ -155,6 +156,16 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt) | |||
| 155 | pr_err("%s: proc_un_map failed!" | 156 | pr_err("%s: proc_un_map failed!" |
| 156 | " status = 0x%x\n", __func__, status); | 157 | " status = 0x%x\n", __func__, status); |
| 157 | } | 158 | } |
| 159 | |||
| 160 | /* Free DMM reserved memory resources */ | ||
| 161 | list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) { | ||
| 162 | status = proc_un_reserve_memory(ctxt->hprocessor, (void *) | ||
| 163 | rsv_obj->dsp_reserved_addr, | ||
| 164 | ctxt); | ||
| 165 | if (status) | ||
| 166 | pr_err("%s: proc_un_reserve_memory failed!" | ||
| 167 | " status = 0x%x\n", __func__, status); | ||
| 168 | } | ||
| 158 | return status; | 169 | return status; |
| 159 | } | 170 | } |
| 160 | 171 | ||
| @@ -732,6 +743,7 @@ static int request_bridge_resources(struct cfg_hostres *res) | |||
| 732 | host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE); | 743 | host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE); |
| 733 | dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]); | 744 | dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]); |
| 734 | dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]); | 745 | dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]); |
| 746 | dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base); | ||
| 735 | 747 | ||
| 736 | /* for 24xx the base port does not map the memory for DSP | 748 | /* for 24xx the base port does not map the memory for DSP |
| 737 | * internal memory. TODO: do an ioremap here */ | 749 | * internal memory. TODO: do an ioremap here */ |
| @@ -785,6 +797,8 @@ int drv_request_bridge_res_dsp(void **phost_resources) | |||
| 785 | OMAP_PER_PRM_SIZE); | 797 | OMAP_PER_PRM_SIZE); |
| 786 | host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, | 798 | host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, |
| 787 | OMAP_CORE_PRM_SIZE); | 799 | OMAP_CORE_PRM_SIZE); |
| 800 | host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE, | ||
| 801 | OMAP_DMMU_SIZE); | ||
| 788 | 802 | ||
| 789 | dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", | 803 | dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", |
| 790 | host_res->dw_mem_base[0]); | 804 | host_res->dw_mem_base[0]); |
| @@ -796,6 +810,7 @@ int drv_request_bridge_res_dsp(void **phost_resources) | |||
| 796 | host_res->dw_mem_base[3]); | 810 | host_res->dw_mem_base[3]); |
| 797 | dev_dbg(bridge, "dw_mem_base[4] 0x%x\n", | 811 | dev_dbg(bridge, "dw_mem_base[4] 0x%x\n", |
| 798 | host_res->dw_mem_base[4]); | 812 | host_res->dw_mem_base[4]); |
| 813 | dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base); | ||
| 799 | 814 | ||
| 800 | shm_size = drv_datap->shm_size; | 815 | shm_size = drv_datap->shm_size; |
| 801 | if (shm_size >= 0x10000) { | 816 | if (shm_size >= 0x10000) { |
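
drv_remove_all_dmm_res_elements() walks the per-process reservation list with list_for_each_entry_safe() because each node is unlinked and released during the traversal; the plain iterator would read freed memory. A minimal sketch of the pattern, with an illustrative record type standing in for the driver's struct dmm_rsv_object:

```c
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative per-process reservation record; the field name follows
 * the driver's struct dmm_rsv_object but this type is a stand-in. */
struct rsv_record {
	u32 dsp_reserved_addr;
	struct list_head link;
};

/* The _safe iterator keeps a lookahead pointer (tmp), so unlinking
 * and freeing the current node does not break the traversal. */
static void release_all_rsv(struct list_head *rsv_list)
{
	struct rsv_record *rsv, *tmp;

	list_for_each_entry_safe(rsv, tmp, rsv_list, link) {
		list_del(&rsv->link);
		kfree(rsv);
	}
}
```
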
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c index 34be43fec044..324fcdffb3b3 100644 --- a/drivers/staging/tidspbridge/rmgr/drv_interface.c +++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c | |||
| @@ -509,6 +509,8 @@ static int bridge_open(struct inode *ip, struct file *filp) | |||
| 509 | pr_ctxt->res_state = PROC_RES_ALLOCATED; | 509 | pr_ctxt->res_state = PROC_RES_ALLOCATED; |
| 510 | spin_lock_init(&pr_ctxt->dmm_map_lock); | 510 | spin_lock_init(&pr_ctxt->dmm_map_lock); |
| 511 | INIT_LIST_HEAD(&pr_ctxt->dmm_map_list); | 511 | INIT_LIST_HEAD(&pr_ctxt->dmm_map_list); |
| 512 | spin_lock_init(&pr_ctxt->dmm_rsv_lock); | ||
| 513 | INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list); | ||
| 512 | 514 | ||
| 513 | pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL); | 515 | pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL); |
| 514 | if (pr_ctxt->node_id) { | 516 | if (pr_ctxt->node_id) { |
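
bridge_open() has to initialize the new reservation lock and list head before any ioctl can reach them; a list head that is never INIT_LIST_HEAD()-ed would crash on the first list_add(). A small illustrative sketch of the same pairing (the struct here is a stand-in, not the driver's process_context):

```c
#include <linux/list.h>
#include <linux/spinlock.h>

/* Stand-in for the two fields bridge_open() now initializes on the
 * real process_context (dmm_rsv_lock / dmm_rsv_list). */
struct demo_process_context {
	spinlock_t dmm_rsv_lock;
	struct list_head dmm_rsv_list;
};

/* Both the lock and the list head must be set up before the first
 * reservation ioctl can take the lock or walk the list. */
static void demo_ctx_init(struct demo_process_context *ctx)
{
	spin_lock_init(&ctx->dmm_rsv_lock);
	INIT_LIST_HEAD(&ctx->dmm_rsv_list);
}
```
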
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c index a660247f527a..1562f3c1281c 100644 --- a/drivers/staging/tidspbridge/rmgr/node.c +++ b/drivers/staging/tidspbridge/rmgr/node.c | |||
| @@ -56,6 +56,7 @@ | |||
| 56 | /* ----------------------------------- This */ | 56 | /* ----------------------------------- This */ |
| 57 | #include <dspbridge/nodepriv.h> | 57 | #include <dspbridge/nodepriv.h> |
| 58 | #include <dspbridge/node.h> | 58 | #include <dspbridge/node.h> |
| 59 | #include <dspbridge/dmm.h> | ||
| 59 | 60 | ||
| 60 | /* Static/Dynamic Loader includes */ | 61 | /* Static/Dynamic Loader includes */ |
| 61 | #include <dspbridge/dbll.h> | 62 | #include <dspbridge/dbll.h> |
| @@ -316,6 +317,10 @@ int node_allocate(struct proc_object *hprocessor, | |||
| 316 | u32 mapped_addr = 0; | 317 | u32 mapped_addr = 0; |
| 317 | u32 map_attrs = 0x0; | 318 | u32 map_attrs = 0x0; |
| 318 | struct dsp_processorstate proc_state; | 319 | struct dsp_processorstate proc_state; |
| 320 | #ifdef DSP_DMM_DEBUG | ||
| 321 | struct dmm_object *dmm_mgr; | ||
| 322 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
| 323 | #endif | ||
| 319 | 324 | ||
| 320 | void *node_res; | 325 | void *node_res; |
| 321 | 326 | ||
| @@ -425,12 +430,34 @@ int node_allocate(struct proc_object *hprocessor, | |||
| 425 | if (status) | 430 | if (status) |
| 426 | goto func_cont; | 431 | goto func_cont; |
| 427 | 432 | ||
| 433 | status = proc_reserve_memory(hprocessor, | ||
| 434 | pnode->create_args.asa.task_arg_obj. | ||
| 435 | heap_size + PAGE_SIZE, | ||
| 436 | (void **)&(pnode->create_args.asa. | ||
| 437 | task_arg_obj.udsp_heap_res_addr), | ||
| 438 | pr_ctxt); | ||
| 439 | if (status) { | ||
| 440 | pr_err("%s: Failed to reserve memory for heap: 0x%x\n", | ||
| 441 | __func__, status); | ||
| 442 | goto func_cont; | ||
| 443 | } | ||
| 444 | #ifdef DSP_DMM_DEBUG | ||
| 445 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | ||
| 446 | if (!dmm_mgr) { | ||
| 447 | status = DSP_EHANDLE; | ||
| 448 | goto func_cont; | ||
| 449 | } | ||
| 450 | |||
| 451 | dmm_mem_map_dump(dmm_mgr); | ||
| 452 | #endif | ||
| 453 | |||
| 428 | map_attrs |= DSP_MAPLITTLEENDIAN; | 454 | map_attrs |= DSP_MAPLITTLEENDIAN; |
| 429 | map_attrs |= DSP_MAPELEMSIZE32; | 455 | map_attrs |= DSP_MAPELEMSIZE32; |
| 430 | map_attrs |= DSP_MAPVIRTUALADDR; | 456 | map_attrs |= DSP_MAPVIRTUALADDR; |
| 431 | status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr, | 457 | status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr, |
| 432 | pnode->create_args.asa.task_arg_obj.heap_size, | 458 | pnode->create_args.asa.task_arg_obj.heap_size, |
| 433 | NULL, (void **)&mapped_addr, map_attrs, | 459 | (void *)pnode->create_args.asa.task_arg_obj. |
| 460 | udsp_heap_res_addr, (void **)&mapped_addr, map_attrs, | ||
| 434 | pr_ctxt); | 461 | pr_ctxt); |
| 435 | if (status) | 462 | if (status) |
| 436 | pr_err("%s: Failed to map memory for Heap: 0x%x\n", | 463 | pr_err("%s: Failed to map memory for Heap: 0x%x\n", |
| @@ -2484,7 +2511,11 @@ static void delete_node(struct node_object *hnode, | |||
| 2484 | struct stream_chnl stream; | 2511 | struct stream_chnl stream; |
| 2485 | struct node_msgargs node_msg_args; | 2512 | struct node_msgargs node_msg_args; |
| 2486 | struct node_taskargs task_arg_obj; | 2513 | struct node_taskargs task_arg_obj; |
| 2487 | 2514 | #ifdef DSP_DMM_DEBUG | |
| 2515 | struct dmm_object *dmm_mgr; | ||
| 2516 | struct proc_object *p_proc_object = | ||
| 2517 | (struct proc_object *)hnode->hprocessor; | ||
| 2518 | #endif | ||
| 2488 | int status; | 2519 | int status; |
| 2489 | if (!hnode) | 2520 | if (!hnode) |
| 2490 | goto func_end; | 2521 | goto func_end; |
| @@ -2545,6 +2576,19 @@ static void delete_node(struct node_object *hnode, | |||
| 2545 | status = proc_un_map(hnode->hprocessor, (void *) | 2576 | status = proc_un_map(hnode->hprocessor, (void *) |
| 2546 | task_arg_obj.udsp_heap_addr, | 2577 | task_arg_obj.udsp_heap_addr, |
| 2547 | pr_ctxt); | 2578 | pr_ctxt); |
| 2579 | |||
| 2580 | status = proc_un_reserve_memory(hnode->hprocessor, | ||
| 2581 | (void *) | ||
| 2582 | task_arg_obj. | ||
| 2583 | udsp_heap_res_addr, | ||
| 2584 | pr_ctxt); | ||
| 2585 | #ifdef DSP_DMM_DEBUG | ||
| 2586 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | ||
| 2587 | if (dmm_mgr) | ||
| 2588 | dmm_mem_map_dump(dmm_mgr); | ||
| 2589 | else | ||
| 2590 | status = DSP_EHANDLE; | ||
| 2591 | #endif | ||
| 2548 | } | 2592 | } |
| 2549 | } | 2593 | } |
| 2550 | if (node_type != NODE_MESSAGE) { | 2594 | if (node_type != NODE_MESSAGE) { |
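
node_allocate() now reserves DSP address space first and then maps the GPP heap buffer into it, handing the reserved address to proc_map() via DSP_MAPVIRTUALADDR; on a map failure the reservation must be returned. A hedged sketch of that sequence, assuming the proc_*() declarations shown in the proc.c hunks below (the wrapper itself is illustrative, not driver code):

```c
#include <dspbridge/proc.h>

/* Hedged sketch of the reserve-then-map sequence in node_allocate();
 * the wrapper and its parameters are illustrative, not driver code. */
static int node_heap_setup(void *hprocessor, void *gpp_va, u32 heap_size,
			   void **rsv_addr, void **mapped_addr,
			   struct process_context *pr_ctxt)
{
	u32 map_attrs = DSP_MAPLITTLEENDIAN | DSP_MAPELEMSIZE32 |
			DSP_MAPVIRTUALADDR;
	int status;

	/* The driver reserves one page beyond the heap size. */
	status = proc_reserve_memory(hprocessor, heap_size + PAGE_SIZE,
				     rsv_addr, pr_ctxt);
	if (status)
		return status;

	/* Map the GPP buffer at the address just reserved. */
	status = proc_map(hprocessor, gpp_va, heap_size, *rsv_addr,
			  mapped_addr, map_attrs, pr_ctxt);
	if (status)
		/* Undo the reservation if the map step failed. */
		proc_un_reserve_memory(hprocessor, *rsv_addr, pr_ctxt);

	return status;
}
```

delete_node() performs the inverse pair in the hunk above: proc_un_map() for the mapping, then proc_un_reserve_memory() for the underlying reservation.
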
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c index 7a15a02efedf..b47d7aa747b1 100644 --- a/drivers/staging/tidspbridge/rmgr/proc.c +++ b/drivers/staging/tidspbridge/rmgr/proc.c | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <dspbridge/cod.h> | 39 | #include <dspbridge/cod.h> |
| 40 | #include <dspbridge/dev.h> | 40 | #include <dspbridge/dev.h> |
| 41 | #include <dspbridge/procpriv.h> | 41 | #include <dspbridge/procpriv.h> |
| 42 | #include <dspbridge/dmm.h> | ||
| 42 | 43 | ||
| 43 | /* ----------------------------------- Resource Manager */ | 44 | /* ----------------------------------- Resource Manager */ |
| 44 | #include <dspbridge/mgr.h> | 45 | #include <dspbridge/mgr.h> |
| @@ -51,7 +52,6 @@ | |||
| 51 | #include <dspbridge/msg.h> | 52 | #include <dspbridge/msg.h> |
| 52 | #include <dspbridge/dspioctl.h> | 53 | #include <dspbridge/dspioctl.h> |
| 53 | #include <dspbridge/drv.h> | 54 | #include <dspbridge/drv.h> |
| 54 | #include <_tiomap.h> | ||
| 55 | 55 | ||
| 56 | /* ----------------------------------- This */ | 56 | /* ----------------------------------- This */ |
| 57 | #include <dspbridge/proc.h> | 57 | #include <dspbridge/proc.h> |
| @@ -151,21 +151,34 @@ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt, | |||
| 151 | return map_obj; | 151 | return map_obj; |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | static int match_exact_map_obj(struct dmm_map_object *map_obj, | ||
| 155 | u32 dsp_addr, u32 size) | ||
| 156 | { | ||
| 157 | if (map_obj->dsp_addr == dsp_addr && map_obj->size != size) | ||
| 158 | pr_err("%s: addr match (0x%x), size doesn't (0x%x != 0x%x)\n", | ||
| 159 | __func__, dsp_addr, map_obj->size, size); | ||
| 160 | |||
| 161 | return map_obj->dsp_addr == dsp_addr && | ||
| 162 | map_obj->size == size; | ||
| 163 | } | ||
| 164 | |||
| 154 | static void remove_mapping_information(struct process_context *pr_ctxt, | 165 | static void remove_mapping_information(struct process_context *pr_ctxt, |
| 155 | u32 dsp_addr) | 166 | u32 dsp_addr, u32 size) |
| 156 | { | 167 | { |
| 157 | struct dmm_map_object *map_obj; | 168 | struct dmm_map_object *map_obj; |
| 158 | 169 | ||
| 159 | pr_debug("%s: looking for virt 0x%x\n", __func__, dsp_addr); | 170 | pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__, |
| 171 | dsp_addr, size); | ||
| 160 | 172 | ||
| 161 | spin_lock(&pr_ctxt->dmm_map_lock); | 173 | spin_lock(&pr_ctxt->dmm_map_lock); |
| 162 | list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { | 174 | list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { |
| 163 | pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x\n", | 175 | pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n", |
| 164 | __func__, | 176 | __func__, |
| 165 | map_obj->mpu_addr, | 177 | map_obj->mpu_addr, |
| 166 | map_obj->dsp_addr); | 178 | map_obj->dsp_addr, |
| 179 | map_obj->size); | ||
| 167 | 180 | ||
| 168 | if (map_obj->dsp_addr == dsp_addr) { | 181 | if (match_exact_map_obj(map_obj, dsp_addr, size)) { |
| 169 | pr_debug("%s: match, deleting map info\n", __func__); | 182 | pr_debug("%s: match, deleting map info\n", __func__); |
| 170 | list_del(&map_obj->link); | 183 | list_del(&map_obj->link); |
| 171 | kfree(map_obj->dma_info.sg); | 184 | kfree(map_obj->dma_info.sg); |
| @@ -1077,6 +1090,7 @@ int proc_load(void *hprocessor, const s32 argc_index, | |||
| 1077 | s32 cnew_envp; /* " " in new_envp[] */ | 1090 | s32 cnew_envp; /* " " in new_envp[] */ |
| 1078 | s32 nproc_id = 0; /* Anticipate MP version. */ | 1091 | s32 nproc_id = 0; /* Anticipate MP version. */ |
| 1079 | struct dcd_manager *hdcd_handle; | 1092 | struct dcd_manager *hdcd_handle; |
| 1093 | struct dmm_object *dmm_mgr; | ||
| 1080 | u32 dw_ext_end; | 1094 | u32 dw_ext_end; |
| 1081 | u32 proc_id; | 1095 | u32 proc_id; |
| 1082 | int brd_state; | 1096 | int brd_state; |
| @@ -1267,6 +1281,25 @@ int proc_load(void *hprocessor, const s32 argc_index, | |||
| 1267 | if (!status) | 1281 | if (!status) |
| 1268 | status = cod_get_sym_value(cod_mgr, EXTEND, | 1282 | status = cod_get_sym_value(cod_mgr, EXTEND, |
| 1269 | &dw_ext_end); | 1283 | &dw_ext_end); |
| 1284 | |||
| 1285 | /* Reset DMM structs and add an initial free chunk */ | ||
| 1286 | if (!status) { | ||
| 1287 | status = | ||
| 1288 | dev_get_dmm_mgr(p_proc_object->hdev_obj, | ||
| 1289 | &dmm_mgr); | ||
| 1290 | if (dmm_mgr) { | ||
| 1291 | /* Set dw_ext_end to the DMM start | ||
| 1292 | * byte (u8) address */ | ||
| 1293 | dw_ext_end = | ||
| 1294 | (dw_ext_end + 1) * DSPWORDSIZE; | ||
| 1295 | /* DMM memory is from EXT_END */ | ||
| 1296 | status = dmm_create_tables(dmm_mgr, | ||
| 1297 | dw_ext_end, | ||
| 1298 | DMMPOOLSIZE); | ||
| 1299 | } else { | ||
| 1300 | status = -EFAULT; | ||
| 1301 | } | ||
| 1302 | } | ||
| 1270 | } | 1303 | } |
| 1271 | } | 1304 | } |
| 1272 | /* Restore the original argv[0] */ | 1305 | /* Restore the original argv[0] */ |
| @@ -1319,10 +1352,12 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size, | |||
| 1319 | { | 1352 | { |
| 1320 | u32 va_align; | 1353 | u32 va_align; |
| 1321 | u32 pa_align; | 1354 | u32 pa_align; |
| 1355 | struct dmm_object *dmm_mgr; | ||
| 1322 | u32 size_align; | 1356 | u32 size_align; |
| 1323 | int status = 0; | 1357 | int status = 0; |
| 1324 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | 1358 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; |
| 1325 | struct dmm_map_object *map_obj; | 1359 | struct dmm_map_object *map_obj; |
| 1360 | u32 tmp_addr = 0; | ||
| 1326 | 1361 | ||
| 1327 | #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK | 1362 | #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK |
| 1328 | if ((ul_map_attr & BUFMODE_MASK) != RBUF) { | 1363 | if ((ul_map_attr & BUFMODE_MASK) != RBUF) { |
| @@ -1347,30 +1382,33 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size, | |||
| 1347 | } | 1382 | } |
| 1348 | /* Critical section */ | 1383 | /* Critical section */ |
| 1349 | mutex_lock(&proc_lock); | 1384 | mutex_lock(&proc_lock); |
| 1385 | dmm_get_handle(p_proc_object, &dmm_mgr); | ||
| 1386 | if (dmm_mgr) | ||
| 1387 | status = dmm_map_memory(dmm_mgr, va_align, size_align); | ||
| 1388 | else | ||
| 1389 | status = -EFAULT; | ||
| 1350 | 1390 | ||
| 1351 | /* Add mapping to the page tables. */ | 1391 | /* Add mapping to the page tables. */ |
| 1352 | if (!status) { | 1392 | if (!status) { |
| 1393 | |||
| 1394 | /* Mapped address = MSB of VA | LSB of PA */ | ||
| 1395 | tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1))); | ||
| 1353 | /* mapped memory resource tracking */ | 1396 | /* mapped memory resource tracking */ |
| 1354 | map_obj = add_mapping_info(pr_ctxt, pa_align, va_align, | 1397 | map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr, |
| 1355 | size_align); | 1398 | size_align); |
| 1356 | if (!map_obj) { | 1399 | if (!map_obj) |
| 1357 | status = -ENOMEM; | 1400 | status = -ENOMEM; |
| 1358 | } else { | 1401 | else |
| 1359 | va_align = user_to_dsp_map( | 1402 | status = (*p_proc_object->intf_fxns->pfn_brd_mem_map) |
| 1360 | p_proc_object->hbridge_context->dsp_mmu, | 1403 | (p_proc_object->hbridge_context, pa_align, va_align, |
| 1361 | pa_align, va_align, size_align, | 1404 | size_align, ul_map_attr, map_obj->pages); |
| 1362 | map_obj->pages); | ||
| 1363 | if (IS_ERR_VALUE(va_align)) | ||
| 1364 | status = (int)va_align; | ||
| 1365 | } | ||
| 1366 | } | 1405 | } |
| 1367 | if (!status) { | 1406 | if (!status) { |
| 1368 | /* Mapped address = MSB of VA | LSB of PA */ | 1407 | /* Mapped address = MSB of VA | LSB of PA */ |
| 1369 | map_obj->dsp_addr = (va_align | | 1408 | *pp_map_addr = (void *) tmp_addr; |
| 1370 | ((u32)pmpu_addr & (PG_SIZE4K - 1))); | ||
| 1371 | *pp_map_addr = (void *)map_obj->dsp_addr; | ||
| 1372 | } else { | 1409 | } else { |
| 1373 | remove_mapping_information(pr_ctxt, va_align); | 1410 | remove_mapping_information(pr_ctxt, tmp_addr, size_align); |
| 1411 | dmm_un_map_memory(dmm_mgr, va_align, &size_align); | ||
| 1374 | } | 1412 | } |
| 1375 | mutex_unlock(&proc_lock); | 1413 | mutex_unlock(&proc_lock); |
| 1376 | 1414 | ||
| @@ -1463,6 +1501,55 @@ func_end: | |||
| 1463 | } | 1501 | } |
| 1464 | 1502 | ||
| 1465 | /* | 1503 | /* |
| 1504 | * ======== proc_reserve_memory ======== | ||
| 1505 | * Purpose: | ||
| 1506 | * Reserve a virtually contiguous region of DSP address space. | ||
| 1507 | */ | ||
| 1508 | int proc_reserve_memory(void *hprocessor, u32 ul_size, | ||
| 1509 | void **pp_rsv_addr, | ||
| 1510 | struct process_context *pr_ctxt) | ||
| 1511 | { | ||
| 1512 | struct dmm_object *dmm_mgr; | ||
| 1513 | int status = 0; | ||
| 1514 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
| 1515 | struct dmm_rsv_object *rsv_obj; | ||
| 1516 | |||
| 1517 | if (!p_proc_object) { | ||
| 1518 | status = -EFAULT; | ||
| 1519 | goto func_end; | ||
| 1520 | } | ||
| 1521 | |||
| 1522 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | ||
| 1523 | if (!dmm_mgr) { | ||
| 1524 | status = -EFAULT; | ||
| 1525 | goto func_end; | ||
| 1526 | } | ||
| 1527 | |||
| 1528 | status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr); | ||
| 1529 | if (status != 0) | ||
| 1530 | goto func_end; | ||
| 1531 | |||
| 1532 | /* | ||
| 1533 | * A successful reserve should be followed by insertion of rsv_obj | ||
| 1534 | * into dmm_rsv_list, so that reserved memory resource tracking | ||
| 1535 | * remains uptodate | ||
| 1536 | */ | ||
| 1537 | rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL); | ||
| 1538 | if (rsv_obj) { | ||
| 1539 | rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr; | ||
| 1540 | spin_lock(&pr_ctxt->dmm_rsv_lock); | ||
| 1541 | list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list); | ||
| 1542 | spin_unlock(&pr_ctxt->dmm_rsv_lock); | ||
| 1543 | } | ||
| 1544 | |||
| 1545 | func_end: | ||
| 1546 | dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p " | ||
| 1547 | "status 0x%x\n", __func__, hprocessor, | ||
| 1548 | ul_size, pp_rsv_addr, status); | ||
| 1549 | return status; | ||
| 1550 | } | ||
| 1551 | |||
| 1552 | /* | ||
| 1466 | * ======== proc_start ======== | 1553 | * ======== proc_start ======== |
| 1467 | * Purpose: | 1554 | * Purpose: |
| 1468 | * Start a processor running. | 1555 | * Start a processor running. |
| @@ -1610,7 +1697,9 @@ int proc_un_map(void *hprocessor, void *map_addr, | |||
| 1610 | { | 1697 | { |
| 1611 | int status = 0; | 1698 | int status = 0; |
| 1612 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | 1699 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; |
| 1700 | struct dmm_object *dmm_mgr; | ||
| 1613 | u32 va_align; | 1701 | u32 va_align; |
| 1702 | u32 size_align; | ||
| 1614 | 1703 | ||
| 1615 | va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); | 1704 | va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); |
| 1616 | if (!p_proc_object) { | 1705 | if (!p_proc_object) { |
| @@ -1618,11 +1707,24 @@ int proc_un_map(void *hprocessor, void *map_addr, | |||
| 1618 | goto func_end; | 1707 | goto func_end; |
| 1619 | } | 1708 | } |
| 1620 | 1709 | ||
| 1710 | status = dmm_get_handle(hprocessor, &dmm_mgr); | ||
| 1711 | if (!dmm_mgr) { | ||
| 1712 | status = -EFAULT; | ||
| 1713 | goto func_end; | ||
| 1714 | } | ||
| 1715 | |||
| 1621 | /* Critical section */ | 1716 | /* Critical section */ |
| 1622 | mutex_lock(&proc_lock); | 1717 | mutex_lock(&proc_lock); |
| 1718 | /* | ||
| 1719 | * Update DMM structures. Get the size to unmap. | ||
| 1720 | * This function returns error if the VA is not mapped | ||
| 1721 | */ | ||
| 1722 | status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align); | ||
| 1623 | /* Remove mapping from the page tables. */ | 1723 | /* Remove mapping from the page tables. */ |
| 1624 | status = user_to_dsp_unmap(p_proc_object->hbridge_context->dsp_mmu, | 1724 | if (!status) { |
| 1625 | va_align); | 1725 | status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map) |
| 1726 | (p_proc_object->hbridge_context, va_align, size_align); | ||
| 1727 | } | ||
| 1626 | 1728 | ||
| 1627 | mutex_unlock(&proc_lock); | 1729 | mutex_unlock(&proc_lock); |
| 1628 | if (status) | 1730 | if (status) |
| @@ -1633,7 +1735,7 @@ int proc_un_map(void *hprocessor, void *map_addr, | |||
| 1633 | * from dmm_map_list, so that mapped memory resource tracking | 1735 | * from dmm_map_list, so that mapped memory resource tracking |
| 1634 | * remains up to date | 1736 | * remains up to date |
| 1635 | */ | 1737 | */ |
| 1636 | remove_mapping_information(pr_ctxt, (u32) map_addr); | 1738 | remove_mapping_information(pr_ctxt, (u32) map_addr, size_align); |
| 1637 | 1739 | ||
| 1638 | func_end: | 1740 | func_end: |
| 1639 | dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", | 1741 | dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", |
| @@ -1642,6 +1744,55 @@ func_end: | |||
| 1642 | } | 1744 | } |
| 1643 | 1745 | ||
| 1644 | /* | 1746 | /* |
| 1747 | * ======== proc_un_reserve_memory ======== | ||
| 1748 | * Purpose: | ||
| 1749 | * Frees a previously reserved region of DSP address space. | ||
| 1750 | */ | ||
| 1751 | int proc_un_reserve_memory(void *hprocessor, void *prsv_addr, | ||
| 1752 | struct process_context *pr_ctxt) | ||
| 1753 | { | ||
| 1754 | struct dmm_object *dmm_mgr; | ||
| 1755 | int status = 0; | ||
| 1756 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
| 1757 | struct dmm_rsv_object *rsv_obj; | ||
| 1758 | |||
| 1759 | if (!p_proc_object) { | ||
| 1760 | status = -EFAULT; | ||
| 1761 | goto func_end; | ||
| 1762 | } | ||
| 1763 | |||
| 1764 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | ||
| 1765 | if (!dmm_mgr) { | ||
| 1766 | status = -EFAULT; | ||
| 1767 | goto func_end; | ||
| 1768 | } | ||
| 1769 | |||
| 1770 | status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr); | ||
| 1771 | if (status != 0) | ||
| 1772 | goto func_end; | ||
| 1773 | |||
| 1774 | /* | ||
| 1775 | * A successful unreserve should be followed by removal of rsv_obj | ||
| 1776 | * from dmm_rsv_list, so that reserved memory resource tracking | ||
| 1777 | * remains uptodate | ||
| 1778 | */ | ||
| 1779 | spin_lock(&pr_ctxt->dmm_rsv_lock); | ||
| 1780 | list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) { | ||
| 1781 | if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) { | ||
| 1782 | list_del(&rsv_obj->link); | ||
| 1783 | kfree(rsv_obj); | ||
| 1784 | break; | ||
| 1785 | } | ||
| 1786 | } | ||
| 1787 | spin_unlock(&pr_ctxt->dmm_rsv_lock); | ||
| 1788 | |||
| 1789 | func_end: | ||
| 1790 | dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n", | ||
| 1791 | __func__, hprocessor, prsv_addr, status); | ||
| 1792 | return status; | ||
| 1793 | } | ||
| 1794 | |||
| 1795 | /* | ||
| 1645 | * ======== proc_monitor ======== | 1796 | * ======== proc_monitor ======== |
| 1646 | * Purpose: | 1797 | * Purpose: |
| 1647 | * Place the Processor in Monitor State. This is an internal | 1798 | * Place the Processor in Monitor State. This is an internal |
