Diffstat (limited to 'drivers/gpu/pvr/sgx/mmu.c')
-rw-r--r--  drivers/gpu/pvr/sgx/mmu.c  2940
1 file changed, 2940 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/pvr/sgx/mmu.c b/drivers/gpu/pvr/sgx/mmu.c
new file mode 100644
index 00000000000..8cf6682c247
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/mmu.c
@@ -0,0 +1,2940 @@
1/**********************************************************************
2 *
3 * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful but, except
10 * as otherwise stated in writing, without any warranty; without even the
11 * implied warranty of merchantability or fitness for a particular purpose.
12 * See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * Imagination Technologies Ltd. <gpl-support@imgtec.com>
23 * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
24 *
25 ******************************************************************************/
26
27#include "sgxdefs.h"
28#include "sgxmmu.h"
29#include "services_headers.h"
30#include "buffer_manager.h"
31#include "hash.h"
32#include "ra.h"
33#include "pdump_km.h"
34#include "sgxapi_km.h"
35#include "sgxinfo.h"
36#include "sgxinfokm.h"
37#include "mmu.h"
38#include "sgxconfig.h"
39#include "sgx_bridge_km.h"
40
41#define UINT32_MAX_VALUE 0xFFFFFFFFUL
42
43#define SGX_MAX_PD_ENTRIES (1<<(SGX_FEATURE_ADDRESS_SPACE_SIZE - SGX_MMU_PT_SHIFT - SGX_MMU_PAGE_SHIFT))
44
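/* Per-page-table bookkeeping: the OS memory handle and kernel CPU mapping of the
   page holding the page table, plus a count of the PTEs currently valid in it. */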
45typedef struct _MMU_PT_INFO_
46{
47
48 IMG_VOID *hPTPageOSMemHandle;
49 IMG_CPU_VIRTADDR PTPageCpuVAddr;
50 IMG_UINT32 ui32ValidPTECount;
51} MMU_PT_INFO;
52
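/* Per-memory-context state: the page directory (CPU mapping, device physical
   address and OS memory handle), one MMU_PT_INFO slot per page directory entry,
   the owning device node/info, and a link to the next context on the device's
   context list. */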
53struct _MMU_CONTEXT_
54{
55
56 PVRSRV_DEVICE_NODE *psDeviceNode;
57
58
59 IMG_CPU_VIRTADDR pvPDCpuVAddr;
60 IMG_DEV_PHYADDR sPDDevPAddr;
61
62 IMG_VOID *hPDOSMemHandle;
63
64
65 MMU_PT_INFO *apsPTInfoList[SGX_MAX_PD_ENTRIES];
66
67 PVRSRV_SGXDEV_INFO *psDevInfo;
68
69#if defined(PDUMP)
70 IMG_UINT32 ui32PDumpMMUContextID;
71#endif
72
73 struct _MMU_CONTEXT_ *psNext;
74};
75
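/* Per-heap MMU geometry: the owning context, the heap's base page directory
   index, page table and PTE counts, the data page size/mask and PDE page-size
   control bits, and the shift/mask values used to split a device virtual address
   into directory, table and page offsets. psVMArena hands out device virtual
   addresses for the heap. */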
76struct _MMU_HEAP_
77{
78
79 MMU_CONTEXT *psMMUContext;
80
81
82
83
84 IMG_UINT32 ui32PDBaseIndex;
85
86 IMG_UINT32 ui32PageTableCount;
87
88 IMG_UINT32 ui32PTETotal;
89
90 IMG_UINT32 ui32PDEPageSizeCtrl;
91
92
93
94
95 IMG_UINT32 ui32DataPageSize;
96
97 IMG_UINT32 ui32DataPageBitWidth;
98
99 IMG_UINT32 ui32DataPageMask;
100
101
102
103
104 IMG_UINT32 ui32PTShift;
105
106 IMG_UINT32 ui32PTBitWidth;
107
108 IMG_UINT32 ui32PTMask;
109
110 IMG_UINT32 ui32PTSize;
111
112 IMG_UINT32 ui32PTECount;
113
114
115
116
117 IMG_UINT32 ui32PDShift;
118
119 IMG_UINT32 ui32PDBitWidth;
120
121 IMG_UINT32 ui32PDMask;
122
123
124
125 RA_ARENA *psVMArena;
126 DEV_ARENA_DESCRIPTOR *psDevArena;
127#if defined(PDUMP)
128 PDUMP_MMU_ATTRIB sMMUAttrib;
129#endif
130};
131
132
133
134#if defined (SUPPORT_SGX_MMU_DUMMY_PAGE)
135#define DUMMY_DATA_PAGE_SIGNATURE 0xDEADBEEF
136#endif
137
138#if defined(PDUMP)
139static IMG_VOID
140MMU_PDumpPageTables (MMU_HEAP *pMMUHeap,
141 IMG_DEV_VIRTADDR DevVAddr,
142 IMG_SIZE_T uSize,
143 IMG_BOOL bForUnmap,
144 IMG_HANDLE hUniqueTag);
145#endif
146
147#define PAGE_TEST 0
148#if PAGE_TEST
149static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr);
150#endif
151
152#define PT_DEBUG 0
153#if PT_DEBUG
154static IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
155{
156 IMG_UINT32 *p = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
157 IMG_UINT32 i;
158
159
160 for(i = 0; i < 1024; i += 8)
161 {
162 PVR_DPF((PVR_DBG_WARNING,
163 "%08X %08X %08X %08X %08X %08X %08X %08X\n",
164 p[i + 0], p[i + 1], p[i + 2], p[i + 3],
165 p[i + 4], p[i + 5], p[i + 6], p[i + 7]));
166 }
167}
168
169static IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
170{
171 IMG_UINT32 *p = (IMG_UINT32*) psPTInfoList->PTPageCpuVAddr;
172 IMG_UINT32 i, ui32Count = 0;
173
174
175 for(i = 0; i < 1024; i++)
176 if(p[i] & SGX_MMU_PTE_VALID)
177 ui32Count++;
178
179 if(psPTInfoList->ui32ValidPTECount != ui32Count)
180 {
181 PVR_DPF((PVR_DBG_WARNING, "ui32ValidPTECount: %u ui32Count: %u\n",
182 psPTInfoList->ui32ValidPTECount, ui32Count));
183 DumpPT(psPTInfoList);
184 BUG();
185 }
186}
187#else
188static INLINE IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
189{
190 PVR_UNREFERENCED_PARAMETER(psPTInfoList);
191}
192
193static INLINE IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
194{
195 PVR_UNREFERENCED_PARAMETER(psPTInfoList);
196}
197#endif
198
199#ifdef SUPPORT_SGX_MMU_BYPASS
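/* Helpers used around page table construction when SUPPORT_SGX_MMU_BYPASS is
   defined: they set and clear the MMU host-bypass bit in EUR_CR_BIF_CTRL and
   PDump the register write. */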
200IMG_VOID
201EnableHostAccess (MMU_CONTEXT *psMMUContext)
202{
203 IMG_UINT32 ui32RegVal;
204 IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
205
206
207
208
209 ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);
210
211 OSWriteHWReg(pvRegsBaseKM,
212 EUR_CR_BIF_CTRL,
213 ui32RegVal | EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
214
215 PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
216}
217
218IMG_VOID
219DisableHostAccess (MMU_CONTEXT *psMMUContext)
220{
221 IMG_UINT32 ui32RegVal;
222 IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
223
224
225
226
227
	/* Read the current BIF control value so that only the host-bypass bit is
	   cleared and the remaining control bits are preserved. */
	ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);

 228	OSWriteHWReg(pvRegsBaseKM,
 229				EUR_CR_BIF_CTRL,
 230				ui32RegVal & ~EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
231
232 PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, 0);
233}
234#endif
235
236
237#if defined(SGX_FEATURE_SYSTEM_CACHE)
238static IMG_VOID MMU_InvalidateSystemLevelCache(PVRSRV_SGXDEV_INFO *psDevInfo)
239{
240 #if defined(SGX_FEATURE_MP)
241 psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_SL;
242 #else
243
244 PVR_UNREFERENCED_PARAMETER(psDevInfo);
245 #endif
246}
247#endif
248
249IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo)
250{
251 psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PD;
252 #if defined(SGX_FEATURE_SYSTEM_CACHE)
253 MMU_InvalidateSystemLevelCache(psDevInfo);
254 #endif
255}
256
257
258static IMG_VOID MMU_InvalidatePageTableCache(PVRSRV_SGXDEV_INFO *psDevInfo)
259{
260 psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PT;
261 #if defined(SGX_FEATURE_SYSTEM_CACHE)
262 MMU_InvalidateSystemLevelCache(psDevInfo);
263 #endif
264}
265
266
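/* Allocate and initialise the memory backing a single page table. With no local
   device memory arena the page comes from OS pages; otherwise it is allocated from
   the local arena with RA_Alloc and mapped into the kernel. Entries are preset to
   the dummy data page (SUPPORT_SGX_MMU_DUMMY_PAGE) or zeroed, the allocation is
   PDumped, and the page table's device physical address is returned. */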
267static IMG_BOOL
268_AllocPageTableMemory (MMU_HEAP *pMMUHeap,
269 MMU_PT_INFO *psPTInfoList,
270 IMG_DEV_PHYADDR *psDevPAddr)
271{
272 IMG_DEV_PHYADDR sDevPAddr;
273 IMG_CPU_PHYADDR sCpuPAddr;
274
275
276
277
278 if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
279 {
280
281 if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
282 pMMUHeap->ui32PTSize,
283 SGX_MMU_PAGE_SIZE,
284 (IMG_VOID **)&psPTInfoList->PTPageCpuVAddr,
285 &psPTInfoList->hPTPageOSMemHandle) != PVRSRV_OK)
286 {
287 PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to OSAllocPages failed"));
288 return IMG_FALSE;
289 }
290
291
292 if(psPTInfoList->PTPageCpuVAddr)
293 {
294 sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->hPTPageOSMemHandle,
295 psPTInfoList->PTPageCpuVAddr);
296 }
297 else
298 {
299
300 sCpuPAddr = OSMemHandleToCpuPAddr(psPTInfoList->hPTPageOSMemHandle, 0);
301 }
302
303 sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
304 }
305 else
306 {
307 IMG_SYS_PHYADDR sSysPAddr;
308
309
310
311
312
313 if(RA_Alloc(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena,
314 SGX_MMU_PAGE_SIZE,
315 IMG_NULL,
316 IMG_NULL,
317 0,
318 SGX_MMU_PAGE_SIZE,
319 0,
320 &(sSysPAddr.uiAddr))!= IMG_TRUE)
321 {
322 PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to RA_Alloc failed"));
323 return IMG_FALSE;
324 }
325
326
327 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
328
329 psPTInfoList->PTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
330 SGX_MMU_PAGE_SIZE,
331 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
332 &psPTInfoList->hPTPageOSMemHandle);
333 if(!psPTInfoList->PTPageCpuVAddr)
334 {
335 PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR failed to map page tables"));
336 return IMG_FALSE;
337 }
338
339
340 sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
341
342 #if PAGE_TEST
343 PageTest(psPTInfoList->PTPageCpuVAddr, sDevPAddr);
344 #endif
345 }
346
347#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
348 {
349 IMG_UINT32 *pui32Tmp;
350 IMG_UINT32 i;
351
352 pui32Tmp = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
353
354 for(i=0; i<pMMUHeap->ui32PTECount; i++)
355 {
356 pui32Tmp[i] = (pMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
357 | SGX_MMU_PTE_VALID;
358 }
359 }
360#else
361
362 OSMemSet(psPTInfoList->PTPageCpuVAddr, 0, pMMUHeap->ui32PTSize);
363#endif
364
365
366 PDUMPMALLOCPAGETABLE(&pMMUHeap->psMMUContext->psDeviceNode->sDevId, psPTInfoList->hPTPageOSMemHandle, 0, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, PDUMP_PT_UNIQUETAG);
367
368 PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfoList->hPTPageOSMemHandle, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
369
370
371 *psDevPAddr = sDevPAddr;
372
373 return IMG_TRUE;
374}
375
376
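/* Release the memory backing a page table: either free the OS pages, or unmap the
   kernel mapping and return the system physical page to the local device arena. */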
377static IMG_VOID
378_FreePageTableMemory (MMU_HEAP *pMMUHeap, MMU_PT_INFO *psPTInfoList)
379{
380
381
382
383
384 if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
385 {
386
387 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
388 pMMUHeap->ui32PTSize,
389 psPTInfoList->PTPageCpuVAddr,
390 psPTInfoList->hPTPageOSMemHandle);
391 }
392 else
393 {
394 IMG_SYS_PHYADDR sSysPAddr;
395 IMG_CPU_PHYADDR sCpuPAddr;
396
397
398 sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->hPTPageOSMemHandle,
399 psPTInfoList->PTPageCpuVAddr);
400 sSysPAddr = SysCpuPAddrToSysPAddr (sCpuPAddr);
401
402
403
404 OSUnMapPhysToLin(psPTInfoList->PTPageCpuVAddr,
405 SGX_MMU_PAGE_SIZE,
406 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
407 psPTInfoList->hPTPageOSMemHandle);
408
409
410
411
412 RA_Free (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
413 }
414}
415
416
417
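/* Tear down one of the heap's page tables: repoint the corresponding PD entry of
   every affected context at the dummy PT (or clear it), zero any remaining PTEs,
   optionally free the page table memory, and update the heap's outstanding PTE
   count. */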
418static IMG_VOID
419_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT)
420{
421 IMG_UINT32 *pui32PDEntry;
422 IMG_UINT32 i;
423 IMG_UINT32 ui32PDIndex;
424 SYS_DATA *psSysData;
425 MMU_PT_INFO **ppsPTInfoList;
426
427 SysAcquireData(&psSysData);
428
429
430 ui32PDIndex = pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
431
432
433 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
434
435 {
436#if PT_DEBUG
437 if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount > 0)
438 {
439 DumpPT(ppsPTInfoList[ui32PTIndex]);
440
441 }
442#endif
443
444
445 PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == IMG_NULL || ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == 0);
446 }
447
448
449 PDUMPCOMMENT("Free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
450 if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
451 {
452 PDUMPFREEPAGETABLE(&pMMUHeap->psMMUContext->psDeviceNode->sDevId, ppsPTInfoList[ui32PTIndex]->hPTPageOSMemHandle, ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr, pMMUHeap->ui32PTSize, PDUMP_PT_UNIQUETAG);
453 }
454
455 switch(pMMUHeap->psDevArena->DevMemHeapType)
456 {
457 case DEVICE_MEMORY_HEAP_SHARED :
458 case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
459 {
460
461 MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
462
463 while(psMMUContext)
464 {
465
466 pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
467 pui32PDEntry += ui32PDIndex;
468
469#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
470
471 pui32PDEntry[ui32PTIndex] = (psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
472 >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
473 | SGX_MMU_PDE_PAGE_SIZE_4K
474 | SGX_MMU_PDE_VALID;
475#else
476
477 if(bOSFreePT)
478 {
479 pui32PDEntry[ui32PTIndex] = 0;
480 }
481#endif
482
483
484 PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
485
486
487 psMMUContext = psMMUContext->psNext;
488 }
489 break;
490 }
491 case DEVICE_MEMORY_HEAP_PERCONTEXT :
492 case DEVICE_MEMORY_HEAP_KERNEL :
493 {
494
495 pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
496 pui32PDEntry += ui32PDIndex;
497
498#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
499
500 pui32PDEntry[ui32PTIndex] = (pMMUHeap->psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
501 >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
502 | SGX_MMU_PDE_PAGE_SIZE_4K
503 | SGX_MMU_PDE_VALID;
504#else
505
506 if(bOSFreePT)
507 {
508 pui32PDEntry[ui32PTIndex] = 0;
509 }
510#endif
511
512
513 PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, pMMUHeap->psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
514 break;
515 }
516 default:
517 {
 518			PVR_DPF((PVR_DBG_ERROR, "_DeferredFreePageTable: ERROR invalid heap type"));
519 return;
520 }
521 }
522
523
524 if(ppsPTInfoList[ui32PTIndex] != IMG_NULL)
525 {
526 if(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != IMG_NULL)
527 {
528 IMG_PUINT32 pui32Tmp;
529
530 pui32Tmp = (IMG_UINT32*)ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr;
531
532
533 for(i=0;
534 (i<pMMUHeap->ui32PTETotal) && (i<pMMUHeap->ui32PTECount);
535 i++)
536 {
537 pui32Tmp[i] = 0;
538 }
539
540
541
542 if(bOSFreePT)
543 {
544 _FreePageTableMemory(pMMUHeap, ppsPTInfoList[ui32PTIndex]);
545 }
546
547
548
549
550 pMMUHeap->ui32PTETotal -= i;
551 }
552 else
553 {
554
555 pMMUHeap->ui32PTETotal -= pMMUHeap->ui32PTECount;
556 }
557
558 if(bOSFreePT)
559 {
560
561 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
562 sizeof(MMU_PT_INFO),
563 ppsPTInfoList[ui32PTIndex],
564 IMG_NULL);
565 ppsPTInfoList[ui32PTIndex] = IMG_NULL;
566 }
567 }
568 else
569 {
570
571 pMMUHeap->ui32PTETotal -= pMMUHeap->ui32PTECount;
572 }
573
574 PDUMPCOMMENT("Finished free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
575}
576
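/* Free every page table belonging to the heap and invalidate the BIF directory
   cache. */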
577static IMG_VOID
578_DeferredFreePageTables (MMU_HEAP *pMMUHeap)
579{
580 IMG_UINT32 i;
581#if defined(PDUMP)
582 PDUMPCOMMENT("Free PTs (MMU Context ID == %u, PDBaseIndex == %u, PT count == 0x%x)",
583 pMMUHeap->psMMUContext->ui32PDumpMMUContextID,
584 pMMUHeap->ui32PDBaseIndex,
585 pMMUHeap->ui32PageTableCount);
586#endif
587 for(i=0; i<pMMUHeap->ui32PageTableCount; i++)
588 {
589 _DeferredFreePageTable(pMMUHeap, i, IMG_TRUE);
590 }
591 MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
592}
593
594
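/* Ensure page tables exist for the range [DevVAddr, DevVAddr + ui32Size): work out
   which PD entries the range covers, allocate MMU_PT_INFO records and page table
   pages for any that are missing, and write the matching PD entries (into every
   context for shared heaps, or only this context for per-context/kernel heaps). */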
595static IMG_BOOL
596_DeferredAllocPagetables(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
597{
598 IMG_UINT32 ui32PageTableCount;
599 IMG_UINT32 ui32PDIndex;
600 IMG_UINT32 i;
601 IMG_UINT32 *pui32PDEntry;
602 MMU_PT_INFO **ppsPTInfoList;
603 SYS_DATA *psSysData;
604 IMG_DEV_VIRTADDR sHighDevVAddr;
605
606
607#if SGX_FEATURE_ADDRESS_SPACE_SIZE < 32
608 PVR_ASSERT(DevVAddr.uiAddr < (1<<SGX_FEATURE_ADDRESS_SPACE_SIZE));
609#endif
610
611
612 SysAcquireData(&psSysData);
613
614
615 ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
616
617
618
619 if((UINT32_MAX_VALUE - DevVAddr.uiAddr)
620 < (ui32Size + pMMUHeap->ui32DataPageMask + pMMUHeap->ui32PTMask))
621 {
622
623 sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
624 }
625 else
626 {
627 sHighDevVAddr.uiAddr = DevVAddr.uiAddr
628 + ui32Size
629 + pMMUHeap->ui32DataPageMask
630 + pMMUHeap->ui32PTMask;
631 }
632
633 ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
634
635 ui32PageTableCount -= ui32PDIndex;
636
637
638 pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
639 pui32PDEntry += ui32PDIndex;
640
641
642 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
643
644#if defined(PDUMP)
645 PDUMPCOMMENT("Alloc PTs (MMU Context ID == %u, PDBaseIndex == %u, Size == 0x%x)",
646 pMMUHeap->psMMUContext->ui32PDumpMMUContextID,
647 pMMUHeap->ui32PDBaseIndex,
648 ui32Size);
649 PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PageTableCount);
650 PDUMPCOMMENT("Page directory mods (page count == %08X)", ui32PageTableCount);
651#endif
652
653 for(i=0; i<ui32PageTableCount; i++)
654 {
655 if(ppsPTInfoList[i] == IMG_NULL)
656 {
657 OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
658 sizeof (MMU_PT_INFO),
659 (IMG_VOID **)&ppsPTInfoList[i], IMG_NULL,
660 "MMU Page Table Info");
661 if (ppsPTInfoList[i] == IMG_NULL)
662 {
663 PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to OSAllocMem failed"));
664 return IMG_FALSE;
665 }
666 OSMemSet (ppsPTInfoList[i], 0, sizeof(MMU_PT_INFO));
667 }
668
669 if(ppsPTInfoList[i]->hPTPageOSMemHandle == IMG_NULL
670 && ppsPTInfoList[i]->PTPageCpuVAddr == IMG_NULL)
671 {
672 IMG_DEV_PHYADDR sDevPAddr;
673#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
674 IMG_UINT32 *pui32Tmp;
675 IMG_UINT32 j;
676#else
677
678 PVR_ASSERT(pui32PDEntry[i] == 0);
679#endif
680
681 if(_AllocPageTableMemory (pMMUHeap, ppsPTInfoList[i], &sDevPAddr) != IMG_TRUE)
682 {
683 PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to _AllocPageTableMemory failed"));
684 return IMG_FALSE;
685 }
686
687 switch(pMMUHeap->psDevArena->DevMemHeapType)
688 {
689 case DEVICE_MEMORY_HEAP_SHARED :
690 case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
691 {
692
693 MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
694
695 while(psMMUContext)
696 {
697
698 pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
699 pui32PDEntry += ui32PDIndex;
700
701
702 pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
703 | pMMUHeap->ui32PDEPageSizeCtrl
704 | SGX_MMU_PDE_VALID;
705
706
707 PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
708
709
710 psMMUContext = psMMUContext->psNext;
711 }
712 break;
713 }
714 case DEVICE_MEMORY_HEAP_PERCONTEXT :
715 case DEVICE_MEMORY_HEAP_KERNEL :
716 {
717
718 pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
719 | pMMUHeap->ui32PDEPageSizeCtrl
720 | SGX_MMU_PDE_VALID;
721
722
723 PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, pMMUHeap->psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
724 break;
725 }
726 default:
727 {
728 PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR invalid heap type"));
729 return IMG_FALSE;
730 }
731 }
732
733#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
734
735
736
737
738 MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
739#endif
740 }
741 else
742 {
743
744 PVR_ASSERT(pui32PDEntry[i] != 0);
745 }
746 }
747
748 #if defined(SGX_FEATURE_SYSTEM_CACHE)
749 MMU_InvalidateSystemLevelCache(pMMUHeap->psMMUContext->psDevInfo);
750 #endif
751
752 return IMG_TRUE;
753}
754
755
756#if defined(PDUMP)
757IMG_UINT32 MMU_GetPDumpContextID(IMG_HANDLE hDevMemContext)
758{
759 BM_CONTEXT *pBMContext = hDevMemContext;
760 PVR_ASSERT(pBMContext);
761 return pBMContext->psMMUContext->ui32PDumpMMUContextID;
762}
763
764static IMG_VOID MMU_SetPDumpAttribs(PDUMP_MMU_ATTRIB *psMMUAttrib,
765 PVRSRV_DEVICE_NODE *psDeviceNode,
766 IMG_UINT32 ui32DataPageMask,
767 IMG_UINT32 ui32PTSize)
768{
769
770 psMMUAttrib->sDevId = psDeviceNode->sDevId;
771
772 psMMUAttrib->pszPDRegRegion = IMG_NULL;
773 psMMUAttrib->ui32DataPageMask = ui32DataPageMask;
774
775 psMMUAttrib->ui32PTEValid = SGX_MMU_PTE_VALID;
776 psMMUAttrib->ui32PTSize = ui32PTSize;
777 psMMUAttrib->ui32PTEAlignShift = SGX_MMU_PTE_ADDR_ALIGNSHIFT;
778
779 psMMUAttrib->ui32PDEMask = SGX_MMU_PDE_ADDR_MASK;
780 psMMUAttrib->ui32PDEAlignShift = SGX_MMU_PDE_ADDR_ALIGNSHIFT;
781}
782#endif
783
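/* Create a new MMU context: allocate a page directory from OS pages or the local
   device memory arena, allocate the shared dummy PT and dummy data pages on first
   use (SUPPORT_SGX_MMU_DUMMY_PAGE), initialise the PD entries, register the
   context with PDump and link it onto the device's context list. */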
784PVRSRV_ERROR
785MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr)
786{
787 IMG_UINT32 *pui32Tmp;
788 IMG_UINT32 i;
789 IMG_CPU_VIRTADDR pvPDCpuVAddr;
790 IMG_DEV_PHYADDR sPDDevPAddr;
791 IMG_CPU_PHYADDR sCpuPAddr;
792 MMU_CONTEXT *psMMUContext;
793 IMG_HANDLE hPDOSMemHandle;
794 SYS_DATA *psSysData;
795 PVRSRV_SGXDEV_INFO *psDevInfo;
796#if defined(PDUMP)
797 PDUMP_MMU_ATTRIB sMMUAttrib;
798#endif
799 PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Initialise"));
800
801 SysAcquireData(&psSysData);
802#if defined(PDUMP)
803
804
805 MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode,
806 SGX_MMU_PAGE_MASK,
807 SGX_MMU_PT_SIZE * sizeof(IMG_UINT32));
808#endif
809
810 OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
811 sizeof (MMU_CONTEXT),
812 (IMG_VOID **)&psMMUContext, IMG_NULL,
813 "MMU Context");
814 if (psMMUContext == IMG_NULL)
815 {
816 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocMem failed"));
817 return PVRSRV_ERROR_OUT_OF_MEMORY;
818 }
819 OSMemSet (psMMUContext, 0, sizeof(MMU_CONTEXT));
820
821
822 psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
823 psMMUContext->psDevInfo = psDevInfo;
824
825
826 psMMUContext->psDeviceNode = psDeviceNode;
827
828
829 if(psDeviceNode->psLocalDevMemArena == IMG_NULL)
830 {
831 if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
832 SGX_MMU_PAGE_SIZE,
833 SGX_MMU_PAGE_SIZE,
834 &pvPDCpuVAddr,
835 &hPDOSMemHandle) != PVRSRV_OK)
836 {
837 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
838 return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
839 }
840
841 if(pvPDCpuVAddr)
842 {
843 sCpuPAddr = OSMapLinToCPUPhys(hPDOSMemHandle,
844 pvPDCpuVAddr);
845 }
846 else
847 {
848
849 sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
850 }
851 sPDDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
852
853 #if PAGE_TEST
854 PageTest(pvPDCpuVAddr, sPDDevPAddr);
855 #endif
856
857#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
858
859 if(!psDevInfo->pvMMUContextList)
860 {
861
862 if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
863 SGX_MMU_PAGE_SIZE,
864 SGX_MMU_PAGE_SIZE,
865 &psDevInfo->pvDummyPTPageCpuVAddr,
866 &psDevInfo->hDummyPTPageOSMemHandle) != PVRSRV_OK)
867 {
868 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
869 return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
870 }
871
872 if(psDevInfo->pvDummyPTPageCpuVAddr)
873 {
874 sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle,
875 psDevInfo->pvDummyPTPageCpuVAddr);
876 }
877 else
878 {
879
880 sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyPTPageOSMemHandle, 0);
881 }
882 psDevInfo->sDummyPTDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
883
884
885 if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
886 SGX_MMU_PAGE_SIZE,
887 SGX_MMU_PAGE_SIZE,
888 &psDevInfo->pvDummyDataPageCpuVAddr,
889 &psDevInfo->hDummyDataPageOSMemHandle) != PVRSRV_OK)
890 {
891 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
892 return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
893 }
894
895 if(psDevInfo->pvDummyDataPageCpuVAddr)
896 {
 897			sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyDataPageOSMemHandle,
 898										  psDevInfo->pvDummyDataPageCpuVAddr);
899 }
900 else
901 {
902 sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyDataPageOSMemHandle, 0);
903 }
904 psDevInfo->sDummyDataDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
905 }
906#endif
907 }
908 else
909 {
910 IMG_SYS_PHYADDR sSysPAddr;
911
912
913 if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
914 SGX_MMU_PAGE_SIZE,
915 IMG_NULL,
916 IMG_NULL,
917 0,
918 SGX_MMU_PAGE_SIZE,
919 0,
920 &(sSysPAddr.uiAddr))!= IMG_TRUE)
921 {
922 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
923 return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
924 }
925
926
927 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
928 sPDDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
929 pvPDCpuVAddr = OSMapPhysToLin(sCpuPAddr,
930 SGX_MMU_PAGE_SIZE,
931 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
932 &hPDOSMemHandle);
933 if(!pvPDCpuVAddr)
934 {
935 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
936 return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
937 }
938
939 #if PAGE_TEST
940 PageTest(pvPDCpuVAddr, sPDDevPAddr);
941 #endif
942
943#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
944
945 if(!psDevInfo->pvMMUContextList)
946 {
947
948 if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
949 SGX_MMU_PAGE_SIZE,
950 IMG_NULL,
951 IMG_NULL,
952 0,
953 SGX_MMU_PAGE_SIZE,
954 0,
955 &(sSysPAddr.uiAddr))!= IMG_TRUE)
956 {
957 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
958 return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
959 }
960
961
962 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
963 psDevInfo->sDummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
964 psDevInfo->pvDummyPTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
965 SGX_MMU_PAGE_SIZE,
966 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
967 &psDevInfo->hDummyPTPageOSMemHandle);
968 if(!psDevInfo->pvDummyPTPageCpuVAddr)
969 {
970 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
971 return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
972 }
973
974
975 if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
976 SGX_MMU_PAGE_SIZE,
977 IMG_NULL,
978 IMG_NULL,
979 0,
980 SGX_MMU_PAGE_SIZE,
981 0,
982 &(sSysPAddr.uiAddr))!= IMG_TRUE)
983 {
984 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
985 return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
986 }
987
988
989 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
990 psDevInfo->sDummyDataDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
991 psDevInfo->pvDummyDataPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
992 SGX_MMU_PAGE_SIZE,
993 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
994 &psDevInfo->hDummyDataPageOSMemHandle);
995 if(!psDevInfo->pvDummyDataPageCpuVAddr)
996 {
997 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
998 return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
999 }
1000 }
1001#endif
1002 }
1003
1004#if defined(PDUMP)
1005
1006#if IMG_ADDRSPACE_PHYSADDR_BITS == 32
1007 PDUMPCOMMENT("Alloc page directory for new MMU context (PDDevPAddr == 0x%08x)",
1008 sPDDevPAddr.uiAddr);
1009#else
1010 PDUMPCOMMENT("Alloc page directory for new MMU context, 64-bit arch detected (PDDevPAddr == 0x%08x%08x)",
1011 sPDDevPAddr.uiHighAddr, sPDDevPAddr.uiAddr);
1012#endif
1013#endif
1014#ifdef SUPPORT_SGX_MMU_BYPASS
1015 EnableHostAccess(psMMUContext);
1016#endif
1017
1018 if (pvPDCpuVAddr)
1019 {
1020 pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr;
1021 }
1022 else
1023 {
1024 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: pvPDCpuVAddr invalid"));
1025 return PVRSRV_ERROR_INVALID_CPU_ADDR;
1026 }
1027
1028 PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, hPDOSMemHandle, 0, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
1029
1030#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
1031
1032 for(i=0; i<SGX_MMU_PD_SIZE; i++)
1033 {
1034 pui32Tmp[i] = (psDevInfo->sDummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
1035 | SGX_MMU_PDE_PAGE_SIZE_4K
1036 | SGX_MMU_PDE_VALID;
1037 }
1038
1039 if(!psDevInfo->pvMMUContextList)
1040 {
1041
1042
1043
1044 pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyPTPageCpuVAddr;
1045 for(i=0; i<SGX_MMU_PT_SIZE; i++)
1046 {
1047 pui32Tmp[i] = (psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
1048 | SGX_MMU_PTE_VALID;
1049 }
1050
1051 PDUMPCOMMENT("Dummy Page table contents");
 1052		PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hDummyPTPageOSMemHandle, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
1053
1054
1055
1056 pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyDataPageCpuVAddr;
1057 for(i=0; i<(SGX_MMU_PAGE_SIZE/4); i++)
1058 {
1059 pui32Tmp[i] = DUMMY_DATA_PAGE_SIGNATURE;
1060 }
1061
1062 PDUMPCOMMENT("Dummy Data Page contents");
 1063		PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hDummyDataPageOSMemHandle, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
1064 }
1065#else
1066
1067 for(i=0; i<SGX_MMU_PD_SIZE; i++)
1068 {
1069
1070 pui32Tmp[i] = 0;
1071 }
1072#endif
1073
1074
1075 PDUMPCOMMENT("Page directory contents");
1076 PDUMPMEMPTENTRIES(&sMMUAttrib, hPDOSMemHandle, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
1077
1078
1079#if defined(PDUMP)
1080 {
1081 PVRSRV_ERROR eError;
1082
1083 IMG_UINT32 ui32MMUType = 1;
1084
1085 #if defined(SGX_FEATURE_36BIT_MMU)
1086 ui32MMUType = 3;
1087 #else
1088 #if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
1089 ui32MMUType = 2;
1090 #endif
1091 #endif
1092
1093 eError = PDumpSetMMUContext(PVRSRV_DEVICE_TYPE_SGX,
1094 psDeviceNode->sDevId.pszPDumpDevName,
1095 &psMMUContext->ui32PDumpMMUContextID,
1096 ui32MMUType,
1097 PDUMP_PT_UNIQUETAG,
1098 hPDOSMemHandle,
1099 pvPDCpuVAddr);
1100 if (eError != PVRSRV_OK)
1101 {
1102 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to PDumpSetMMUContext failed"));
1103 return eError;
1104 }
1105 }
1106
1107
1108 PDUMPCOMMENT("Set MMU context complete (MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID);
1109#endif
1110
1111
1112 psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
1113 psMMUContext->sPDDevPAddr = sPDDevPAddr;
1114 psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
1115
1116
1117 *ppsMMUContext = psMMUContext;
1118
1119
1120 *psPDDevPAddr = sPDDevPAddr;
1121
1122
1123 psMMUContext->psNext = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
1124 psDevInfo->pvMMUContextList = (IMG_VOID*)psMMUContext;
1125
1126#ifdef SUPPORT_SGX_MMU_BYPASS
1127 DisableHostAccess(psMMUContext);
1128#endif
1129
1130 return PVRSRV_OK;
1131}
1132
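/* Destroy an MMU context: clear and free its page directory (and, when this is the
   last context, the dummy PT and dummy data pages), unlink the context from the
   device's context list and free the context structure. */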
1133IMG_VOID
1134MMU_Finalise (MMU_CONTEXT *psMMUContext)
1135{
1136 IMG_UINT32 *pui32Tmp, i;
1137 SYS_DATA *psSysData;
1138 MMU_CONTEXT **ppsMMUContext;
1139#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
1140 PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo;
1141 MMU_CONTEXT *psMMUContextList = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
1142#endif
1143
1144 SysAcquireData(&psSysData);
1145
1146#if defined(PDUMP)
1147
1148 PDUMPCOMMENT("Clear MMU context (MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID);
1149 PDUMPCLEARMMUCONTEXT(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->psDeviceNode->sDevId.pszPDumpDevName, psMMUContext->ui32PDumpMMUContextID, 2);
1150
1151
1152#if IMG_ADDRSPACE_PHYSADDR_BITS == 32
1153 PDUMPCOMMENT("Free page directory (PDDevPAddr == 0x%08x)",
1154 psMMUContext->sPDDevPAddr.uiAddr);
1155#else
1156 PDUMPCOMMENT("Free page directory, 64-bit arch detected (PDDevPAddr == 0x%08x%08x)",
1157 psMMUContext->sPDDevPAddr.uiHighAddr, psMMUContext->sPDDevPAddr.uiAddr);
1158#endif
1159#endif
1160
1161 PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psMMUContext->hPDOSMemHandle, psMMUContext->pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
1162#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
1163 PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hDummyPTPageOSMemHandle, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
1164 PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hDummyDataPageOSMemHandle, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
1165#endif
1166
1167 pui32Tmp = (IMG_UINT32 *)psMMUContext->pvPDCpuVAddr;
1168
1169
1170 for(i=0; i<SGX_MMU_PD_SIZE; i++)
1171 {
1172
1173 pui32Tmp[i] = 0;
1174 }
1175
1176
1177
1178
1179
1180 if(psMMUContext->psDeviceNode->psLocalDevMemArena == IMG_NULL)
1181 {
1182 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
1183 SGX_MMU_PAGE_SIZE,
1184 psMMUContext->pvPDCpuVAddr,
1185 psMMUContext->hPDOSMemHandle);
1186
1187#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
1188
1189 if(!psMMUContextList->psNext)
1190 {
1191 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
1192 SGX_MMU_PAGE_SIZE,
1193 psDevInfo->pvDummyPTPageCpuVAddr,
1194 psDevInfo->hDummyPTPageOSMemHandle);
1195 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
1196 SGX_MMU_PAGE_SIZE,
1197 psDevInfo->pvDummyDataPageCpuVAddr,
1198 psDevInfo->hDummyDataPageOSMemHandle);
1199 }
1200#endif
1201 }
1202 else
1203 {
1204 IMG_SYS_PHYADDR sSysPAddr;
1205 IMG_CPU_PHYADDR sCpuPAddr;
1206
1207
1208 sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->hPDOSMemHandle,
1209 psMMUContext->pvPDCpuVAddr);
1210 sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
1211
1212
1213 OSUnMapPhysToLin(psMMUContext->pvPDCpuVAddr,
1214 SGX_MMU_PAGE_SIZE,
1215 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
1216 psMMUContext->hPDOSMemHandle);
1217
1218 RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
1219
1220#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
1221
1222 if(!psMMUContextList->psNext)
1223 {
1224
1225 sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle,
1226 psDevInfo->pvDummyPTPageCpuVAddr);
1227 sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
1228
1229
1230 OSUnMapPhysToLin(psDevInfo->pvDummyPTPageCpuVAddr,
1231 SGX_MMU_PAGE_SIZE,
1232 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
1233 psDevInfo->hDummyPTPageOSMemHandle);
1234
1235 RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
1236
1237
1238 sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyDataPageOSMemHandle,
1239 psDevInfo->pvDummyDataPageCpuVAddr);
1240 sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
1241
1242
1243 OSUnMapPhysToLin(psDevInfo->pvDummyDataPageCpuVAddr,
1244 SGX_MMU_PAGE_SIZE,
1245 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
1246 psDevInfo->hDummyDataPageOSMemHandle);
1247
1248 RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
1249 }
1250#endif
1251 }
1252
1253 PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Finalise"));
1254
1255
1256 ppsMMUContext = (MMU_CONTEXT**)&psMMUContext->psDevInfo->pvMMUContextList;
1257 while(*ppsMMUContext)
1258 {
1259 if(*ppsMMUContext == psMMUContext)
1260 {
1261
1262 *ppsMMUContext = psMMUContext->psNext;
1263 break;
1264 }
1265
1266
1267 ppsMMUContext = &((*ppsMMUContext)->psNext);
1268 }
1269
1270
1271 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_CONTEXT), psMMUContext, IMG_NULL);
1272
1273}
1274
1275
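/* Copy the page directory entries covering a shared heap from the heap's owning
   context into another context's page directory so both map the same page tables;
   on cores without multiple memory contexts the directory cache is invalidated if
   any entry was actually copied. */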
1276IMG_VOID
1277MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap)
1278{
1279 IMG_UINT32 *pui32PDCpuVAddr = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
1280 IMG_UINT32 *pui32KernelPDCpuVAddr = (IMG_UINT32 *) psMMUHeap->psMMUContext->pvPDCpuVAddr;
1281 IMG_UINT32 ui32PDEntry;
1282#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
1283 IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
1284#endif
1285
1286
1287 pui32PDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
1288 pui32KernelPDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
1289
1290
1291
1292
1293#if defined(PDUMP)
1294 PDUMPCOMMENT("Page directory shared heap range copy");
1295 PDUMPCOMMENT(" (Source heap MMU Context ID == %u, PT count == 0x%x)",
1296 psMMUHeap->psMMUContext->ui32PDumpMMUContextID,
1297 psMMUHeap->ui32PageTableCount);
1298 PDUMPCOMMENT(" (Destination MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID);
1299#endif
1300#ifdef SUPPORT_SGX_MMU_BYPASS
1301 EnableHostAccess(psMMUContext);
1302#endif
1303
1304 for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PageTableCount; ui32PDEntry++)
1305 {
1306#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
1307
1308 PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
1309#endif
1310
1311
1312 pui32PDCpuVAddr[ui32PDEntry] = pui32KernelPDCpuVAddr[ui32PDEntry];
1313 if (pui32PDCpuVAddr[ui32PDEntry])
1314 {
1315 PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID *) &pui32PDCpuVAddr[ui32PDEntry], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
1316
1317#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
1318 bInvalidateDirectoryCache = IMG_TRUE;
1319#endif
1320 }
1321 }
1322
1323#ifdef SUPPORT_SGX_MMU_BYPASS
1324 DisableHostAccess(psMMUContext);
1325#endif
1326
1327#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
1328 if (bInvalidateDirectoryCache)
1329 {
1330
1331
1332
1333
1334 MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
1335 }
1336#endif
1337}
1338
1339
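/* Invalidate the PTEs for a range of device virtual pages and free any page table
   whose valid-entry count reaches zero, then invalidate the directory or page
   table cache as appropriate. MMU_FreePageTables below wraps this routine as the
   backing-free callback for the heap's VM arena. */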
1340static IMG_VOID
1341MMU_UnmapPagesAndFreePTs (MMU_HEAP *psMMUHeap,
1342 IMG_DEV_VIRTADDR sDevVAddr,
1343 IMG_UINT32 ui32PageCount,
1344 IMG_HANDLE hUniqueTag)
1345{
1346 IMG_DEV_VIRTADDR sTmpDevVAddr;
1347 IMG_UINT32 i;
1348 IMG_UINT32 ui32PDIndex;
1349 IMG_UINT32 ui32PTIndex;
1350 IMG_UINT32 *pui32Tmp;
1351 IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
1352
1353#if !defined (PDUMP)
1354 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1355#endif
1356
1357 sTmpDevVAddr = sDevVAddr;
1358
1359 for(i=0; i<ui32PageCount; i++)
1360 {
1361 MMU_PT_INFO **ppsPTInfoList;
1362
1363
1364 ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
1365
1366
1367 ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
1368
1369 {
1370
1371 ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
1372
1373
1374 if (!ppsPTInfoList[0])
1375 {
1376 PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Invalid PT for alloc at VAddr:0x%08X (VaddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
1377
1378
1379 sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
1380
1381
1382 continue;
1383 }
1384
1385
1386 pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
1387
1388
1389 if (!pui32Tmp)
1390 {
1391 continue;
1392 }
1393
1394 CheckPT(ppsPTInfoList[0]);
1395
1396
1397 if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
1398 {
1399 ppsPTInfoList[0]->ui32ValidPTECount--;
1400 }
1401 else
1402 {
1403 PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Page is already invalid for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
1404 }
1405
1406
1407 PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
1408
1409#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
1410
1411 pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
1412 | SGX_MMU_PTE_VALID;
1413#else
1414
1415 pui32Tmp[ui32PTIndex] = 0;
1416#endif
1417
1418 CheckPT(ppsPTInfoList[0]);
1419 }
1420
1421
1422
1423 if (ppsPTInfoList[0] && ppsPTInfoList[0]->ui32ValidPTECount == 0)
1424 {
1425 _DeferredFreePageTable(psMMUHeap, ui32PDIndex - psMMUHeap->ui32PDBaseIndex, IMG_TRUE);
1426 bInvalidateDirectoryCache = IMG_TRUE;
1427 }
1428
1429
1430 sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
1431 }
1432
1433 if(bInvalidateDirectoryCache)
1434 {
1435 MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->psDevInfo);
1436 }
1437 else
1438 {
1439 MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
1440 }
1441
1442#if defined(PDUMP)
1443 MMU_PDumpPageTables(psMMUHeap,
1444 sDevVAddr,
1445 psMMUHeap->ui32DataPageSize * ui32PageCount,
1446 IMG_TRUE,
1447 hUniqueTag);
1448#endif
1449}
1450
1451
1452static IMG_VOID MMU_FreePageTables(IMG_PVOID pvMMUHeap,
1453 IMG_SIZE_T ui32Start,
1454 IMG_SIZE_T ui32End,
1455 IMG_HANDLE hUniqueTag)
1456{
1457 MMU_HEAP *pMMUHeap = (MMU_HEAP*)pvMMUHeap;
1458 IMG_DEV_VIRTADDR Start;
1459
1460 Start.uiAddr = ui32Start;
1461
1462 MMU_UnmapPagesAndFreePTs(pMMUHeap, Start, (ui32End - ui32Start) >> pMMUHeap->ui32PTShift, hUniqueTag);
1463}
1464
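/* Create an MMU heap over a device arena: derive the PDE page-size control bits
   and the PD/PT shift and mask geometry from the arena's data page size, size the
   page table requirements, and create the RA arena that allocates device virtual
   addresses for the heap. */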
1465MMU_HEAP *
1466MMU_Create (MMU_CONTEXT *psMMUContext,
1467 DEV_ARENA_DESCRIPTOR *psDevArena,
1468 RA_ARENA **ppsVMArena,
1469 PDUMP_MMU_ATTRIB **ppsMMUAttrib)
1470{
1471 MMU_HEAP *pMMUHeap;
1472 IMG_UINT32 ui32ScaleSize;
1473
1474 PVR_UNREFERENCED_PARAMETER(ppsMMUAttrib);
1475
1476 PVR_ASSERT (psDevArena != IMG_NULL);
1477
1478 if (psDevArena == IMG_NULL)
1479 {
1480 PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid parameter"));
1481 return IMG_NULL;
1482 }
1483
1484 OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
1485 sizeof (MMU_HEAP),
1486 (IMG_VOID **)&pMMUHeap, IMG_NULL,
1487 "MMU Heap");
1488 if (pMMUHeap == IMG_NULL)
1489 {
1490 PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to OSAllocMem failed"));
1491 return IMG_NULL;
1492 }
1493
1494 pMMUHeap->psMMUContext = psMMUContext;
1495 pMMUHeap->psDevArena = psDevArena;
1496
1497
1498
1499
1500 switch(pMMUHeap->psDevArena->ui32DataPageSize)
1501 {
1502 case 0x1000:
1503 ui32ScaleSize = 0;
1504 pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4K;
1505 break;
1506#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
1507 case 0x4000:
1508 ui32ScaleSize = 2;
1509 pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_16K;
1510 break;
1511 case 0x10000:
1512 ui32ScaleSize = 4;
1513 pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_64K;
1514 break;
1515 case 0x40000:
1516 ui32ScaleSize = 6;
1517 pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_256K;
1518 break;
1519 case 0x100000:
1520 ui32ScaleSize = 8;
1521 pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_1M;
1522 break;
1523 case 0x400000:
1524 ui32ScaleSize = 10;
1525 pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4M;
1526 break;
1527#endif
1528 default:
1529 PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid data page size"));
1530 goto ErrorFreeHeap;
1531 }
1532
1533
1534 pMMUHeap->ui32DataPageSize = psDevArena->ui32DataPageSize;
1535 pMMUHeap->ui32DataPageBitWidth = SGX_MMU_PAGE_SHIFT + ui32ScaleSize;
1536 pMMUHeap->ui32DataPageMask = pMMUHeap->ui32DataPageSize - 1;
1537
1538 pMMUHeap->ui32PTShift = pMMUHeap->ui32DataPageBitWidth;
1539 pMMUHeap->ui32PTBitWidth = SGX_MMU_PT_SHIFT - ui32ScaleSize;
1540 pMMUHeap->ui32PTMask = SGX_MMU_PT_MASK & (SGX_MMU_PT_MASK<<ui32ScaleSize);
1541 pMMUHeap->ui32PTSize = (IMG_UINT32)(1UL<<pMMUHeap->ui32PTBitWidth) * sizeof(IMG_UINT32);
1542
1543 if(pMMUHeap->ui32PTSize < 4 * sizeof(IMG_UINT32))
1544 {
1545 pMMUHeap->ui32PTSize = 4 * sizeof(IMG_UINT32);
1546 }
1547 pMMUHeap->ui32PTECount = pMMUHeap->ui32PTSize >> 2;
1548
1549
1550 pMMUHeap->ui32PDShift = pMMUHeap->ui32PTBitWidth + pMMUHeap->ui32PTShift;
1551 pMMUHeap->ui32PDBitWidth = SGX_FEATURE_ADDRESS_SPACE_SIZE - pMMUHeap->ui32PTBitWidth - pMMUHeap->ui32DataPageBitWidth;
1552 pMMUHeap->ui32PDMask = SGX_MMU_PD_MASK & (SGX_MMU_PD_MASK>>(32-SGX_FEATURE_ADDRESS_SPACE_SIZE));
1553
1554
1555
1556
1557
1558 if(psDevArena->BaseDevVAddr.uiAddr > (pMMUHeap->ui32DataPageMask | pMMUHeap->ui32PTMask))
1559 {
1560
1561
1562
1563 PVR_ASSERT ((psDevArena->BaseDevVAddr.uiAddr
1564 & (pMMUHeap->ui32DataPageMask
1565 | pMMUHeap->ui32PTMask)) == 0);
1566 }
1567
1568
1569 pMMUHeap->ui32PTETotal = pMMUHeap->psDevArena->ui32Size >> pMMUHeap->ui32PTShift;
1570
1571
1572 pMMUHeap->ui32PDBaseIndex = (pMMUHeap->psDevArena->BaseDevVAddr.uiAddr & pMMUHeap->ui32PDMask) >> pMMUHeap->ui32PDShift;
1573
1574
1575
1576
1577 pMMUHeap->ui32PageTableCount = (pMMUHeap->ui32PTETotal + pMMUHeap->ui32PTECount - 1)
1578 >> pMMUHeap->ui32PTBitWidth;
1579
1580
1581 pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
1582 psDevArena->BaseDevVAddr.uiAddr,
1583 psDevArena->ui32Size,
1584 IMG_NULL,
1585 MAX(HOST_PAGESIZE(), pMMUHeap->ui32DataPageSize),
1586 IMG_NULL,
1587 IMG_NULL,
1588 &MMU_FreePageTables,
1589 pMMUHeap);
1590
1591 if (pMMUHeap->psVMArena == IMG_NULL)
1592 {
1593 PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to RA_Create failed"));
1594 goto ErrorFreePagetables;
1595 }
1596
1597#if defined(PDUMP)
1598
1599 MMU_SetPDumpAttribs(&pMMUHeap->sMMUAttrib,
1600 psMMUContext->psDeviceNode,
1601 pMMUHeap->ui32DataPageMask,
1602 pMMUHeap->ui32PTSize);
1603 *ppsMMUAttrib = &pMMUHeap->sMMUAttrib;
1604
1605 PDUMPCOMMENT("Create MMU device from arena %s (Size == 0x%x, DataPageSize == 0x%x, BaseDevVAddr == 0x%x)",
1606 psDevArena->pszName,
1607 psDevArena->ui32Size,
1608 pMMUHeap->ui32DataPageSize,
1609 psDevArena->BaseDevVAddr.uiAddr);
1610#endif
1611
1612#if 0
1613
1614 if(psDevArena->ui32HeapID == SGX_TILED_HEAP_ID)
1615 {
1616 IMG_UINT32 ui32RegVal;
1617 IMG_UINT32 ui32XTileStride;
1618
1619
1620
1621
1622
1623
1624 ui32XTileStride = 2;
1625
1626 ui32RegVal = (EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK
1627 & ((psDevArena->BaseDevVAddr.uiAddr>>20)
1628 << EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT))
1629 |(EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK
1630 & (((psDevArena->BaseDevVAddr.uiAddr+psDevArena->ui32Size)>>20)
1631 << EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT))
1632 |(EUR_CR_BIF_TILE0_CFG_MASK
1633 & (((ui32XTileStride<<1)|8) << EUR_CR_BIF_TILE0_CFG_SHIFT));
1634 PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_TILE0, ui32RegVal);
1635 }
1636#endif
1637
1638
1639
1640 *ppsVMArena = pMMUHeap->psVMArena;
1641
1642 return pMMUHeap;
1643
1644
1645ErrorFreePagetables:
1646 _DeferredFreePageTables (pMMUHeap);
1647
1648ErrorFreeHeap:
1649 OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
1650
1651
1652 return IMG_NULL;
1653}
1654
1655IMG_VOID
1656MMU_Delete (MMU_HEAP *pMMUHeap)
1657{
1658 if (pMMUHeap != IMG_NULL)
1659 {
1660 PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Delete"));
1661
1662 if(pMMUHeap->psVMArena)
1663 {
1664 RA_Delete (pMMUHeap->psVMArena);
1665 }
1666
1667#if defined(PDUMP)
1668 PDUMPCOMMENT("Delete MMU device from arena %s (BaseDevVAddr == 0x%x, PT count for deferred free == 0x%x)",
1669 pMMUHeap->psDevArena->pszName,
1670 pMMUHeap->psDevArena->BaseDevVAddr.uiAddr,
1671 pMMUHeap->ui32PageTableCount);
1672#endif
1673
1674#ifdef SUPPORT_SGX_MMU_BYPASS
1675 EnableHostAccess(pMMUHeap->psMMUContext);
1676#endif
1677 _DeferredFreePageTables (pMMUHeap);
1678#ifdef SUPPORT_SGX_MMU_BYPASS
1679 DisableHostAccess(pMMUHeap->psMMUContext);
1680#endif
1681
1682 OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
1683
1684 }
1685}
1686
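/* Allocate device virtual address space from the heap (unless the caller supplied
   its own address) and make sure page tables cover the range, releasing the
   address range again if page table allocation fails. */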
1687IMG_BOOL
1688MMU_Alloc (MMU_HEAP *pMMUHeap,
1689 IMG_SIZE_T uSize,
1690 IMG_SIZE_T *pActualSize,
1691 IMG_UINT32 uFlags,
1692 IMG_UINT32 uDevVAddrAlignment,
1693 IMG_DEV_VIRTADDR *psDevVAddr)
1694{
1695 IMG_BOOL bStatus;
1696
1697 PVR_DPF ((PVR_DBG_MESSAGE,
1698 "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
1699 uSize, uFlags, uDevVAddrAlignment));
1700
1701
1702
1703 if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
1704 {
1705 IMG_UINTPTR_T uiAddr;
1706
1707 bStatus = RA_Alloc (pMMUHeap->psVMArena,
1708 uSize,
1709 pActualSize,
1710 IMG_NULL,
1711 0,
1712 uDevVAddrAlignment,
1713 0,
1714 &uiAddr);
1715 if(!bStatus)
1716 {
1717 PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: RA_Alloc of VMArena failed"));
1718 PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Alloc of DevVAddr failed from heap %s ID%d",
1719 pMMUHeap->psDevArena->pszName,
1720 pMMUHeap->psDevArena->ui32HeapID));
1721 return bStatus;
1722 }
1723
1724 psDevVAddr->uiAddr = IMG_CAST_TO_DEVVADDR_UINT(uiAddr);
1725 }
1726
1727 #ifdef SUPPORT_SGX_MMU_BYPASS
1728 EnableHostAccess(pMMUHeap->psMMUContext);
1729 #endif
1730
1731
1732 bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
1733
1734 #ifdef SUPPORT_SGX_MMU_BYPASS
1735 DisableHostAccess(pMMUHeap->psMMUContext);
1736 #endif
1737
1738 if (!bStatus)
1739 {
1740 PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _DeferredAllocPagetables failed"));
1741 PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Failed to alloc pagetable(s) for DevVAddr 0x%8.8x from heap %s ID%d",
1742 psDevVAddr->uiAddr,
1743 pMMUHeap->psDevArena->pszName,
1744 pMMUHeap->psDevArena->ui32HeapID));
1745 if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
1746 {
1747
1748 RA_Free (pMMUHeap->psVMArena, psDevVAddr->uiAddr, IMG_FALSE);
1749 }
1750 }
1751
1752 return bStatus;
1753}
1754
1755IMG_VOID
1756MMU_Free (MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
1757{
1758 PVR_ASSERT (pMMUHeap != IMG_NULL);
1759
1760 if (pMMUHeap == IMG_NULL)
1761 {
1762 PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter"));
1763 return;
1764 }
1765
1766 PVR_DPF((PVR_DBG_MESSAGE, "MMU_Free: Freeing DevVAddr 0x%08X from heap %s ID%d",
1767 DevVAddr.uiAddr,
1768 pMMUHeap->psDevArena->pszName,
1769 pMMUHeap->psDevArena->ui32HeapID));
1770
1771 if((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
1772 (DevVAddr.uiAddr + ui32Size <= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr + pMMUHeap->psDevArena->ui32Size))
1773 {
1774 RA_Free (pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
1775 return;
1776 }
1777
 1778	PVR_DPF((PVR_DBG_ERROR,"MMU_Free: Couldn't free DevVAddr %08X from heap %s ID%d (not in range of heap)",
1779 DevVAddr.uiAddr,
1780 pMMUHeap->psDevArena->pszName,
1781 pMMUHeap->psDevArena->ui32HeapID));
1782}
1783
1784IMG_VOID
1785MMU_Enable (MMU_HEAP *pMMUHeap)
1786{
1787 PVR_UNREFERENCED_PARAMETER(pMMUHeap);
1788
1789}
1790
1791IMG_VOID
1792MMU_Disable (MMU_HEAP *pMMUHeap)
1793{
1794 PVR_UNREFERENCED_PARAMETER(pMMUHeap);
1795
1796}
1797
1798#if defined(PDUMP)
1799static IMG_VOID
1800MMU_PDumpPageTables (MMU_HEAP *pMMUHeap,
1801 IMG_DEV_VIRTADDR DevVAddr,
1802 IMG_SIZE_T uSize,
1803 IMG_BOOL bForUnmap,
1804 IMG_HANDLE hUniqueTag)
1805{
1806 IMG_UINT32 ui32NumPTEntries;
1807 IMG_UINT32 ui32PTIndex;
1808 IMG_UINT32 *pui32PTEntry;
1809
1810 MMU_PT_INFO **ppsPTInfoList;
1811 IMG_UINT32 ui32PDIndex;
1812 IMG_UINT32 ui32PTDumpCount;
1813
1814
1815 ui32NumPTEntries = (uSize + pMMUHeap->ui32DataPageMask) >> pMMUHeap->ui32PTShift;
1816
1817
1818 ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
1819
1820
1821 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
1822
1823
1824 ui32PTIndex = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
1825
1826
1827 PDUMPCOMMENT("Page table mods (num entries == %08X) %s", ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
1828
1829
1830 while(ui32NumPTEntries > 0)
1831 {
1832 MMU_PT_INFO* psPTInfo = *ppsPTInfoList++;
1833
1834 if(ui32NumPTEntries <= pMMUHeap->ui32PTECount - ui32PTIndex)
1835 {
1836 ui32PTDumpCount = ui32NumPTEntries;
1837 }
1838 else
1839 {
1840 ui32PTDumpCount = pMMUHeap->ui32PTECount - ui32PTIndex;
1841 }
1842
1843 if (psPTInfo)
1844 {
1845 pui32PTEntry = (IMG_UINT32*)psPTInfo->PTPageCpuVAddr;
1846 PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[ui32PTIndex], ui32PTDumpCount * sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
1847 }
1848
1849
1850 ui32NumPTEntries -= ui32PTDumpCount;
1851
1852
1853 ui32PTIndex = 0;
1854 }
1855
1856 PDUMPCOMMENT("Finished page table mods %s", bForUnmap ? "(for unmap)" : "");
1857}
1858#endif
1859
1860
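/* Write a single PTE: translate the memory flags into PTE protection bits, locate
   the page table through the PD index, bump its valid-entry count and store the
   data page's device physical address with SGX_MMU_PTE_VALID set. */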
1861static IMG_VOID
1862MMU_MapPage (MMU_HEAP *pMMUHeap,
1863 IMG_DEV_VIRTADDR DevVAddr,
1864 IMG_DEV_PHYADDR DevPAddr,
1865 IMG_UINT32 ui32MemFlags)
1866{
1867 IMG_UINT32 ui32Index;
1868 IMG_UINT32 *pui32Tmp;
1869 IMG_UINT32 ui32MMUFlags = 0;
1870 MMU_PT_INFO **ppsPTInfoList;
1871
1872
1873 PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
1874
1875
1876
1877 if(((PVRSRV_MEM_READ|PVRSRV_MEM_WRITE) & ui32MemFlags) == (PVRSRV_MEM_READ|PVRSRV_MEM_WRITE))
1878 {
1879
1880 ui32MMUFlags = 0;
1881 }
1882 else if(PVRSRV_MEM_READ & ui32MemFlags)
1883 {
1884
1885 ui32MMUFlags |= SGX_MMU_PTE_READONLY;
1886 }
1887 else if(PVRSRV_MEM_WRITE & ui32MemFlags)
1888 {
1889
1890 ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
1891 }
1892
1893
1894 if(PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
1895 {
1896 ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
1897 }
1898
1899#if !defined(FIX_HW_BRN_25503)
1900
1901 if(PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
1902 {
1903 ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
1904 }
1905#endif
1906
1907
1908
1909
1910
1911 ui32Index = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
1912
1913
1914 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
1915
1916 CheckPT(ppsPTInfoList[0]);
1917
1918
1919 ui32Index = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
1920
1921
1922 pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
1923
1924#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
1925 {
1926 IMG_UINT32 uTmp = pui32Tmp[ui32Index];
1927
1928
1929 if (uTmp & SGX_MMU_PTE_VALID)
1930 {
1931 PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page is already valid for alloc at VAddr:0x%08X PDIdx:%u PTIdx:%u",
1932 DevVAddr.uiAddr,
1933 DevVAddr.uiAddr >> pMMUHeap->ui32PDShift,
1934 ui32Index ));
1935 PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page table entry value: 0x%08X", uTmp));
1936 PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Physical page to map: 0x%08X", DevPAddr.uiAddr));
1937 }
1938
1939 PVR_ASSERT((uTmp & SGX_MMU_PTE_VALID) == 0);
1940 }
1941#endif
1942
1943
1944 ppsPTInfoList[0]->ui32ValidPTECount++;
1945
1946
1947 pui32Tmp[ui32Index] = ((DevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
1948 & ((~pMMUHeap->ui32DataPageMask)>>SGX_MMU_PTE_ADDR_ALIGNSHIFT))
1949 | SGX_MMU_PTE_VALID
1950 | ui32MMUFlags;
1951
1952 CheckPT(ppsPTInfoList[0]);
1953}
1954
1955
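/* Map an array of page-aligned system physical addresses onto consecutive device
   virtual pages of the heap, PDumping the affected page tables afterwards. */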
1956IMG_VOID
1957MMU_MapScatter (MMU_HEAP *pMMUHeap,
1958 IMG_DEV_VIRTADDR DevVAddr,
1959 IMG_SYS_PHYADDR *psSysAddr,
1960 IMG_SIZE_T uSize,
1961 IMG_UINT32 ui32MemFlags,
1962 IMG_HANDLE hUniqueTag)
1963{
1964#if defined(PDUMP)
1965 IMG_DEV_VIRTADDR MapBaseDevVAddr;
1966#endif
1967 IMG_UINT32 uCount, i;
1968 IMG_DEV_PHYADDR DevPAddr;
1969
1970 PVR_ASSERT (pMMUHeap != IMG_NULL);
1971
1972#if defined(PDUMP)
1973 MapBaseDevVAddr = DevVAddr;
1974#else
1975 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
1976#endif
1977
1978 for (i=0, uCount=0; uCount<uSize; i++, uCount+=pMMUHeap->ui32DataPageSize)
1979 {
1980 IMG_SYS_PHYADDR sSysAddr;
1981
1982 sSysAddr = psSysAddr[i];
1983
1984
1985
1986 PVR_ASSERT((sSysAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
1987
1988 DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
1989
1990 MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
1991 DevVAddr.uiAddr += pMMUHeap->ui32DataPageSize;
1992
1993 PVR_DPF ((PVR_DBG_MESSAGE,
1994 "MMU_MapScatter: devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
1995 DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize));
1996 }
1997
1998#if defined(PDUMP)
1999 MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
2000#endif
2001}
2002
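/* Map a physically contiguous system memory range at DevVAddr, advancing the
   physical address by one data page per mapping (or not at all for PVRSRV_MEM_DUMMY
   allocations, which map the same page repeatedly). */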
2003IMG_VOID
2004MMU_MapPages (MMU_HEAP *pMMUHeap,
2005 IMG_DEV_VIRTADDR DevVAddr,
2006 IMG_SYS_PHYADDR SysPAddr,
2007 IMG_SIZE_T uSize,
2008 IMG_UINT32 ui32MemFlags,
2009 IMG_HANDLE hUniqueTag)
2010{
2011 IMG_DEV_PHYADDR DevPAddr;
2012#if defined(PDUMP)
2013 IMG_DEV_VIRTADDR MapBaseDevVAddr;
2014#endif
2015 IMG_UINT32 uCount;
2016 IMG_UINT32 ui32VAdvance;
2017 IMG_UINT32 ui32PAdvance;
2018
2019 PVR_ASSERT (pMMUHeap != IMG_NULL);
2020
2021 PVR_DPF ((PVR_DBG_MESSAGE, "MMU_MapPages: heap:%s, heap_id:%d devVAddr=%08X, SysPAddr=%08X, size=0x%x",
2022 pMMUHeap->psDevArena->pszName,
2023 pMMUHeap->psDevArena->ui32HeapID,
2024 DevVAddr.uiAddr,
2025 SysPAddr.uiAddr,
2026 uSize));
2027
2028
2029 ui32VAdvance = pMMUHeap->ui32DataPageSize;
2030 ui32PAdvance = pMMUHeap->ui32DataPageSize;
2031
2032#if defined(PDUMP)
2033 MapBaseDevVAddr = DevVAddr;
2034#else
2035 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
2036#endif
2037
2038 DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
2039
2040
2041 PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
2042
2043#if defined(FIX_HW_BRN_23281)
2044 if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
2045 {
2046 ui32VAdvance *= 2;
2047 }
2048#endif
2049
2050
2051
2052
2053 if(ui32MemFlags & PVRSRV_MEM_DUMMY)
2054 {
2055 ui32PAdvance = 0;
2056 }
2057
2058 for (uCount=0; uCount<uSize; uCount+=ui32VAdvance)
2059 {
2060 MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
2061 DevVAddr.uiAddr += ui32VAdvance;
2062 DevPAddr.uiAddr += ui32PAdvance;
2063 }
2064
2065#if defined(PDUMP)
2066 MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
2067#endif
2068}
2069
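/* Map an existing CPU allocation into the device: for each data page, obtain the
   CPU physical address from the linear address or the OS memory handle, convert it
   to a device physical address and map it at the next device virtual page. */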
2070IMG_VOID
2071MMU_MapShadow (MMU_HEAP *pMMUHeap,
2072 IMG_DEV_VIRTADDR MapBaseDevVAddr,
2073 IMG_SIZE_T uByteSize,
2074 IMG_CPU_VIRTADDR CpuVAddr,
2075 IMG_HANDLE hOSMemHandle,
2076 IMG_DEV_VIRTADDR *pDevVAddr,
2077 IMG_UINT32 ui32MemFlags,
2078 IMG_HANDLE hUniqueTag)
2079{
2080 IMG_UINT32 i;
2081 IMG_UINT32 uOffset = 0;
2082 IMG_DEV_VIRTADDR MapDevVAddr;
2083 IMG_UINT32 ui32VAdvance;
2084 IMG_UINT32 ui32PAdvance;
2085
2086#if !defined (PDUMP)
2087 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
2088#endif
2089
2090 PVR_DPF ((PVR_DBG_MESSAGE,
2091 "MMU_MapShadow: DevVAddr:%08X, Bytes:0x%x, CPUVAddr:%08X",
2092 MapBaseDevVAddr.uiAddr,
2093 uByteSize,
2094 (IMG_UINTPTR_T)CpuVAddr));
2095
2096
2097 ui32VAdvance = pMMUHeap->ui32DataPageSize;
2098 ui32PAdvance = pMMUHeap->ui32DataPageSize;
2099
2100
2101 PVR_ASSERT(((IMG_UINTPTR_T)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
2102 PVR_ASSERT(((IMG_UINT32)uByteSize & pMMUHeap->ui32DataPageMask) == 0);
2103 pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
2104
2105#if defined(FIX_HW_BRN_23281)
2106 if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
2107 {
2108 ui32VAdvance *= 2;
2109 }
2110#endif
2111
2112
2113
2114	/* for dummy allocations every device virtual page maps to the same physical page */
2115 if(ui32MemFlags & PVRSRV_MEM_DUMMY)
2116 {
2117 ui32PAdvance = 0;
2118 }
2119
2120
2121 MapDevVAddr = MapBaseDevVAddr;
2122 for (i=0; i<uByteSize; i+=ui32VAdvance)
2123 {
2124 IMG_CPU_PHYADDR CpuPAddr;
2125 IMG_DEV_PHYADDR DevPAddr;
2126
2127 if(CpuVAddr)
2128 {
2129 CpuPAddr = OSMapLinToCPUPhys (hOSMemHandle,
2130 (IMG_VOID *)((IMG_UINTPTR_T)CpuVAddr + uOffset));
2131 }
2132 else
2133 {
2134 CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
2135 }
2136 DevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
2137
2138
2139 PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
2140
2141 PVR_DPF ((PVR_DBG_MESSAGE,
2142 "Offset=0x%x: CpuVAddr=%08X, CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
2143 uOffset,
2144 (IMG_UINTPTR_T)CpuVAddr + uOffset,
2145 CpuPAddr.uiAddr,
2146 MapDevVAddr.uiAddr,
2147 DevPAddr.uiAddr));
2148
2149 MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
2150
2151
2152 MapDevVAddr.uiAddr += ui32VAdvance;
2153 uOffset += ui32PAdvance;
2154 }
2155
2156#if defined(PDUMP)
2157 MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE, hUniqueTag);
2158#endif
2159}
2160
2161/* MMU_UnmapPages: invalidate the page table entries for ui32PageCount data pages starting at sDevVAddr */
2162IMG_VOID
2163MMU_UnmapPages (MMU_HEAP *psMMUHeap,
2164 IMG_DEV_VIRTADDR sDevVAddr,
2165 IMG_UINT32 ui32PageCount,
2166 IMG_HANDLE hUniqueTag)
2167{
2168 IMG_UINT32 uPageSize = psMMUHeap->ui32DataPageSize;
2169 IMG_DEV_VIRTADDR sTmpDevVAddr;
2170 IMG_UINT32 i;
2171 IMG_UINT32 ui32PDIndex;
2172 IMG_UINT32 ui32PTIndex;
2173 IMG_UINT32 *pui32Tmp;
2174
2175#if !defined (PDUMP)
2176 PVR_UNREFERENCED_PARAMETER(hUniqueTag);
2177#endif
2178
2179
2180 sTmpDevVAddr = sDevVAddr;
2181
2182 for(i=0; i<ui32PageCount; i++)
2183 {
2184 MMU_PT_INFO **ppsPTInfoList;
2185
2186		/* page directory index for this device virtual address */
2187 ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
2188
2189
2190 ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
2191
2192		/* page table index within that page table */
2193 ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
2194
2195
2196 if (!ppsPTInfoList[0])
2197 {
2198 PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: ERROR Invalid PT for alloc at VAddr:0x%08X (VaddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",
2199 sTmpDevVAddr.uiAddr,
2200 sDevVAddr.uiAddr,
2201 i,
2202 ui32PDIndex,
2203 ui32PTIndex));
2204
2205
2206 sTmpDevVAddr.uiAddr += uPageSize;
2207
2208
2209 continue;
2210 }
2211
2212 CheckPT(ppsPTInfoList[0]);
2213
2214
2215 pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
2216
2217
2218 if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
2219 {
2220 ppsPTInfoList[0]->ui32ValidPTECount--;
2221 }
2222 else
2223 {
2224 PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page is already invalid for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",
2225 sTmpDevVAddr.uiAddr,
2226 sDevVAddr.uiAddr,
2227 i,
2228 ui32PDIndex,
2229 ui32PTIndex));
2230 PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page table entry value: 0x%08X", pui32Tmp[ui32PTIndex]));
2231 }
2232
2233
2234 PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
2235
2236#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
2237		/* re-point the entry at the dummy data page so it remains valid */
2238 pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
2239 | SGX_MMU_PTE_VALID;
2240#else
2241		/* invalidate the page table entry */
2242 pui32Tmp[ui32PTIndex] = 0;
2243#endif
2244
2245 CheckPT(ppsPTInfoList[0]);
2246
2247
2248 sTmpDevVAddr.uiAddr += uPageSize;
2249 }
2250
2251 MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
2252
2253#if defined(PDUMP)
2254 MMU_PDumpPageTables (psMMUHeap, sDevVAddr, uPageSize*ui32PageCount, IMG_TRUE, hUniqueTag);
2255#endif
2256}
2257
2258/* MMU_GetPhysPageAddr: walk the page directory and page table in software and return the device physical address backing sDevVPageAddr, or 0 if the page is not mapped */
2259IMG_DEV_PHYADDR
2260MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr)
2261{
2262 IMG_UINT32 *pui32PageTable;
2263 IMG_UINT32 ui32Index;
2264 IMG_DEV_PHYADDR sDevPAddr;
2265 MMU_PT_INFO **ppsPTInfoList;
2266
2267	/* page directory index for the requested page address */
2268 ui32Index = sDevVPageAddr.uiAddr >> pMMUHeap->ui32PDShift;
2269
2270
2271 ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
2272 if (!ppsPTInfoList[0])
2273 {
2274 PVR_DPF((PVR_DBG_ERROR,"MMU_GetPhysPageAddr: Not mapped in at 0x%08x", sDevVPageAddr.uiAddr));
2275 sDevPAddr.uiAddr = 0;
2276 return sDevPAddr;
2277 }
2278
2279
2280 ui32Index = (sDevVPageAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
2281
2282
2283 pui32PageTable = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
2284
2285
2286 sDevPAddr.uiAddr = pui32PageTable[ui32Index];
2287
2288	/* mask off the PTE control bits; the remaining address bits are shifted back up below */
2289 sDevPAddr.uiAddr &= ~(pMMUHeap->ui32DataPageMask>>SGX_MMU_PTE_ADDR_ALIGNSHIFT);
2290
2291
2292 sDevPAddr.uiAddr <<= SGX_MMU_PTE_ADDR_ALIGNSHIFT;
2293
2294 return sDevPAddr;
2295}
2296
2297/* MMU_GetPDDevPAddr: return the device physical address of the context's page directory */
2298IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext)
2299{
2300 return (pMMUContext->sPDDevPAddr);
2301}
2302
2303/* SGXGetPhysPageAddrKM: bridge entry point wrapping MMU_GetPhysPageAddr for a device memory heap handle */
2304IMG_EXPORT
2305PVRSRV_ERROR SGXGetPhysPageAddrKM (IMG_HANDLE hDevMemHeap,
2306 IMG_DEV_VIRTADDR sDevVAddr,
2307 IMG_DEV_PHYADDR *pDevPAddr,
2308 IMG_CPU_PHYADDR *pCpuPAddr)
2309{
2310 MMU_HEAP *pMMUHeap;
2311 IMG_DEV_PHYADDR DevPAddr;
2312
2313
2314
2315 pMMUHeap = (MMU_HEAP*)BM_GetMMUHeap(hDevMemHeap);
2316
2317 DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
2318 pCpuPAddr->uiAddr = DevPAddr.uiAddr;
2319 pDevPAddr->uiAddr = DevPAddr.uiAddr;
2320
2321 return (pDevPAddr->uiAddr != 0) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
2322}
2323
2324/* SGXGetMMUPDAddrKM: return the page directory device physical address for a device memory context */
2325PVRSRV_ERROR SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie,
2326 IMG_HANDLE hDevMemContext,
2327 IMG_DEV_PHYADDR *psPDDevPAddr)
2328{
2329 if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
2330 {
2331 return PVRSRV_ERROR_INVALID_PARAMS;
2332 }
2333
2334
2335 *psPDDevPAddr = ((BM_CONTEXT*)hDevMemContext)->psMMUContext->sPDDevPAddr;
2336
2337 return PVRSRV_OK;
2338}
2339/* MMU_BIFResetPDAlloc: allocate and initialise the page directory, page table and data page used while resetting the BIF */
2340PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo)
2341{
2342 PVRSRV_ERROR eError;
2343 SYS_DATA *psSysData;
2344 RA_ARENA *psLocalDevMemArena;
2345 IMG_HANDLE hOSMemHandle = IMG_NULL;
2346 IMG_BYTE *pui8MemBlock = IMG_NULL;
2347 IMG_SYS_PHYADDR sMemBlockSysPAddr;
2348 IMG_CPU_PHYADDR sMemBlockCpuPAddr;
2349
2350 SysAcquireData(&psSysData);
2351
2352 psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
2353
2354	/* no local device memory arena, so allocate the block from OS pages */
2355 if(psLocalDevMemArena == IMG_NULL)
2356 {
2357
2358 eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
2359 3 * SGX_MMU_PAGE_SIZE,
2360 SGX_MMU_PAGE_SIZE,
2361 (IMG_VOID **)&pui8MemBlock,
2362 &hOSMemHandle);
2363 if (eError != PVRSRV_OK)
2364 {
2365 PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to OSAllocPages failed"));
2366 return eError;
2367 }
2368
2369
2370 if(pui8MemBlock)
2371 {
2372 sMemBlockCpuPAddr = OSMapLinToCPUPhys(hOSMemHandle,
2373 pui8MemBlock);
2374 }
2375 else
2376 {
2377
2378 sMemBlockCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, 0);
2379 }
2380 }
2381 else
2382 {
2383
2384		/* allocate the block from the local device memory arena and map it into the kernel */
2385 if(RA_Alloc(psLocalDevMemArena,
2386 3 * SGX_MMU_PAGE_SIZE,
2387 IMG_NULL,
2388 IMG_NULL,
2389 0,
2390 SGX_MMU_PAGE_SIZE,
2391 0,
2392 &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE)
2393 {
2394 PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to RA_Alloc failed"));
2395 return PVRSRV_ERROR_OUT_OF_MEMORY;
2396 }
2397
2398
2399 sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
2400 pui8MemBlock = OSMapPhysToLin(sMemBlockCpuPAddr,
2401 SGX_MMU_PAGE_SIZE * 3,
2402 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
2403 &hOSMemHandle);
2404 if(!pui8MemBlock)
2405 {
2406 PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR failed to map page tables"));
2407 return PVRSRV_ERROR_BAD_MAPPING;
2408 }
2409 }
2410
2411 psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
2412 psDevInfo->sBIFResetPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
2413 psDevInfo->sBIFResetPTDevPAddr.uiAddr = psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
2414 psDevInfo->sBIFResetPageDevPAddr.uiAddr = psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
2415
2416	/* the block holds the reset page directory, page table and data page, in that order */
2417 psDevInfo->pui32BIFResetPD = (IMG_UINT32 *)pui8MemBlock;
2418 psDevInfo->pui32BIFResetPT = (IMG_UINT32 *)(pui8MemBlock + SGX_MMU_PAGE_SIZE);
2419
2420
2421 OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
2422 OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
2423	/* fill the data page with a recognisable marker value */
2424 OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB, SGX_MMU_PAGE_SIZE);
2425
2426 return PVRSRV_OK;
2427}
2428/* MMU_BIFResetPDFree: free the BIF reset page directory, page table and data page */
2429IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo)
2430{
2431 SYS_DATA *psSysData;
2432 RA_ARENA *psLocalDevMemArena;
2433 IMG_SYS_PHYADDR sPDSysPAddr;
2434
2435 SysAcquireData(&psSysData);
2436
2437 psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
2438
2439
2440 if(psLocalDevMemArena == IMG_NULL)
2441 {
2442 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
2443 3 * SGX_MMU_PAGE_SIZE,
2444 psDevInfo->pui32BIFResetPD,
2445 psDevInfo->hBIFResetPDOSMemHandle);
2446 }
2447 else
2448 {
2449 OSUnMapPhysToLin(psDevInfo->pui32BIFResetPD,
2450 3 * SGX_MMU_PAGE_SIZE,
2451 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
2452 psDevInfo->hBIFResetPDOSMemHandle);
2453
2454 sPDSysPAddr = SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->sBIFResetPDDevPAddr);
2455 RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
2456 }
2457}
2458
2459/* Workaround for BRN22997/BRN23030: a dedicated page directory and page table used to perform a read through the SGX host port */
2460#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
2461PVRSRV_ERROR WorkaroundBRN22997Alloc(PVRSRV_DEVICE_NODE *psDeviceNode)
2462{
2463 PVRSRV_ERROR eError;
2464 SYS_DATA *psSysData;
2465 RA_ARENA *psLocalDevMemArena;
2466 IMG_HANDLE hPTPageOSMemHandle = IMG_NULL;
2467 IMG_HANDLE hPDPageOSMemHandle = IMG_NULL;
2468 IMG_UINT32 *pui32PD = IMG_NULL;
2469 IMG_UINT32 *pui32PT = IMG_NULL;
2470 IMG_CPU_PHYADDR sCpuPAddr;
2471 IMG_DEV_PHYADDR sPTDevPAddr;
2472 IMG_DEV_PHYADDR sPDDevPAddr;
2473 PVRSRV_SGXDEV_INFO *psDevInfo;
2474 IMG_UINT32 ui32PDOffset;
2475 IMG_UINT32 ui32PTOffset;
2476
2477 psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
2478
2479 SysAcquireData(&psSysData);
2480
2481 psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
2482
2483
2484 if(psLocalDevMemArena == IMG_NULL)
2485 {
2486
2487 eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
2488 SGX_MMU_PAGE_SIZE,
2489 SGX_MMU_PAGE_SIZE,
2490 (IMG_VOID **)&pui32PT,
2491 &hPTPageOSMemHandle);
2492 if (eError != PVRSRV_OK)
2493 {
2494 PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to OSAllocPages failed"));
2495 return eError;
2496 }
2497 ui32PTOffset = 0;
2498
2499 eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
2500 SGX_MMU_PAGE_SIZE,
2501 SGX_MMU_PAGE_SIZE,
2502 (IMG_VOID **)&pui32PD,
2503 &hPDPageOSMemHandle);
2504 if (eError != PVRSRV_OK)
2505 {
2506 PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to OSAllocPages failed"));
2507 return eError;
2508 }
2509 ui32PDOffset = 0;
2510
2511
2512 if(pui32PT)
2513 {
2514 sCpuPAddr = OSMapLinToCPUPhys(hPTPageOSMemHandle,
2515 pui32PT);
2516 }
2517 else
2518 {
2519
2520 sCpuPAddr = OSMemHandleToCpuPAddr(hPTPageOSMemHandle, 0);
2521 }
2522 sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
2523
2524 if(pui32PD)
2525 {
2526 sCpuPAddr = OSMapLinToCPUPhys(hPDPageOSMemHandle,
2527 pui32PD);
2528 }
2529 else
2530 {
2531
2532 sCpuPAddr = OSMemHandleToCpuPAddr(hPDPageOSMemHandle, 0);
2533 }
2534 sPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
2535
2536 }
2537 else
2538 {
2539
2540
2541 if(RA_Alloc(psLocalDevMemArena,
2542 SGX_MMU_PAGE_SIZE * 2,
2543 IMG_NULL,
2544 IMG_NULL,
2545 0,
2546 SGX_MMU_PAGE_SIZE,
2547 0,
2548 &(psDevInfo->sBRN22997SysPAddr.uiAddr))!= IMG_TRUE)
2549 {
2550 PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to RA_Alloc failed"));
2551 return PVRSRV_ERROR_OUT_OF_MEMORY;
2552 }
2553
2554
2555 sCpuPAddr = SysSysPAddrToCpuPAddr(psDevInfo->sBRN22997SysPAddr);
2556 pui32PT = OSMapPhysToLin(sCpuPAddr,
2557 SGX_MMU_PAGE_SIZE * 2,
2558 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
2559 &hPTPageOSMemHandle);
2560 if(!pui32PT)
2561 {
2562 PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR failed to map page tables"));
2563 return PVRSRV_ERROR_BAD_MAPPING;
2564 }
2565 ui32PTOffset = 0;
2566
2567
2568 sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
2569
2570 pui32PD = pui32PT + SGX_MMU_PAGE_SIZE/sizeof(IMG_UINT32);
2571 ui32PDOffset = SGX_MMU_PAGE_SIZE;
2572 hPDPageOSMemHandle = hPTPageOSMemHandle;
2573 sPDDevPAddr.uiAddr = sPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
2574 }
2575
2576 OSMemSet(pui32PD, 0, SGX_MMU_PAGE_SIZE);
2577 OSMemSet(pui32PT, 0, SGX_MMU_PAGE_SIZE);
2578
2579
2580 PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, hPDPageOSMemHandle, ui32PDOffset, pui32PD, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
2581 PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, hPTPageOSMemHandle, ui32PTOffset, pui32PT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
2582 PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, hPDPageOSMemHandle, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
2583 PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, hPTPageOSMemHandle, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
2584
2585 psDevInfo->hBRN22997PTPageOSMemHandle = hPTPageOSMemHandle;
2586 psDevInfo->hBRN22997PDPageOSMemHandle = hPDPageOSMemHandle;
2587 psDevInfo->sBRN22997PTDevPAddr = sPTDevPAddr;
2588 psDevInfo->sBRN22997PDDevPAddr = sPDDevPAddr;
2589 psDevInfo->pui32BRN22997PD = pui32PD;
2590 psDevInfo->pui32BRN22997PT = pui32PT;
2591
2592 return PVRSRV_OK;
2593}
2594
2595/* Map a page at the host port device virtual address, flush the BIF directory cache and perform a dummy read through the host port */
2596IMG_VOID WorkaroundBRN22997ReadHostPort(PVRSRV_SGXDEV_INFO *psDevInfo)
2597{
2598 IMG_UINT32 *pui32PD = psDevInfo->pui32BRN22997PD;
2599 IMG_UINT32 *pui32PT = psDevInfo->pui32BRN22997PT;
2600 IMG_UINT32 ui32PDIndex;
2601 IMG_UINT32 ui32PTIndex;
2602 IMG_DEV_VIRTADDR sDevVAddr;
2603 volatile IMG_UINT32 *pui32HostPort;
2604 IMG_UINT32 ui32BIFCtrl;
2605
2606
2607
2608	/* CPU linear address of the host port, offset as required by BRN23030 */
2609 pui32HostPort = (volatile IMG_UINT32*)(((IMG_UINT8*)psDevInfo->pvHostPortBaseKM) + SYS_SGX_HOSTPORT_BRN23030_OFFSET);
2610
2611
2612 sDevVAddr.uiAddr = SYS_SGX_HOSTPORT_BASE_DEVVADDR + SYS_SGX_HOSTPORT_BRN23030_OFFSET;
2613
2614 ui32PDIndex = (sDevVAddr.uiAddr & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
2615 ui32PTIndex = (sDevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
2616
2617	/* set up a PDE for the workaround page table and a PTE that maps the page table page itself */
2618 pui32PD[ui32PDIndex] = (psDevInfo->sBRN22997PTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
2619 | SGX_MMU_PDE_VALID;
2620
2621 pui32PT[ui32PTIndex] = (psDevInfo->sBRN22997PTDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
2622 | SGX_MMU_PTE_VALID;
2623
2624 PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, psDevInfo->hBRN22997PDPageOSMemHandle, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
2625 PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, psDevInfo->hBRN22997PTPageOSMemHandle, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
2626
2627
2628 OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0,
2629 psDevInfo->sBRN22997PDDevPAddr.uiAddr);
2630 PDUMPPDREG(&psDevInfo->sMMUAttrib, EUR_CR_BIF_DIR_LIST_BASE0, psDevInfo->sBRN22997PDDevPAddr.uiAddr, PDUMP_PD_UNIQUETAG);
2631
2632
2633 ui32BIFCtrl = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL);
2634 OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
2635 PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
2636 OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
2637 PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32BIFCtrl);
2638
2639
2640 if (pui32HostPort)
2641 {
2642		/* perform the dummy read through the host port */
2643 IMG_UINT32 ui32Tmp;
2644 ui32Tmp = *pui32HostPort;
2645 }
2646 else
2647 {
2648 PVR_DPF((PVR_DBG_ERROR,"Host Port not present for BRN22997 workaround"));
2649 }
2650
2651
2652
2653
2654
2655
2656	/* record the host port read, and a save of the value read, in the PDump script */
2657 PDUMPCOMMENT("RDW :SGXMEM:v4:%08X\r\n", sDevVAddr.uiAddr);
2658
2659 PDUMPCOMMENT("SAB :SGXMEM:v4:%08X 4 0 hostport.bin", sDevVAddr.uiAddr);
2660
2661	/* remove the temporary mapping again */
2662 pui32PD[ui32PDIndex] = 0;
2663 pui32PT[ui32PTIndex] = 0;
2664
2665
2666 PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, psDevInfo->hBRN22997PDPageOSMemHandle, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
2667 PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, psDevInfo->hBRN22997PTPageOSMemHandle, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
2668
2669 OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
2670 PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
2671 OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
2672 PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32BIFCtrl);
2673}
2674
2675/* Release the memory allocated for the BRN22997 workaround */
2676IMG_VOID WorkaroundBRN22997Free(PVRSRV_DEVICE_NODE *psDeviceNode)
2677{
2678 SYS_DATA *psSysData;
2679 RA_ARENA *psLocalDevMemArena;
2680 PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
2681
2682
2683 SysAcquireData(&psSysData);
2684
2685 psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
2686
2687 PDUMPFREEPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN22997PDPageOSMemHandle, psDevInfo->pui32BRN22997PD, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
2688 PDUMPFREEPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN22997PTPageOSMemHandle, psDevInfo->pui32BRN22997PT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
2689
2690
2691 if(psLocalDevMemArena == IMG_NULL)
2692 {
2693 if (psDevInfo->pui32BRN22997PD != IMG_NULL)
2694 {
2695 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
2696 SGX_MMU_PAGE_SIZE,
2697 psDevInfo->pui32BRN22997PD,
2698 psDevInfo->hBRN22997PDPageOSMemHandle);
2699 }
2700
2701 if (psDevInfo->pui32BRN22997PT != IMG_NULL)
2702 {
2703 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
2704 SGX_MMU_PAGE_SIZE,
2705 psDevInfo->pui32BRN22997PT,
2706 psDevInfo->hBRN22997PTPageOSMemHandle);
2707 }
2708 }
2709 else
2710 {
2711 if (psDevInfo->pui32BRN22997PT != IMG_NULL)
2712 {
2713 OSUnMapPhysToLin(psDevInfo->pui32BRN22997PT,
2714 SGX_MMU_PAGE_SIZE * 2,
2715 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
2716 psDevInfo->hBRN22997PTPageOSMemHandle);
2717
2718
2719 RA_Free(psLocalDevMemArena, psDevInfo->sBRN22997SysPAddr.uiAddr, IMG_FALSE);
2720 }
2721 }
2722}
2723#endif
2724
2725/* Support for mapping the external system cache registers into the kernel memory context */
2726#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
2727PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
2728{
2729 PVRSRV_ERROR eError;
2730 SYS_DATA *psSysData;
2731 RA_ARENA *psLocalDevMemArena;
2732 IMG_HANDLE hPTPageOSMemHandle = IMG_NULL;
2733 IMG_UINT32 *pui32PD;
2734 IMG_UINT32 *pui32PT = IMG_NULL;
2735 IMG_CPU_PHYADDR sCpuPAddr;
2736 IMG_DEV_PHYADDR sPTDevPAddr;
2737 PVRSRV_SGXDEV_INFO *psDevInfo;
2738 IMG_UINT32 ui32PDIndex;
2739 IMG_UINT32 ui32PTIndex;
2740
2741 psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
2742 pui32PD = (IMG_UINT32*)psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->pvPDCpuVAddr;
2743
2744 SysAcquireData(&psSysData);
2745
2746 psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
2747
2748
2749 if(psLocalDevMemArena == IMG_NULL)
2750 {
2751
2752 eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
2753 SGX_MMU_PAGE_SIZE,
2754 SGX_MMU_PAGE_SIZE,
2755 (IMG_VOID **)&pui32PT,
2756 &hPTPageOSMemHandle);
2757 if (eError != PVRSRV_OK)
2758 {
2759 PVR_DPF((PVR_DBG_ERROR, "MMU_MapExtSystemCacheRegs: ERROR call to OSAllocPages failed"));
2760 return eError;
2761 }
2762
2763
2764 if(pui32PT)
2765 {
2766 sCpuPAddr = OSMapLinToCPUPhys(hPTPageOSMemHandle,
2767 pui32PT);
2768 }
2769 else
2770 {
2771
2772 sCpuPAddr = OSMemHandleToCpuPAddr(hPTPageOSMemHandle, 0);
2773 }
2774 sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
2775 }
2776 else
2777 {
2778 IMG_SYS_PHYADDR sSysPAddr;
2779
2780
2781 if(RA_Alloc(psLocalDevMemArena,
2782 SGX_MMU_PAGE_SIZE,
2783 IMG_NULL,
2784 IMG_NULL,
2785 0,
2786 SGX_MMU_PAGE_SIZE,
2787 0,
2788 &(sSysPAddr.uiAddr))!= IMG_TRUE)
2789 {
2790 PVR_DPF((PVR_DBG_ERROR, "MMU_MapExtSystemCacheRegs: ERROR call to RA_Alloc failed"));
2791 return PVRSRV_ERROR_OUT_OF_MEMORY;
2792 }
2793
2794
2795 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
2796 pui32PT = OSMapPhysToLin(sCpuPAddr,
2797 SGX_MMU_PAGE_SIZE,
2798 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
2799 &hPTPageOSMemHandle);
2800 if(!pui32PT)
2801 {
2802 PVR_DPF((PVR_DBG_ERROR, "MMU_MapExtSystemCacheRegs: ERROR failed to map page tables"));
2803 return PVRSRV_ERROR_BAD_MAPPING;
2804 }
2805
2806
2807 sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
2808
2809
2810 psDevInfo->sExtSystemCacheRegsPTSysPAddr = sSysPAddr;
2811 }
2812
2813 OSMemSet(pui32PT, 0, SGX_MMU_PAGE_SIZE);
2814
2815 ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
2816 ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
2817
2818	/* point the kernel context PDE at the new page table and map the external system cache register block */
2819 pui32PD[ui32PDIndex] = (sPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
2820 | SGX_MMU_PDE_VALID;
2821
2822 pui32PT[ui32PTIndex] = (psDevInfo->sExtSysCacheRegsDevPBase.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
2823 | SGX_MMU_PTE_VALID;
2824
2825
2826	PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, hPTPageOSMemHandle, 0, pui32PT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
2827	PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->hPDOSMemHandle, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
2828	PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, hPTPageOSMemHandle, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
2829
2830
2831 psDevInfo->pui32ExtSystemCacheRegsPT = pui32PT;
2832 psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle = hPTPageOSMemHandle;
2833
2834 return PVRSRV_OK;
2835}
2836
2837/* MMU_UnmapExtSystemCacheRegs: remove the external system cache register mapping and free its page table */
2838PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
2839{
2840 SYS_DATA *psSysData;
2841 RA_ARENA *psLocalDevMemArena;
2842 PVRSRV_SGXDEV_INFO *psDevInfo;
2843 IMG_UINT32 ui32PDIndex;
2844 IMG_UINT32 *pui32PD;
2845
2846 psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
2847 pui32PD = (IMG_UINT32*)psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->pvPDCpuVAddr;
2848
2849 SysAcquireData(&psSysData);
2850
2851 psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
2852
2853
2854 ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
2855 pui32PD[ui32PDIndex] = 0;
2856
2857	PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->hPDOSMemHandle, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
2858 PDUMPFREEPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle, psDevInfo->pui32ExtSystemCacheRegsPT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
2859
2860
2861 if(psLocalDevMemArena == IMG_NULL)
2862 {
2863 if (psDevInfo->pui32ExtSystemCacheRegsPT != IMG_NULL)
2864 {
2865 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
2866 SGX_MMU_PAGE_SIZE,
2867 psDevInfo->pui32ExtSystemCacheRegsPT,
2868 psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle);
2869 }
2870 }
2871 else
2872 {
2873 if (psDevInfo->pui32ExtSystemCacheRegsPT != IMG_NULL)
2874 {
2875 OSUnMapPhysToLin(psDevInfo->pui32ExtSystemCacheRegsPT,
2876 SGX_MMU_PAGE_SIZE,
2877 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
2878 psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle);
2879
2880 RA_Free(psLocalDevMemArena, psDevInfo->sExtSystemCacheRegsPTSysPAddr.uiAddr, IMG_FALSE);
2881 }
2882 }
2883
2884 return PVRSRV_OK;
2885}
2886#endif
2887
2888/* PageTest: write and read back test patterns across a page to verify that the memory is usable */
2889#if PAGE_TEST
2890static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr)
2891{
2892 volatile IMG_UINT32 ui32WriteData;
2893 volatile IMG_UINT32 ui32ReadData;
2894 volatile IMG_UINT32 *pMem32 = (volatile IMG_UINT32 *)pMem;
2895 IMG_INT n;
2896 IMG_BOOL bOK=IMG_TRUE;
2897
2898 ui32WriteData = 0xffffffff;
2899
2900 for (n=0; n<1024; n++)
2901 {
2902 pMem32[n] = ui32WriteData;
2903 ui32ReadData = pMem32[n];
2904
2905 if (ui32WriteData != ui32ReadData)
2906 {
2907
2908 PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
2909 PVR_DBG_BREAK;
2910 bOK = IMG_FALSE;
2911 }
2912 }
2913
2914 ui32WriteData = 0;
2915
2916 for (n=0; n<1024; n++)
2917 {
2918 pMem32[n] = ui32WriteData;
2919 ui32ReadData = pMem32[n];
2920
2921 if (ui32WriteData != ui32ReadData)
2922 {
2923
2924 PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
2925 PVR_DBG_BREAK;
2926 bOK = IMG_FALSE;
2927 }
2928 }
2929
2930 if (bOK)
2931 {
2932 PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X is OK", sDevPAddr.uiAddr));
2933 }
2934 else
2935 {
2936 PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X *** FAILED ***", sDevPAddr.uiAddr));
2937 }
2938}
2939#endif
2940