path: root/pvr-source/services4/srvkm/devices
Diffstat (limited to 'pvr-source/services4/srvkm/devices')
-rw-r--r--  pvr-source/services4/srvkm/devices/sgx/mmu.c            4600
-rw-r--r--  pvr-source/services4/srvkm/devices/sgx/mmu.h              501
-rw-r--r--  pvr-source/services4/srvkm/devices/sgx/pb.c               493
-rw-r--r--  pvr-source/services4/srvkm/devices/sgx/sgx_bridge_km.h    279
-rw-r--r--  pvr-source/services4/srvkm/devices/sgx/sgxconfig.h        481
-rw-r--r--  pvr-source/services4/srvkm/devices/sgx/sgxinfokm.h        610
-rw-r--r--  pvr-source/services4/srvkm/devices/sgx/sgxinit.c         3428
-rw-r--r--  pvr-source/services4/srvkm/devices/sgx/sgxkick.c          899
-rw-r--r--  pvr-source/services4/srvkm/devices/sgx/sgxpower.c         630
-rw-r--r--  pvr-source/services4/srvkm/devices/sgx/sgxreset.c         808
-rw-r--r--  pvr-source/services4/srvkm/devices/sgx/sgxtransfer.c      814
-rw-r--r--  pvr-source/services4/srvkm/devices/sgx/sgxutils.c        1912
-rw-r--r--  pvr-source/services4/srvkm/devices/sgx/sgxutils.h         195
13 files changed, 15650 insertions, 0 deletions
diff --git a/pvr-source/services4/srvkm/devices/sgx/mmu.c b/pvr-source/services4/srvkm/devices/sgx/mmu.c
new file mode 100644
index 0000000..44dc824
--- /dev/null
+++ b/pvr-source/services4/srvkm/devices/sgx/mmu.c
@@ -0,0 +1,4600 @@
+/*************************************************************************/ /*!
+@Title MMU Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements basic low level control of MMU.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "sgxdefs.h"
+#include "sgxmmu.h"
+#include "services_headers.h"
+#include "buffer_manager.h"
+#include "hash.h"
+#include "ra.h"
+#include "pdump_km.h"
+#include "sgxapi_km.h"
+#include "sgxinfo.h"
+#include "sgxinfokm.h"
+#include "mmu.h"
+#include "sgxconfig.h"
+#include "sgx_bridge_km.h"
+#include "pdump_osfunc.h"
+
+#define UINT32_MAX_VALUE 0xFFFFFFFFUL
+
+/*
+ MMU performs device virtual to physical translation.
+ terminology:
+ page directory (PD)
+ pagetable (PT)
+ data page (DP)
+
+ Incoming 32bit Device Virtual Addresses are deconstructed into 3 fields:
+ ---------------------------------------------------------
+ | PD Index/tag: | PT Index: | DP offset: |
+ | bits 31:22 | bits 21:n | bits (n-1):0 |
+ ---------------------------------------------------------
+ where typically n=12 for a standard 4k DP
+ but n=16 for a 64k DP
+
+ MMU page directory (PD), pagetable (PT) and data page (DP) config:
+ PD:
+ - always one page per address space
+ - up to 4k in size to span 4GB (32bit)
+ - contains up to 1024 32bit entries
+ - entries are indexed by the top 10 bits of an incoming 32bit device virtual address
+ - the PD entry selected contains the physical address of the PT to
+ perform the next stage of the V to P translation
+
+ PT:
+ - size depends on the DP size, e.g. 4k DPs have 4k PTs but 16k DPs have 1k PTs
+ - each PT always spans 4Mb of device virtual address space irrespective of DP size
+ - number of entries in a PT depend on DP size and ranges from 1024 to 4 entries
+ - entries are indexed by the PT Index field of the device virtual address (21:n)
+ - the PT entry selected contains the physical address of the DP to access
+
+ DP:
+ - size varies from 4k to 4M in x4 steps (4k, 16k, 64k, 256k, 1M, 4M)
+ - DP offset field of the device virtual address ((n-1):0) is used as a byte offset
+ to address into the DP itself
+*/
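+
+/*
+ A minimal illustrative decomposition for the common 4k DP case (n=12).
+ This is a sketch only: the real shifts and masks are computed per heap
+ and held in the MMU_HEAP structure below.
+
+ ui32PDIndex  = DevVAddr.uiAddr >> 22;           PD Index, bits 31:22
+ ui32PTIndex  = (DevVAddr.uiAddr >> 12) & 0x3FF; PT Index, bits 21:12
+ ui32DPOffset = DevVAddr.uiAddr & 0xFFF;         DP offset, bits 11:0
+*/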
+
+#define SGX_MAX_PD_ENTRIES (1<<(SGX_FEATURE_ADDRESS_SPACE_SIZE - SGX_MMU_PT_SHIFT - SGX_MMU_PAGE_SHIFT))
+
+#if defined(FIX_HW_BRN_31620)
+/* Sim doesn't use the address mask */
+#define SGX_MMU_PDE_DUMMY_PAGE (0)//(0x00000020U)
+#define SGX_MMU_PTE_DUMMY_PAGE (0)//(0x00000020U)
+
+/* 4MB address range per page table */
+#define BRN31620_PT_ADDRESS_RANGE_SHIFT 22
+#define BRN31620_PT_ADDRESS_RANGE_SIZE (1 << BRN31620_PT_ADDRESS_RANGE_SHIFT)
+
+/* 64MB address range per PDE cache line */
+#define BRN31620_PDE_CACHE_FILL_SHIFT 26
+#define BRN31620_PDE_CACHE_FILL_SIZE (1 << BRN31620_PDE_CACHE_FILL_SHIFT)
+#define BRN31620_PDE_CACHE_FILL_MASK (BRN31620_PDE_CACHE_FILL_SIZE - 1)
+
+/* Page Directory Entries per cache line */
+#define BRN31620_PDES_PER_CACHE_LINE_SHIFT (BRN31620_PDE_CACHE_FILL_SHIFT - BRN31620_PT_ADDRESS_RANGE_SHIFT)
+#define BRN31620_PDES_PER_CACHE_LINE_SIZE (1 << BRN31620_PDES_PER_CACHE_LINE_SHIFT)
+#define BRN31620_PDES_PER_CACHE_LINE_MASK (BRN31620_PDES_PER_CACHE_LINE_SIZE - 1)
+
+/* Macros for working out offset for dummy pages */
+#define BRN31620_DUMMY_PAGE_OFFSET (1 * SGX_MMU_PAGE_SIZE)
+#define BRN31620_DUMMY_PDE_INDEX (BRN31620_DUMMY_PAGE_OFFSET / BRN31620_PT_ADDRESS_RANGE_SIZE)
+#define BRN31620_DUMMY_PTE_INDEX ((BRN31620_DUMMY_PAGE_OFFSET - (BRN31620_DUMMY_PDE_INDEX * BRN31620_PT_ADDRESS_RANGE_SIZE))/SGX_MMU_PAGE_SIZE)
+
+/* Number of PDE cache lines covering the 32bit address space */
+#define BRN31620_CACHE_FLUSH_SHIFT (32 - BRN31620_PDE_CACHE_FILL_SHIFT)
+#define BRN31620_CACHE_FLUSH_SIZE (1 << BRN31620_CACHE_FLUSH_SHIFT)
+
+/* Cache line bits in a UINT32 */
+#define BRN31620_CACHE_FLUSH_BITS_SHIFT 5
+#define BRN31620_CACHE_FLUSH_BITS_SIZE (1 << BRN31620_CACHE_FLUSH_BITS_SHIFT)
+#define BRN31620_CACHE_FLUSH_BITS_MASK (BRN31620_CACHE_FLUSH_BITS_SIZE - 1)
+
+/* Cache line index in array */
+#define BRN31620_CACHE_FLUSH_INDEX_BITS (BRN31620_CACHE_FLUSH_SHIFT - BRN31620_CACHE_FLUSH_BITS_SHIFT)
+#define BRN31620_CACHE_FLUSH_INDEX_SIZE (1 << BRN31620_CACHE_FLUSH_INDEX_BITS)
+
+#define BRN31620_DUMMY_PAGE_SIGNATURE 0xFEEBEE01
+#endif
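+
+/*
+ Worked values for the FIX_HW_BRN_31620 constants above (illustrative,
+ assuming 4k MMU pages):
+   BRN31620_PT_ADDRESS_RANGE_SIZE    = 1 << 22        = 4MB per PT
+   BRN31620_PDE_CACHE_FILL_SIZE      = 1 << 26        = 64MB per cache line
+   BRN31620_PDES_PER_CACHE_LINE_SIZE = 1 << (26 - 22) = 16 PDEs per line
+   BRN31620_DUMMY_PDE_INDEX          = 4096 / 4MB     = 0
+   BRN31620_DUMMY_PTE_INDEX          = 4096 / 4096    = 1
+   BRN31620_CACHE_FLUSH_SIZE         = 1 << (32 - 26) = 64 cache lines in 4GB
+   BRN31620_CACHE_FLUSH_INDEX_SIZE   = 64 / 32        = 2 IMG_UINT32 mask words
+*/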
+
+typedef struct _MMU_PT_INFO_
+{
+ /* note: may need a union here to accommodate a PT page address for local memory */
+ IMG_VOID *hPTPageOSMemHandle;
+ IMG_CPU_VIRTADDR PTPageCpuVAddr;
+ /* Map of reserved PTEs.
+ * Reserved PTEs are like "valid" PTEs in that they (and the DevVAddrs they represent)
+ * cannot be assigned to another allocation but their "reserved" status persists through
+ * any amount of mapping and unmapping, until the allocation is finally destroyed.
+ *
+ * Reserved and Valid are independent.
+ * When a PTE is first reserved, it will have Reserved=1 and Valid=0.
+ * When the PTE is actually mapped, it will have Reserved=1 and Valid=1.
+ * When the PTE is unmapped, it will have Reserved=1 and Valid=0.
+	 * At this point, the PT cannot be destroyed because, although there is
+	 * no active mapping on the PT, a PTE is known to be reserved for use.
+ *
+ * The above sequence of mapping and unmapping may repeat any number of times
+ * until the allocation is unmapped and destroyed which causes the PTE to have
+ * Valid=0 and Reserved=0.
+ */
+ /* Number of PTEs set up.
+ * i.e. have a valid SGX Phys Addr and the "VALID" PTE bit == 1
+ */
+ IMG_UINT32 ui32ValidPTECount;
+} MMU_PT_INFO;
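+
+/*
+ The Reserved/Valid lifecycle described above, in table form (illustrative):
+
+   Operation            Reserved  Valid
+   reserve PTE             1        0
+   map allocation          1        1
+   unmap allocation        1        0    (map/unmap may repeat)
+   destroy allocation      0        0
+*/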
+
+#define MMU_CONTEXT_NAME_SIZE 50
+struct _MMU_CONTEXT_
+{
+ /* the device node */
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ /* Page Directory CPUVirt and DevPhys Addresses */
+ IMG_CPU_VIRTADDR pvPDCpuVAddr;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+
+ IMG_VOID *hPDOSMemHandle;
+
+ /* information about dynamically allocated pagetables */
+ MMU_PT_INFO *apsPTInfoList[SGX_MAX_PD_ENTRIES];
+
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+
+#if defined(PDUMP)
+ IMG_UINT32 ui32PDumpMMUContextID;
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ IMG_BOOL bPDumpActive;
+#endif
+#endif
+
+ IMG_UINT32 ui32PID;
+ IMG_CHAR szName[MMU_CONTEXT_NAME_SIZE];
+
+#if defined (FIX_HW_BRN_31620)
+ IMG_UINT32 ui32PDChangeMask[BRN31620_CACHE_FLUSH_INDEX_SIZE];
+ IMG_UINT32 ui32PDCacheRangeRefCount[BRN31620_CACHE_FLUSH_SIZE];
+ MMU_PT_INFO *apsPTInfoListSave[SGX_MAX_PD_ENTRIES];
+#endif
+ struct _MMU_CONTEXT_ *psNext;
+};
+
+struct _MMU_HEAP_
+{
+ /* MMU context */
+ MMU_CONTEXT *psMMUContext;
+
+ /*
+ heap specific details:
+ */
+ /* the Base PD index for the heap */
+ IMG_UINT32 ui32PDBaseIndex;
+ /* number of pagetables in this heap */
+ IMG_UINT32 ui32PageTableCount;
+ /* total number of pagetable entries in this heap which may be mapped to data pages */
+ IMG_UINT32 ui32PTETotalUsable;
+ /* PD entry DP size control field */
+ IMG_UINT32 ui32PDEPageSizeCtrl;
+
+ /*
+ Data Page (DP) Details:
+ */
+ /* size in bytes of a data page */
+ IMG_UINT32 ui32DataPageSize;
+ /* bit width of the data page offset addressing field */
+ IMG_UINT32 ui32DataPageBitWidth;
+ /* bit mask of the data page offset addressing field */
+ IMG_UINT32 ui32DataPageMask;
+
+ /*
+ PageTable (PT) Details:
+ */
+ /* bit shift to base of PT addressing field */
+ IMG_UINT32 ui32PTShift;
+ /* bit width of the PT addressing field */
+ IMG_UINT32 ui32PTBitWidth;
+ /* bit mask of the PT addressing field */
+ IMG_UINT32 ui32PTMask;
+ /* size in bytes of a pagetable */
+ IMG_UINT32 ui32PTSize;
+ /* Allocated PT Entries per PT */
+ IMG_UINT32 ui32PTNumEntriesAllocated;
+ /* Usable PT Entries per PT (may be different to num allocated for 4MB data page) */
+ IMG_UINT32 ui32PTNumEntriesUsable;
+
+ /*
+ PageDirectory Details:
+ */
+ /* bit shift to base of PD addressing field */
+ IMG_UINT32 ui32PDShift;
+ /* bit width of the PD addressing field */
+ IMG_UINT32 ui32PDBitWidth;
+	/* bit mask of the PD addressing field */
+ IMG_UINT32 ui32PDMask;
+
+ /*
+ Arena Info:
+ */
+ RA_ARENA *psVMArena;
+ DEV_ARENA_DESCRIPTOR *psDevArena;
+
+ /* If we have sparse mappings then we can't do PT level sanity checks */
+ IMG_BOOL bHasSparseMappings;
+#if defined(PDUMP)
+ PDUMP_MMU_ATTRIB sMMUAttrib;
+#endif
+};
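+
+/*
+ Example MMU_HEAP field values for a 4k data page heap (illustrative
+ assumptions only; the authoritative values are computed when the heap
+ is created):
+   ui32DataPageSize = 0x1000, ui32DataPageBitWidth = 12, ui32DataPageMask = 0xFFF
+   ui32PTShift = 12, ui32PTBitWidth = 10, ui32PTMask = 0x003FF000
+   ui32PTSize  = 1024 entries * sizeof(IMG_UINT32) = 0x1000 bytes
+   ui32PDShift = 22, ui32PDBitWidth = 10, ui32PDMask = 0xFFC00000
+*/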
+
+
+
+#if defined (SUPPORT_SGX_MMU_DUMMY_PAGE)
+#define DUMMY_DATA_PAGE_SIGNATURE 0xDEADBEEF
+#endif
+
+/* local prototypes: */
+static IMG_VOID
+_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT);
+
+#if defined(PDUMP)
+static IMG_VOID
+MMU_PDumpPageTables (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SIZE_T uSize,
+ IMG_BOOL bForUnmap,
+ IMG_HANDLE hUniqueTag);
+#endif /* #if defined(PDUMP) */
+
+/* This option tests page table memory, for use during device bring-up. */
+#define PAGE_TEST 0
+#if PAGE_TEST
+static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr);
+#endif
+
+/* This option dumps out the PT if an assert fails */
+#define PT_DUMP 1
+
+/* This option sanity checks page table PTE valid count matches active PTEs */
+#define PT_DEBUG 0
+#if (PT_DEBUG || PT_DUMP) && defined(PVRSRV_NEED_PVR_DPF)
+static IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
+{
+ IMG_UINT32 *p = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
+ IMG_UINT32 i;
+
+ /* 1024 entries in a 4K page table */
+ for(i = 0; i < 1024; i += 8)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%08X %08X %08X %08X %08X %08X %08X %08X\n",
+ p[i + 0], p[i + 1], p[i + 2], p[i + 3],
+ p[i + 4], p[i + 5], p[i + 6], p[i + 7]));
+ }
+}
+#else /* (PT_DEBUG || PT_DUMP) && defined(PVRSRV_NEED_PVR_DPF) */
+static INLINE IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
+{
+ PVR_UNREFERENCED_PARAMETER(psPTInfoList);
+}
+#endif /* (PT_DEBUG || PT_DUMP) && defined(PVRSRV_NEED_PVR_DPF) */
+
+#if PT_DEBUG
+static IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
+{
+ IMG_UINT32 *p = (IMG_UINT32*) psPTInfoList->PTPageCpuVAddr;
+ IMG_UINT32 i, ui32Count = 0;
+
+ /* 1024 entries in a 4K page table */
+ for(i = 0; i < 1024; i++)
+ if(p[i] & SGX_MMU_PTE_VALID)
+ ui32Count++;
+
+ if(psPTInfoList->ui32ValidPTECount != ui32Count)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ui32ValidPTECount: %u ui32Count: %u\n",
+ psPTInfoList->ui32ValidPTECount, ui32Count));
+ DumpPT(psPTInfoList);
+ BUG();
+ }
+}
+#else /* PT_DEBUG */
+static INLINE IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
+{
+ PVR_UNREFERENCED_PARAMETER(psPTInfoList);
+}
+#endif /* PT_DEBUG */
+
+/*
+ Debug functionality that allows us to make the CPU
+ mapping of pagetable memory readonly and only make
+ it read/write when we alter it. This allows us
+ to check that our memory isn't being overwritten
+*/
+#if defined(PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND)
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#else
+#include <generated/autoconf.h>
+#endif
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+static IMG_VOID MakeKernelPageReadWrite(IMG_PVOID ulCPUVAddr)
+{
+ pgd_t *psPGD;
+ pud_t *psPUD;
+ pmd_t *psPMD;
+ pte_t *psPTE;
+ pte_t ptent;
+ IMG_UINT32 ui32CPUVAddr = (IMG_UINT32) ulCPUVAddr;
+
+ psPGD = pgd_offset_k(ui32CPUVAddr);
+ if (pgd_none(*psPGD) || pgd_bad(*psPGD))
+ {
+ PVR_ASSERT(0);
+ }
+
+ psPUD = pud_offset(psPGD, ui32CPUVAddr);
+ if (pud_none(*psPUD) || pud_bad(*psPUD))
+ {
+ PVR_ASSERT(0);
+ }
+
+ psPMD = pmd_offset(psPUD, ui32CPUVAddr);
+ if (pmd_none(*psPMD) || pmd_bad(*psPMD))
+ {
+ PVR_ASSERT(0);
+ }
+ psPTE = (pte_t *)pte_offset_kernel(psPMD, ui32CPUVAddr);
+
+ ptent = ptep_modify_prot_start(&init_mm, ui32CPUVAddr, psPTE);
+ ptent = pte_mkwrite(ptent);
+ ptep_modify_prot_commit(&init_mm, ui32CPUVAddr, psPTE, ptent);
+
+ flush_tlb_all();
+}
+
+static IMG_VOID MakeKernelPageReadOnly(IMG_PVOID ulCPUVAddr)
+{
+ pgd_t *psPGD;
+ pud_t *psPUD;
+ pmd_t *psPMD;
+ pte_t *psPTE;
+ pte_t ptent;
+ IMG_UINT32 ui32CPUVAddr = (IMG_UINT32) ulCPUVAddr;
+
+ OSWriteMemoryBarrier();
+
+ psPGD = pgd_offset_k(ui32CPUVAddr);
+ if (pgd_none(*psPGD) || pgd_bad(*psPGD))
+ {
+ PVR_ASSERT(0);
+ }
+
+ psPUD = pud_offset(psPGD, ui32CPUVAddr);
+ if (pud_none(*psPUD) || pud_bad(*psPUD))
+ {
+ PVR_ASSERT(0);
+ }
+
+ psPMD = pmd_offset(psPUD, ui32CPUVAddr);
+ if (pmd_none(*psPMD) || pmd_bad(*psPMD))
+ {
+ PVR_ASSERT(0);
+ }
+
+ psPTE = (pte_t *)pte_offset_kernel(psPMD, ui32CPUVAddr);
+
+ ptent = ptep_modify_prot_start(&init_mm, ui32CPUVAddr, psPTE);
+ ptent = pte_wrprotect(ptent);
+ ptep_modify_prot_commit(&init_mm, ui32CPUVAddr, psPTE, ptent);
+
+ flush_tlb_all();
+
+}
+
+#else /* defined(PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND) */
+
+static INLINE IMG_VOID MakeKernelPageReadWrite(IMG_PVOID ulCPUVAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(ulCPUVAddr);
+}
+
+static INLINE IMG_VOID MakeKernelPageReadOnly(IMG_PVOID ulCPUVAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(ulCPUVAddr);
+}
+
+#endif /* defined(PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND) */
+
+/*___________________________________________________________________________
+
+ Information for SUPPORT_PDUMP_MULTI_PROCESS feature.
+
+ The client marked for pdumping will set the bPDumpActive flag in
+ the MMU Context (see MMU_Initialise).
+
+ Shared heap allocations should be persistent so all apps which
+ are pdumped will see the allocation. Persistent flag over-rides
+ the bPDumpActive flag (see pdump_common.c/DbgWrite function).
+
+ The idea is to dump PT,DP for shared heap allocations, but only
+ dump the PDE if the allocation is mapped into the kernel or active
+ client context. This ensures if a background app allocates on a
+ shared heap then all clients can access it in the pdump toolchain.
+
+
+
+ PD PT DP
+ +-+
+ | |---> +-+
+ +-+ | |---> +-+
+ +-+ + +
+ +-+
+
+ PD allocation/free: pdump flags are 0 (only need PD for active apps)
+ PT allocation/free: pdump flags are 0
+ unless PT is for a shared heap, in which case persistent is set
+ PD entries (MMU init/insert shared heap):
+ only pdump if PDE is on the active MMU context, flags are 0
+ PD entries (PT alloc):
+ pdump flags are 0 if kernel heap
+ pdump flags are 0 if shared heap and PDE is on active MMU context
+ otherwise ignore.
+ PT entries pdump flags are 0
+ unless PTE is for a shared heap, in which case persistent is set
+
+ NOTE: PDump common code:-
+ PDumpMallocPages and PDumpMemKM also set the persistent flag for
+ shared heap allocations.
+
+ ___________________________________________________________________________
+*/
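+
+/*
+ The flag selection described above, as the recurring pattern used by the
+ PDUMP blocks later in this file (a sketch, not a new API):
+
+ IMG_UINT32 ui32Flags = 0;
+ #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ ui32Flags |= MMU_IsHeapShared(pMMUHeap) ? PDUMP_FLAGS_PERSISTENT : 0;
+ #endif
+*/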
+
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_IsHeapShared
+
+ PURPOSE: Is this heap shared?
+ PARAMETERS: In: pMMUHeap
+ RETURNS: true if heap is shared
+******************************************************************************/
+IMG_BOOL MMU_IsHeapShared(MMU_HEAP* pMMUHeap)
+{
+ switch(pMMUHeap->psDevArena->DevMemHeapType)
+ {
+ case DEVICE_MEMORY_HEAP_SHARED :
+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
+ return IMG_TRUE;
+ case DEVICE_MEMORY_HEAP_PERCONTEXT :
+ case DEVICE_MEMORY_HEAP_KERNEL :
+ return IMG_FALSE;
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_IsHeapShared: ERROR invalid heap type"));
+ return IMG_FALSE;
+ }
+ }
+}
+
+#ifdef SUPPORT_SGX_MMU_BYPASS
+/*!
+******************************************************************************
+ FUNCTION: EnableHostAccess
+
+ PURPOSE: Enables Host accesses to device memory, bypassing the device
+ MMU address translation
+
+ PARAMETERS: In: psMMUContext
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+EnableHostAccess (MMU_CONTEXT *psMMUContext)
+{
+ IMG_UINT32 ui32RegVal;
+ IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
+
+ /*
+ bypass the MMU for the host port requestor,
+ conserving bypass state of other requestors
+ */
+ ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);
+
+ OSWriteHWReg(pvRegsBaseKM,
+ EUR_CR_BIF_CTRL,
+ ui32RegVal | EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
+ /* assume we're not wiping-out any other bits */
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
+}
+
+/*!
+******************************************************************************
+ FUNCTION: DisableHostAccess
+
+ PURPOSE: Disables MMU bypass for Host accesses to device memory,
+ restoring device MMU address translation
+
+ PARAMETERS: In: psMMUContext
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+DisableHostAccess (MMU_CONTEXT *psMMUContext)
+{
+ IMG_UINT32 ui32RegVal;
+ IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
+
+ /*
+ disable MMU-bypass for the host port requestor,
+ conserving bypass state of other requestors
+ and flushing all caches/tlbs
+ */
+	ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);
+
+	OSWriteHWReg(pvRegsBaseKM,
+				EUR_CR_BIF_CTRL,
+				ui32RegVal & ~EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
+ /* assume we're not wiping-out any other bits */
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, 0);
+}
+#endif
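+
+/*
+ Illustrative usage of the pair above (a sketch; callers elsewhere in the
+ driver bracket CPU accesses to device-local memory like this so the host
+ port bypasses MMU translation only while needed):
+
+ EnableHostAccess(psMMUContext);
+ ... host CPU reads/writes of device memory ...
+ DisableHostAccess(psMMUContext);
+*/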
+
+
+#if defined(SGX_FEATURE_SYSTEM_CACHE)
+/*!
+******************************************************************************
+ FUNCTION: MMU_InvalidateSystemLevelCache
+
+ PURPOSE: Invalidates the System Level Cache to purge stale PDEs and PTEs
+
+ PARAMETERS: In: psDevInfo
+ RETURNS: None
+
+******************************************************************************/
+static IMG_VOID MMU_InvalidateSystemLevelCache(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ #if defined(SGX_FEATURE_MP)
+ psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_SL;
+ #else
+ /* The MMU always bypasses the SLC */
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ #endif /* SGX_FEATURE_MP */
+}
+#endif /* SGX_FEATURE_SYSTEM_CACHE */
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_InvalidateDirectoryCache
+
+ PURPOSE: Invalidates the page directory cache + page table cache + requestor TLBs
+
+ PARAMETERS: In: psDevInfo
+ RETURNS: None
+
+******************************************************************************/
+IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PD;
+ #if defined(SGX_FEATURE_SYSTEM_CACHE)
+ MMU_InvalidateSystemLevelCache(psDevInfo);
+ #endif /* SGX_FEATURE_SYSTEM_CACHE */
+}
+
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_InvalidatePageTableCache
+
+ PURPOSE: Invalidates the page table cache + requestor TLBs
+
+ PARAMETERS: In: psDevInfo
+ RETURNS: None
+
+******************************************************************************/
+static IMG_VOID MMU_InvalidatePageTableCache(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PT;
+ #if defined(SGX_FEATURE_SYSTEM_CACHE)
+ MMU_InvalidateSystemLevelCache(psDevInfo);
+ #endif /* SGX_FEATURE_SYSTEM_CACHE */
+}
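+
+/*
+ Note: the invalidate routines above do not touch the hardware directly;
+ they accumulate SGXMKIF_CC_INVAL_BIF_* bits in psDevInfo->ui32CacheControl,
+ which are presumably consumed at the next command submission rather than
+ causing an immediate flush.
+*/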
+
+#if defined(FIX_HW_BRN_31620)
+/*!
+******************************************************************************
+ FUNCTION: BRN31620InvalidatePageTableEntry
+
+ PURPOSE: Invalidates a single page table entry, wiring in the dummy
+ page when the entry covers the BRN31620 dummy page slot
+
+ PARAMETERS: In: psMMUContext, ui32PDIndex, ui32PTIndex; Out: pui32PTE
+ RETURNS: None
+
+******************************************************************************/
+static IMG_VOID BRN31620InvalidatePageTableEntry(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32PDIndex, IMG_UINT32 ui32PTIndex, IMG_UINT32 *pui32PTE)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo;
+
+ /*
+ * Note: We can't tell at this stage if this PT will be freed before
+ * the end of the function so we always wire up the dummy page to
+	 * the PT.
+ */
+ if (((ui32PDIndex % (BRN31620_PDE_CACHE_FILL_SIZE/BRN31620_PT_ADDRESS_RANGE_SIZE)) == BRN31620_DUMMY_PDE_INDEX)
+ && (ui32PTIndex == BRN31620_DUMMY_PTE_INDEX))
+ {
+ *pui32PTE = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_DUMMY_PAGE
+ | SGX_MMU_PTE_READONLY
+ | SGX_MMU_PTE_VALID;
+ }
+ else
+ {
+ *pui32PTE = 0;
+ }
+}
+
+/*!
+******************************************************************************
+ FUNCTION: BRN31620FreePageTable
+
+ PURPOSE: Frees page tables in PDE cache line chunks re-wiring the
+ dummy page when required
+
+ PARAMETERS: In: psMMUHeap, ui32PDIndex
+ RETURNS: IMG_TRUE if we freed any PTs
+
+******************************************************************************/
+static IMG_BOOL BRN31620FreePageTable(MMU_HEAP *psMMUHeap, IMG_UINT32 ui32PDIndex)
+{
+ MMU_CONTEXT *psMMUContext = psMMUHeap->psMMUContext;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo;
+ IMG_UINT32 ui32PDCacheLine = ui32PDIndex >> BRN31620_PDES_PER_CACHE_LINE_SHIFT;
+	IMG_BOOL bFreePTs = IMG_FALSE;
+ IMG_UINT32 *pui32Tmp;
+
+ PVR_ASSERT(psMMUHeap != IMG_NULL);
+
+ /*
+ * Clear the PT info for this PD index so even if we don't
+ * free the memory here apsPTInfoList[PDIndex] will trigger
+ * an "allocation" in _DeferredAllocPagetables which
+ * bumps up the refcount.
+ */
+ PVR_ASSERT(psMMUContext->apsPTInfoListSave[ui32PDIndex] == IMG_NULL);
+
+ psMMUContext->apsPTInfoListSave[ui32PDIndex] = psMMUContext->apsPTInfoList[ui32PDIndex];
+ psMMUContext->apsPTInfoList[ui32PDIndex] = IMG_NULL;
+
+ /* Check if this was the last PT in the cache line */
+ if (--psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine] == 0)
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 ui32PDIndexStart = ui32PDCacheLine * BRN31620_PDES_PER_CACHE_LINE_SIZE;
+ IMG_UINT32 ui32PDIndexEnd = ui32PDIndexStart + BRN31620_PDES_PER_CACHE_LINE_SIZE;
+ IMG_UINT32 ui32PDBitMaskIndex, ui32PDBitMaskShift;
+
+ /* Free all PT's in cache line */
+ for (i=ui32PDIndexStart;i<ui32PDIndexEnd;i++)
+ {
+ /* This PT is _really_ being freed now */
+ psMMUContext->apsPTInfoList[i] = psMMUContext->apsPTInfoListSave[i];
+ psMMUContext->apsPTInfoListSave[i] = IMG_NULL;
+ _DeferredFreePageTable(psMMUHeap, i - psMMUHeap->ui32PDBaseIndex, IMG_TRUE);
+ }
+
+ ui32PDBitMaskIndex = ui32PDCacheLine >> BRN31620_CACHE_FLUSH_BITS_SHIFT;
+ ui32PDBitMaskShift = ui32PDCacheLine & BRN31620_CACHE_FLUSH_BITS_MASK;
+
+ /* Check if this is a shared heap */
+ if (MMU_IsHeapShared(psMMUHeap))
+ {
+			/* Mark the removal of the Page Table from all memory contexts */
+ MMU_CONTEXT *psMMUContextWalker = (MMU_CONTEXT*) psMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
+
+ while(psMMUContextWalker)
+ {
+ psMMUContextWalker->ui32PDChangeMask[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift;
+
+ /*
+ * We've just cleared a cache line's worth of PDE's so we need
+ * to wire up the dummy PT
+ */
+ MakeKernelPageReadWrite(psMMUContextWalker->pvPDCpuVAddr);
+ pui32Tmp = (IMG_UINT32 *) psMMUContextWalker->pvPDCpuVAddr;
+ pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_DUMMY_PAGE
+ | SGX_MMU_PDE_VALID;
+ MakeKernelPageReadOnly(psMMUContextWalker->pvPDCpuVAddr);
+
+ PDUMPCOMMENT("BRN31620 Re-wire dummy PT due to releasing PT allocation block");
+ PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContextWalker->hPDOSMemHandle, (IMG_VOID*)&pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ psMMUContextWalker = psMMUContextWalker->psNext;
+ }
+ }
+ else
+ {
+ psMMUContext->ui32PDChangeMask[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift;
+
+ /*
+ * We've just cleared a cache line's worth of PDE's so we need
+ * to wire up the dummy PT
+ */
+ MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
+ pui32Tmp = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
+ pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_DUMMY_PAGE
+ | SGX_MMU_PDE_VALID;
+ MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
+
+ PDUMPCOMMENT("BRN31620 Re-wire dummy PT due to releasing PT allocation block");
+ PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+		/* We've freed a cacheline's worth of PDE's so trigger a PD cache flush */
+ bFreePTs = IMG_TRUE;
+ }
+
+ return bFreePTs;
+}
+#endif
+
+/*!
+******************************************************************************
+ FUNCTION: _AllocPageTableMemory
+
+ PURPOSE: Allocate physical memory for a page table
+
+ PARAMETERS: In: pMMUHeap - the mmu
+ In: psPTInfoList - PT info
+ Out: psDevPAddr - device physical address for new PT
+ RETURNS: IMG_TRUE - Success
+ IMG_FALSE - Failed
+******************************************************************************/
+static IMG_BOOL
+_AllocPageTableMemory (MMU_HEAP *pMMUHeap,
+ MMU_PT_INFO *psPTInfoList,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+
+ /*
+ depending on the specific system, pagetables are allocated from system memory
+ or device local memory. For now, just look for at least a valid local heap/arena
+ */
+ if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
+ {
+ //FIXME: replace with an RA, this allocator only handles 4k allocs
+ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ pMMUHeap->ui32PTSize,
+						 SGX_MMU_PAGE_SIZE,//FIXME: assume 4K page size for now (wastes memory for smaller pagetables)
+ IMG_NULL,
+ 0,
+ IMG_NULL,
+ (IMG_VOID **)&psPTInfoList->PTPageCpuVAddr,
+ &psPTInfoList->hPTPageOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to OSAllocPages failed"));
+ return IMG_FALSE;
+ }
+
+ /*
+ Force the page to read only, we will make it read/write as
+ and when we need to
+ */
+ MakeKernelPageReadOnly(psPTInfoList->PTPageCpuVAddr);
+
+ /* translate address to device physical */
+ if(psPTInfoList->PTPageCpuVAddr)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->hPTPageOSMemHandle,
+ psPTInfoList->PTPageCpuVAddr);
+ }
+ else
+ {
+ /* This isn't used in all cases since not all ports currently support
+ * OSMemHandleToCpuPAddr() */
+ sCpuPAddr = OSMemHandleToCpuPAddr(psPTInfoList->hPTPageOSMemHandle, 0);
+ }
+
+ sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+ }
+ else
+ {
+ IMG_SYS_PHYADDR sSysPAddr;
+
+ /*
+ just allocate from the first local memory arena
+ (unlikely to be more than one local mem area(?))
+ */
+ //FIXME: just allocate a 4K page for each PT for now
+ if(RA_Alloc(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena,
+ SGX_MMU_PAGE_SIZE,//pMMUHeap->ui32PTSize,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,//pMMUHeap->ui32PTSize,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sSysPAddr.uiAddr))!= IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to RA_Alloc failed"));
+ return IMG_FALSE;
+ }
+
+ /* derive the CPU virtual address */
+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+		/* note: actual amount is pMMUHeap->ui32PTSize but must be a multiple of 4k pages */
+ psPTInfoList->PTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &psPTInfoList->hPTPageOSMemHandle);
+ if(!psPTInfoList->PTPageCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR failed to map page tables"));
+ return IMG_FALSE;
+ }
+
+ /* translate address to device physical */
+ sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+
+ #if PAGE_TEST
+ PageTest(psPTInfoList->PTPageCpuVAddr, sDevPAddr);
+ #endif
+ }
+
+ MakeKernelPageReadWrite(psPTInfoList->PTPageCpuVAddr);
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ {
+ IMG_UINT32 *pui32Tmp;
+ IMG_UINT32 i;
+
+ pui32Tmp = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
+ /* point the new PT entries to the dummy data page */
+ for(i=0; i<pMMUHeap->ui32PTNumEntriesUsable; i++)
+ {
+ pui32Tmp[i] = (pMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_VALID;
+ }
+ /* zero the remaining allocated entries, if any */
+ for(; i<pMMUHeap->ui32PTNumEntriesAllocated; i++)
+ {
+ pui32Tmp[i] = 0;
+ }
+ }
+#else
+ /* Zero the page table. */
+ OSMemSet(psPTInfoList->PTPageCpuVAddr, 0, pMMUHeap->ui32PTSize);
+#endif
+ MakeKernelPageReadOnly(psPTInfoList->PTPageCpuVAddr);
+
+#if defined(PDUMP)
+ {
+ IMG_UINT32 ui32Flags = 0;
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ /* make sure shared heap PT allocs are always pdumped */
+ ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? PDUMP_FLAGS_PERSISTENT : 0;
+#endif
+ /* pdump the PT malloc */
+ PDUMPMALLOCPAGETABLE(&pMMUHeap->psMMUContext->psDeviceNode->sDevId, psPTInfoList->hPTPageOSMemHandle, 0, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, PDUMP_PT_UNIQUETAG);
+ /* pdump the PT Pages */
+ PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfoList->hPTPageOSMemHandle, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+#endif
+
+ /* return the DevPAddr */
+ *psDevPAddr = sDevPAddr;
+
+ return IMG_TRUE;
+}
+
+
+/*!
+******************************************************************************
+ FUNCTION: _FreePageTableMemory
+
+ PURPOSE: Free physical memory for a page table
+
+ PARAMETERS: In: pMMUHeap - the mmu
+ In: psPTInfoList - PT info to free
+ RETURNS: NONE
+******************************************************************************/
+static IMG_VOID
+_FreePageTableMemory (MMU_HEAP *pMMUHeap, MMU_PT_INFO *psPTInfoList)
+{
+ /*
+ free the PT page:
+ depending on the specific system, pagetables are allocated from system memory
+ or device local memory. For now, just look for at least a valid local heap/arena
+ */
+ if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
+ {
+ /* Force the page to read write before we free it*/
+ MakeKernelPageReadWrite(psPTInfoList->PTPageCpuVAddr);
+
+ //FIXME: replace with an RA, this allocator only handles 4k allocs
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ pMMUHeap->ui32PTSize,
+ psPTInfoList->PTPageCpuVAddr,
+ psPTInfoList->hPTPageOSMemHandle);
+ }
+ else
+ {
+ IMG_SYS_PHYADDR sSysPAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+
+ /* derive the system physical address */
+ sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->hPTPageOSMemHandle,
+ psPTInfoList->PTPageCpuVAddr);
+ sSysPAddr = SysCpuPAddrToSysPAddr (sCpuPAddr);
+
+ /* unmap the CPU mapping */
+		/* note: actual amount is pMMUHeap->ui32PTSize but must be a multiple of 4k pages */
+ OSUnMapPhysToLin(psPTInfoList->PTPageCpuVAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psPTInfoList->hPTPageOSMemHandle);
+
+ /*
+ just free from the first local memory arena
+ (unlikely to be more than one local mem area(?))
+ */
+ RA_Free (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+ }
+}
+
+
+
+/*!
+******************************************************************************
+ FUNCTION: _DeferredFreePageTable
+
+ PURPOSE: Free one page table associated with an MMU.
+
+ PARAMETERS: In: pMMUHeap - the mmu heap
+ In: ui32PTIndex - index of the page table to free relative
+ to the base of heap.
+ RETURNS: None
+******************************************************************************/
+static IMG_VOID
+_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT)
+{
+ IMG_UINT32 *pui32PDEntry;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32PDIndex;
+ SYS_DATA *psSysData;
+ MMU_PT_INFO **ppsPTInfoList;
+
+ SysAcquireData(&psSysData);
+
+ /* find the index/offset in PD entries */
+ ui32PDIndex = pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+ /* set the base PT info */
+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
+
+ {
+#if PT_DEBUG
+ if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount > 0)
+ {
+ DumpPT(ppsPTInfoList[ui32PTIndex]);
+ /* Fall-through, will fail assert */
+ }
+#endif
+
+ /* Assert that all mappings have gone */
+ PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == IMG_NULL || ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == 0);
+ }
+
+#if defined(PDUMP)
+ {
+ IMG_UINT32 ui32Flags = 0;
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? PDUMP_FLAGS_PERSISTENT : 0;
+#endif
+ /* pdump the PT free */
+ PDUMPCOMMENT("Free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
+ if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
+ {
+ PDUMPFREEPAGETABLE(&pMMUHeap->psMMUContext->psDeviceNode->sDevId, ppsPTInfoList[ui32PTIndex]->hPTPageOSMemHandle, ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, PDUMP_PT_UNIQUETAG);
+ }
+ }
+#endif
+
+ switch(pMMUHeap->psDevArena->DevMemHeapType)
+ {
+ case DEVICE_MEMORY_HEAP_SHARED :
+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
+ {
+ /* Remove Page Table from all memory contexts */
+ MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
+
+ while(psMMUContext)
+ {
+ /* get the PD CPUVAddr base and advance to the first entry */
+ MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
+ pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
+ pui32PDEntry += ui32PDIndex;
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ /* point the PD entry to the dummy PT */
+ pui32PDEntry[ui32PTIndex] = (psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
+ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_VALID;
+#else
+ /* free the entry */
+ if(bOSFreePT)
+ {
+ pui32PDEntry[ui32PTIndex] = 0;
+ }
+#endif
+ MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
+ #if defined(PDUMP)
+ /* pdump the PD Page modifications */
+ #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ if(psMMUContext->bPDumpActive)
+ #endif
+ {
+ PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+ #endif
+ /* advance to next context */
+ psMMUContext = psMMUContext->psNext;
+ }
+ break;
+ }
+ case DEVICE_MEMORY_HEAP_PERCONTEXT :
+ case DEVICE_MEMORY_HEAP_KERNEL :
+ {
+ MakeKernelPageReadWrite(pMMUHeap->psMMUContext->pvPDCpuVAddr);
+ /* Remove Page Table from this memory context only */
+ pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
+ pui32PDEntry += ui32PDIndex;
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ /* point the PD entry to the dummy PT */
+ pui32PDEntry[ui32PTIndex] = (pMMUHeap->psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
+ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_VALID;
+#else
+ /* free the entry */
+ if(bOSFreePT)
+ {
+ pui32PDEntry[ui32PTIndex] = 0;
+ }
+#endif
+ MakeKernelPageReadOnly(pMMUHeap->psMMUContext->pvPDCpuVAddr);
+
+ /* pdump the PD Page modifications */
+ PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, pMMUHeap->psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ break;
+ }
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_DeferredFreePagetable: ERROR invalid heap type"));
+ return;
+ }
+ }
+
+ /* clear the PT entries in each PT page */
+ if(ppsPTInfoList[ui32PTIndex] != IMG_NULL)
+ {
+ if(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != IMG_NULL)
+ {
+ IMG_PUINT32 pui32Tmp;
+
+ MakeKernelPageReadWrite(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);
+ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr;
+
+ /* clear the entries */
+ for(i=0;
+ (i<pMMUHeap->ui32PTETotalUsable) && (i<pMMUHeap->ui32PTNumEntriesUsable);
+ i++)
+ {
+ /* over-allocated PT entries for 4MB data page case should never be non-zero */
+ pui32Tmp[i] = 0;
+ }
+ MakeKernelPageReadOnly(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);
+
+ /*
+ free the pagetable memory
+ */
+ if(bOSFreePT)
+ {
+ _FreePageTableMemory(pMMUHeap, ppsPTInfoList[ui32PTIndex]);
+ }
+
+ /*
+ decrement the PT Entry Count by the number
+ of entries we've cleared in this pass
+ */
+ pMMUHeap->ui32PTETotalUsable -= i;
+ }
+ else
+ {
+ /* decrement the PT Entry Count by a page's worth of entries */
+ pMMUHeap->ui32PTETotalUsable -= pMMUHeap->ui32PTNumEntriesUsable;
+ }
+
+ if(bOSFreePT)
+ {
+ /* free the pt info */
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(MMU_PT_INFO),
+ ppsPTInfoList[ui32PTIndex],
+ IMG_NULL);
+ ppsPTInfoList[ui32PTIndex] = IMG_NULL;
+ }
+ }
+ else
+ {
+ /* decrement the PT Entry Count by a page's worth of usable entries */
+ pMMUHeap->ui32PTETotalUsable -= pMMUHeap->ui32PTNumEntriesUsable;
+ }
+
+ PDUMPCOMMENT("Finished free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
+}
+
+/*!
+******************************************************************************
+ FUNCTION: _DeferredFreePageTables
+
+ PURPOSE: Free the page tables associated with an MMU.
+
+ PARAMETERS: In: pMMUHeap - the mmu
+ RETURNS: None
+******************************************************************************/
+static IMG_VOID
+_DeferredFreePageTables (MMU_HEAP *pMMUHeap)
+{
+ IMG_UINT32 i;
+#if defined(FIX_HW_BRN_31620)
+ MMU_CONTEXT *psMMUContext = pMMUHeap->psMMUContext;
+ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 *pui32Tmp;
+ IMG_UINT32 j;
+#endif
+#if defined(PDUMP)
+ PDUMPCOMMENT("Free PTs (MMU Context ID == %u, PDBaseIndex == %u, PT count == 0x%x)",
+ pMMUHeap->psMMUContext->ui32PDumpMMUContextID,
+ pMMUHeap->ui32PDBaseIndex,
+ pMMUHeap->ui32PageTableCount);
+#endif
+#if defined(FIX_HW_BRN_31620)
+ for(i=0; i<pMMUHeap->ui32PageTableCount; i++)
+ {
+ ui32PDIndex = (pMMUHeap->ui32PDBaseIndex + i);
+
+ if (psMMUContext->apsPTInfoList[ui32PDIndex])
+ {
+ if (psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr)
+ {
+ /*
+				 * We have to do this to set up the dummy page as
+				 * not all heaps are PD cache-line sized or aligned
+ */
+ for (j=0;j<SGX_MMU_PT_SIZE;j++)
+ {
+ pui32Tmp = (IMG_UINT32 *) psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr;
+ BRN31620InvalidatePageTableEntry(psMMUContext, ui32PDIndex, j, &pui32Tmp[j]);
+ }
+ }
+			/* Free the PT and NULL out the PTInfo */
+ if (BRN31620FreePageTable(pMMUHeap, ui32PDIndex) == IMG_TRUE)
+ {
+ bInvalidateDirectoryCache = IMG_TRUE;
+ }
+ }
+ }
+
+ /*
+	 * Due to freeing PTs in chunks we might only need to flush the
+	 * PT cache rather than the directory cache
+ */
+ if (bInvalidateDirectoryCache)
+ {
+ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
+ }
+ else
+ {
+ MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo);
+ }
+#else
+ for(i=0; i<pMMUHeap->ui32PageTableCount; i++)
+ {
+ _DeferredFreePageTable(pMMUHeap, i, IMG_TRUE);
+ }
+ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
+#endif
+}
+
+
+/*!
+******************************************************************************
+ FUNCTION: _DeferredAllocPagetables
+
+ PURPOSE: Allocates page tables when the device memory allocation is made
+
+ PARAMETERS: In: pMMUHeap - the mmu heap
+ DevVAddr - devVAddr of allocation
+ ui32Size - size of allocation
+ RETURNS: IMG_TRUE - Success
+ IMG_FALSE - Failed
+******************************************************************************/
+static IMG_BOOL
+_DeferredAllocPagetables(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
+{
+ IMG_UINT32 ui32PageTableCount;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 i;
+ IMG_UINT32 *pui32PDEntry;
+ MMU_PT_INFO **ppsPTInfoList;
+ SYS_DATA *psSysData;
+ IMG_DEV_VIRTADDR sHighDevVAddr;
+#if defined(FIX_HW_BRN_31620)
+ IMG_BOOL bFlushSystemCache = IMG_FALSE;
+ IMG_BOOL bSharedPT = IMG_FALSE;
+ IMG_DEV_VIRTADDR sDevVAddrRequestStart;
+ IMG_DEV_VIRTADDR sDevVAddrRequestEnd;
+ IMG_UINT32 ui32PDRequestStart;
+ IMG_UINT32 ui32PDRequestEnd;
+ IMG_UINT32 ui32ModifiedCachelines[BRN31620_CACHE_FLUSH_INDEX_SIZE];
+#endif
+
+ /* Check device linear address */
+#if SGX_FEATURE_ADDRESS_SPACE_SIZE < 32
+ PVR_ASSERT(DevVAddr.uiAddr < (1<<SGX_FEATURE_ADDRESS_SPACE_SIZE));
+#endif
+
+ /* get the sysdata */
+ SysAcquireData(&psSysData);
+
+ /* find the index/offset in PD entries */
+ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+ /* how many PDs does the allocation occupy? */
+ /* first check for overflows */
+ if((UINT32_MAX_VALUE - DevVAddr.uiAddr)
+ < (ui32Size + pMMUHeap->ui32DataPageMask + pMMUHeap->ui32PTMask))
+ {
+ /* detected overflow, clamp to highest address */
+ sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
+ }
+ else
+ {
+ sHighDevVAddr.uiAddr = DevVAddr.uiAddr
+ + ui32Size
+ + pMMUHeap->ui32DataPageMask
+ + pMMUHeap->ui32PTMask;
+ }
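+
+	/*
+	 Worked example of the clamp above (illustrative, 4k pages): with
+	 DevVAddr.uiAddr = 0xFFF00000 and ui32Size = 0x00200000 the unclamped
+	 sum exceeds 32 bits, so sHighDevVAddr is clamped to UINT32_MAX_VALUE
+	 instead of wrapping to a low address.
+	*/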
+
+ ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+ /* Fix allocation of last 4MB */
+ if (ui32PageTableCount == 0)
+ ui32PageTableCount = 1024;
+
+#if defined(FIX_HW_BRN_31620)
+ for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
+ {
+ ui32ModifiedCachelines[i] = 0;
+ }
+
+ /*****************************************************************/
+ /* Save off requested data and round allocation to PD cache line */
+ /*****************************************************************/
+ sDevVAddrRequestStart = DevVAddr;
+ ui32PDRequestStart = ui32PDIndex;
+ sDevVAddrRequestEnd = sHighDevVAddr;
+ ui32PDRequestEnd = ui32PageTableCount - 1;
+
+ /* Round allocations down to the PD cacheline */
+ DevVAddr.uiAddr = DevVAddr.uiAddr & (~BRN31620_PDE_CACHE_FILL_MASK);
+
+ /* Round the end address of the PD allocation to cacheline */
+ sHighDevVAddr.uiAddr = ((sHighDevVAddr.uiAddr + (BRN31620_PDE_CACHE_FILL_SIZE - 1)) & (~BRN31620_PDE_CACHE_FILL_MASK));
+
+ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+ ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+ /* Fix allocation of last 4MB */
+ if (ui32PageTableCount == 0)
+ ui32PageTableCount = 1024;
+#endif
+
+ ui32PageTableCount -= ui32PDIndex;
+
+ /* get the PD CPUVAddr base and advance to the first entry */
+ pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
+ pui32PDEntry += ui32PDIndex;
+
+ /* and advance to the first PT info list */
+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
+
+#if defined(PDUMP)
+ {
+ IMG_UINT32 ui32Flags = 0;
+
+ /* pdump the PD Page modifications */
+ if( MMU_IsHeapShared(pMMUHeap) )
+ {
+ ui32Flags |= PDUMP_FLAGS_CONTINUOUS;
+ }
+ PDUMPCOMMENTWITHFLAGS(ui32Flags, "Alloc PTs (MMU Context ID == %u, PDBaseIndex == %u, Size == 0x%x)",
+ pMMUHeap->psMMUContext->ui32PDumpMMUContextID,
+ pMMUHeap->ui32PDBaseIndex,
+ ui32Size);
+ PDUMPCOMMENTWITHFLAGS(ui32Flags, "Alloc page table (page count == %08X)", ui32PageTableCount);
+ PDUMPCOMMENTWITHFLAGS(ui32Flags, "Page directory mods (page count == %08X)", ui32PageTableCount);
+ }
+#endif
+ /* walk the psPTInfoList to see what needs allocating: */
+ for(i=0; i<ui32PageTableCount; i++)
+ {
+ if(ppsPTInfoList[i] == IMG_NULL)
+ {
+#if defined(FIX_HW_BRN_31620)
+ /* Check if we have a saved PT (i.e. this PDE cache line is still live) */
+ if (pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i])
+ {
+ /* Only make this PTInfo "live" if it's requested */
+ if (((ui32PDIndex + i) >= ui32PDRequestStart) && ((ui32PDIndex + i) <= ui32PDRequestEnd))
+ {
+ IMG_UINT32 ui32PDCacheLine = (ui32PDIndex + i) >> BRN31620_PDES_PER_CACHE_LINE_SHIFT;
+
+ ppsPTInfoList[i] = pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i];
+ pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i] = IMG_NULL;
+
+ pMMUHeap->psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine]++;
+ }
+ }
+ else
+ {
+#endif
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof (MMU_PT_INFO),
+ (IMG_VOID **)&ppsPTInfoList[i], IMG_NULL,
+ "MMU Page Table Info");
+ if (ppsPTInfoList[i] == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to OSAllocMem failed"));
+ return IMG_FALSE;
+ }
+ OSMemSet (ppsPTInfoList[i], 0, sizeof(MMU_PT_INFO));
+#if defined(FIX_HW_BRN_31620)
+ }
+#endif
+ }
+#if defined(FIX_HW_BRN_31620)
+ /* Only try to allocate if ppsPTInfoList[i] is valid */
+ if (ppsPTInfoList[i])
+ {
+#endif
+ if(ppsPTInfoList[i]->hPTPageOSMemHandle == IMG_NULL
+ && ppsPTInfoList[i]->PTPageCpuVAddr == IMG_NULL)
+ {
+ IMG_DEV_PHYADDR sDevPAddr;
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ IMG_UINT32 *pui32Tmp;
+ IMG_UINT32 j;
+#else
+#if !defined(FIX_HW_BRN_31620)
+ /* no page table has been allocated so allocate one */
+ PVR_ASSERT(pui32PDEntry[i] == 0);
+#endif
+#endif
+ if(_AllocPageTableMemory (pMMUHeap, ppsPTInfoList[i], &sDevPAddr) != IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to _AllocPageTableMemory failed"));
+ return IMG_FALSE;
+ }
+#if defined(FIX_HW_BRN_31620)
+ bFlushSystemCache = IMG_TRUE;
+ /* Bump up the page table count if required */
+ {
+ IMG_UINT32 ui32PD;
+ IMG_UINT32 ui32PDCacheLine;
+ IMG_UINT32 ui32PDBitMaskIndex;
+ IMG_UINT32 ui32PDBitMaskShift;
+
+ ui32PD = ui32PDIndex + i;
+ ui32PDCacheLine = ui32PD >> BRN31620_PDES_PER_CACHE_LINE_SHIFT;
+ ui32PDBitMaskIndex = ui32PDCacheLine >> BRN31620_CACHE_FLUSH_BITS_SHIFT;
+ ui32PDBitMaskShift = ui32PDCacheLine & BRN31620_CACHE_FLUSH_BITS_MASK;
+ ui32ModifiedCachelines[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift;
+
+ /* Add 1 to ui32PD as we want the count, not a range */
+ if ((pMMUHeap->ui32PDBaseIndex + pMMUHeap->ui32PageTableCount) < (ui32PD + 1))
+ {
+ pMMUHeap->ui32PageTableCount = (ui32PD + 1) - pMMUHeap->ui32PDBaseIndex;
+ }
+
+ if (((ui32PDIndex + i) >= ui32PDRequestStart) && ((ui32PDIndex + i) <= ui32PDRequestEnd))
+ {
+ pMMUHeap->psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine]++;
+ }
+ }
+#endif
+ switch(pMMUHeap->psDevArena->DevMemHeapType)
+ {
+ case DEVICE_MEMORY_HEAP_SHARED :
+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
+ {
+ /* insert Page Table into all memory contexts */
+ MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
+
+ while(psMMUContext)
+ {
+ MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
+ /* get the PD CPUVAddr base and advance to the first entry */
+ pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
+ pui32PDEntry += ui32PDIndex;
+
+ /* insert the page, specify the data page size and make the pde valid */
+ pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | pMMUHeap->ui32PDEPageSizeCtrl
+ | SGX_MMU_PDE_VALID;
+ MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
+ #if defined(PDUMP)
+ /* pdump the PD Page modifications */
+ #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ if(psMMUContext->bPDumpActive)
+ #endif
+ {
+ //PDUMPCOMMENT("_DeferredAllocPTs: Dumping shared PDEs on context %d (%s)", psMMUContext->ui32PDumpMMUContextID, (psMMUContext->bPDumpActive) ? "active" : "");
+ PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+ #endif /* PDUMP */
+ /* advance to next context */
+ psMMUContext = psMMUContext->psNext;
+ }
+#if defined(FIX_HW_BRN_31620)
+ bSharedPT = IMG_TRUE;
+#endif
+ break;
+ }
+ case DEVICE_MEMORY_HEAP_PERCONTEXT :
+ case DEVICE_MEMORY_HEAP_KERNEL :
+ {
+ MakeKernelPageReadWrite(pMMUHeap->psMMUContext->pvPDCpuVAddr);
+ /* insert Page Table into only this memory context */
+ pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | pMMUHeap->ui32PDEPageSizeCtrl
+ | SGX_MMU_PDE_VALID;
+ MakeKernelPageReadOnly(pMMUHeap->psMMUContext->pvPDCpuVAddr);
+ /* pdump the PD Page modifications */
+ //PDUMPCOMMENT("_DeferredAllocPTs: Dumping kernel PDEs on context %d (%s)", pMMUHeap->psMMUContext->ui32PDumpMMUContextID, (pMMUHeap->psMMUContext->bPDumpActive) ? "active" : "");
+ PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, pMMUHeap->psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ break;
+ }
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR invalid heap type"));
+ return IMG_FALSE;
+ }
+ }
+
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ /* This is actually not to do with multiple mem contexts, but to do with the directory cache.
+ In the 1 context implementation of the MMU, the directory "cache" is actually a copy of the
+ page directory memory, and requires updating whenever the page directory changes, even if there
+ was no previous value in a particular entry
+ */
+ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
+#endif
+#if defined(FIX_HW_BRN_31620)
+ /* If this PT is not in the requested range then save it and null out the main PTInfo */
+ if (((ui32PDIndex + i) < ui32PDRequestStart) || ((ui32PDIndex + i) > ui32PDRequestEnd))
+ {
+ pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i] = ppsPTInfoList[i];
+ ppsPTInfoList[i] = IMG_NULL;
+ }
+#endif
+ }
+ else
+ {
+#if !defined(FIX_HW_BRN_31620)
+ /* already have an allocated PT */
+ PVR_ASSERT(pui32PDEntry[i] != 0);
+#endif
+ }
+#if defined(FIX_HW_BRN_31620)
+ }
+#endif
+ }
+
+ #if defined(SGX_FEATURE_SYSTEM_CACHE)
+ #if defined(FIX_HW_BRN_31620)
+ /* This function might not allocate any new PT's so check before flushing */
+ if (bFlushSystemCache)
+ {
+ #endif
+
+ MMU_InvalidateSystemLevelCache(pMMUHeap->psMMUContext->psDevInfo);
+ #endif /* SGX_FEATURE_SYSTEM_CACHE */
+ #if defined(FIX_HW_BRN_31620)
+ }
+
+ /* Handle the last 4MB roll over */
+ sHighDevVAddr.uiAddr = sHighDevVAddr.uiAddr - 1;
+
+ /* Update our PD flush mask if required */
+ if (bFlushSystemCache)
+ {
+ MMU_CONTEXT *psMMUContext;
+
+ if (bSharedPT)
+ {
+ MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
+
+ while(psMMUContext)
+ {
+ for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
+ {
+ psMMUContext->ui32PDChangeMask[i] |= ui32ModifiedCachelines[i];
+ }
+
+ /* advance to next context */
+ psMMUContext = psMMUContext->psNext;
+ }
+ }
+ else
+ {
+ for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
+ {
+ pMMUHeap->psMMUContext->ui32PDChangeMask[i] |= ui32ModifiedCachelines[i];
+ }
+ }
+
+ /*
+ * Always hook up the dummy page when we allocate a new range of PTs.
+	 * It might be that this is overwritten before SGX accesses the dummy
+	 * page, but we don't care; it's a lot simpler to add this logic here.
+ */
+ psMMUContext = pMMUHeap->psMMUContext;
+ for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
+ {
+ IMG_UINT32 j;
+
+ for(j=0;j<BRN31620_CACHE_FLUSH_BITS_SIZE;j++)
+ {
+ if (ui32ModifiedCachelines[i] & (1 << j))
+ {
+ PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo;
+ MMU_PT_INFO *psTempPTInfo = IMG_NULL;
+ IMG_UINT32 *pui32Tmp;
+
+ ui32PDIndex = (((i * BRN31620_CACHE_FLUSH_BITS_SIZE) + j) * BRN31620_PDES_PER_CACHE_LINE_SIZE) + BRN31620_DUMMY_PDE_INDEX;
+
+ /* The PT for the dummy page might not be "live". If not get it from the saved pointer */
+ if (psMMUContext->apsPTInfoList[ui32PDIndex])
+ {
+ psTempPTInfo = psMMUContext->apsPTInfoList[ui32PDIndex];
+ }
+ else
+ {
+ psTempPTInfo = psMMUContext->apsPTInfoListSave[ui32PDIndex];
+ }
+
+ PVR_ASSERT(psTempPTInfo != IMG_NULL);
+
+ MakeKernelPageReadWrite(psTempPTInfo->PTPageCpuVAddr);
+ pui32Tmp = (IMG_UINT32 *) psTempPTInfo->PTPageCpuVAddr;
+ PVR_ASSERT(pui32Tmp != IMG_NULL);
+ pui32Tmp[BRN31620_DUMMY_PTE_INDEX] = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_DUMMY_PAGE
+ | SGX_MMU_PTE_READONLY
+ | SGX_MMU_PTE_VALID;
+ MakeKernelPageReadOnly(psTempPTInfo->PTPageCpuVAddr);
+ PDUMPCOMMENT("BRN31620 Dump PTE for dummy page after wireing up new PT");
+ PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psTempPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32Tmp[BRN31620_DUMMY_PTE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+ }
+ }
+ }
+ #endif
+
+ return IMG_TRUE;
+}
+
+
+#if defined(PDUMP)
+/*!
+ * FUNCTION: MMU_GetPDumpContextID
+ *
+ * RETURNS: pdump MMU context ID
+ */
+IMG_UINT32 MMU_GetPDumpContextID(IMG_HANDLE hDevMemContext)
+{
+ BM_CONTEXT *pBMContext = hDevMemContext;
+ PVR_ASSERT(pBMContext);
+ /* PRQA S 0505 1 */ /* PVR_ASSERT should catch NULL ptr */
+ return pBMContext->psMMUContext->ui32PDumpMMUContextID;
+}
+
+/*!
+ * FUNCTION: MMU_SetPDumpAttribs
+ *
+ * PURPOSE: Called from MMU_Initialise and MMU_Create.
+ * Sets up device-specific attributes for pdumping.
+ * FIXME: breaks variable size PTs. Really need separate per context
+ * and per heap attribs.
+ *
+ * INPUT: psDeviceNode - used to access deviceID
+ * INPUT: ui32DataPageMask - data page mask
+ * INPUT: ui32PTSize - PT size
+ *
+ * OUTPUT: psMMUAttrib - pdump MMU attributes
+ *
+ * RETURNS: none
+ */
+#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
+# error "FIXME: breaks variable size pagetables"
+#endif
+static IMG_VOID MMU_SetPDumpAttribs(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32DataPageMask,
+ IMG_UINT32 ui32PTSize)
+{
+ /* Sets up device ID, contains pdump memspace name */
+ psMMUAttrib->sDevId = psDeviceNode->sDevId;
+
+ psMMUAttrib->pszPDRegRegion = IMG_NULL;
+ psMMUAttrib->ui32DataPageMask = ui32DataPageMask;
+
+ psMMUAttrib->ui32PTEValid = SGX_MMU_PTE_VALID;
+ psMMUAttrib->ui32PTSize = ui32PTSize;
+ psMMUAttrib->ui32PTEAlignShift = SGX_MMU_PTE_ADDR_ALIGNSHIFT;
+
+ psMMUAttrib->ui32PDEMask = SGX_MMU_PDE_ADDR_MASK;
+ psMMUAttrib->ui32PDEAlignShift = SGX_MMU_PDE_ADDR_ALIGNSHIFT;
+}
+#endif /* PDUMP */
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_Initialise
+
+ PURPOSE: Called from BM_CreateContext.
+ Allocates the top level Page Directory 4k Page for the new context.
+
+ PARAMETERS: None
+ RETURNS: PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR
+MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr)
+{
+ IMG_UINT32 *pui32Tmp;
+ IMG_UINT32 i;
+ IMG_CPU_VIRTADDR pvPDCpuVAddr;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ MMU_CONTEXT *psMMUContext;
+ IMG_HANDLE hPDOSMemHandle = IMG_NULL;
+ SYS_DATA *psSysData;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+#if defined(PDUMP)
+ PDUMP_MMU_ATTRIB sMMUAttrib;
+#endif
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Initialise"));
+
+ SysAcquireData(&psSysData);
+#if defined(PDUMP)
+ /* Note: these attribs are on the stack, used only to pdump the MMU context
+ * creation. */
+ MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode,
+ SGX_MMU_PAGE_MASK,
+ SGX_MMU_PT_SIZE * sizeof(IMG_UINT32));
+#endif
+
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof (MMU_CONTEXT),
+ (IMG_VOID **)&psMMUContext, IMG_NULL,
+ "MMU Context");
+ if (psMMUContext == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocMem failed"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ OSMemSet (psMMUContext, 0, sizeof(MMU_CONTEXT));
+
+ /* stick the devinfo in the context for subsequent use */
+ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+ psMMUContext->psDevInfo = psDevInfo;
+
+ /* record device node for subsequent use */
+ psMMUContext->psDeviceNode = psDeviceNode;
+
+ /* allocate 4k page directory page for the new context */
+ if(psDeviceNode->psLocalDevMemArena == IMG_NULL)
+ {
+ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ IMG_NULL,
+ &pvPDCpuVAddr,
+ &hPDOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+ }
+
+ if(pvPDCpuVAddr)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(hPDOSMemHandle,
+ pvPDCpuVAddr);
+ }
+ else
+ {
+ /* This is not used in all cases, since not all ports currently
+ * support OSMemHandleToCpuPAddr */
+ sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
+ }
+ sPDDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+
+ #if PAGE_TEST
+ PageTest(pvPDCpuVAddr, sPDDevPAddr);
+ #endif
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ /* Allocate dummy PT and Data pages for the first context to be created */
+ if(!psDevInfo->pvMMUContextList)
+ {
+ /* Dummy PT page */
+ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ IMG_NULL,
+ &psDevInfo->pvDummyPTPageCpuVAddr,
+ &psDevInfo->hDummyPTPageOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+ }
+
+ if(psDevInfo->pvDummyPTPageCpuVAddr)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle,
+ psDevInfo->pvDummyPTPageCpuVAddr);
+ }
+ else
+ {
+ /* This is not used in all cases, since not all ports currently
+ * support OSMemHandleToCpuPAddr */
+ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyPTPageOSMemHandle, 0);
+ }
+ psDevInfo->sDummyPTDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+
+ /* Dummy Data page */
+ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ IMG_NULL,
+ &psDevInfo->pvDummyDataPageCpuVAddr,
+ &psDevInfo->hDummyDataPageOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+ }
+
+ if(psDevInfo->pvDummyDataPageCpuVAddr)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyDataPageOSMemHandle,
+ psDevInfo->pvDummyDataPageCpuVAddr);
+ }
+ else
+ {
+ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyDataPageOSMemHandle, 0);
+ }
+ psDevInfo->sDummyDataDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+ }
+#endif /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */
+#if defined(FIX_HW_BRN_31620)
+ /* Allocate dummy Data pages for the first context to be created */
+ if(!psDevInfo->pvMMUContextList)
+ {
+ IMG_UINT32 j;
+ /* Allocate dummy page */
+ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ IMG_NULL,
+ &psDevInfo->pvBRN31620DummyPageCpuVAddr,
+ &psDevInfo->hBRN31620DummyPageOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+ }
+
+ /* Get a physical address */
+ if(psDevInfo->pvBRN31620DummyPageCpuVAddr)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPageOSMemHandle,
+ psDevInfo->pvBRN31620DummyPageCpuVAddr);
+ }
+ else
+ {
+ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hBRN31620DummyPageOSMemHandle, 0);
+ }
+
+ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvBRN31620DummyPageCpuVAddr;
+ for(j=0; j<(SGX_MMU_PAGE_SIZE/4); j++)
+ {
+ pui32Tmp[j] = BRN31620_DUMMY_PAGE_SIGNATURE;
+ }
+
+ psDevInfo->sBRN31620DummyPageDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+ PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, 0, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+
+ /* Allocate dummy PT */
+ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ IMG_NULL,
+ &psDevInfo->pvBRN31620DummyPTCpuVAddr,
+ &psDevInfo->hBRN31620DummyPTOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+ }
+
+ /* Get a physical address */
+ if(psDevInfo->pvBRN31620DummyPTCpuVAddr)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPTOSMemHandle,
+ psDevInfo->pvBRN31620DummyPTCpuVAddr);
+ }
+ else
+ {
+ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hBRN31620DummyPTOSMemHandle, 0);
+ }
+
+ OSMemSet(psDevInfo->pvBRN31620DummyPTCpuVAddr,0,SGX_MMU_PAGE_SIZE);
+ psDevInfo->sBRN31620DummyPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+ PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, 0, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+ }
+#endif
+ }
+ else
+ {
+ IMG_SYS_PHYADDR sSysPAddr;
+
+ /* allocate from the device's local memory arena */
+ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sSysPAddr.uiAddr))!= IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
+ }
+
+ /* derive the CPU virtual address */
+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+ sPDDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
+ pvPDCpuVAddr = OSMapPhysToLin(sCpuPAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &hPDOSMemHandle);
+ if(!pvPDCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
+ return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
+ }
+
+ #if PAGE_TEST
+ PageTest(pvPDCpuVAddr, sPDDevPAddr);
+ #endif
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ /* Allocate dummy PT and Data pages for the first context to be created */
+ if(!psDevInfo->pvMMUContextList)
+ {
+ /* Dummy PT page */
+ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sSysPAddr.uiAddr))!= IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
+ }
+
+ /* derive the CPU virtual address */
+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+ psDevInfo->sDummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
+ psDevInfo->pvDummyPTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &psDevInfo->hDummyPTPageOSMemHandle);
+ if(!psDevInfo->pvDummyPTPageCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
+ return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
+ }
+
+ /* Dummy Data page */
+ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sSysPAddr.uiAddr))!= IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
+ }
+
+ /* derive the CPU virtual address */
+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+ psDevInfo->sDummyDataDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
+ psDevInfo->pvDummyDataPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &psDevInfo->hDummyDataPageOSMemHandle);
+ if(!psDevInfo->pvDummyDataPageCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
+ return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
+ }
+ }
+#endif /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */
+#if defined(FIX_HW_BRN_31620)
+ /* Allocate dummy PT and Data pages for the first context to be created */
+ if(!psDevInfo->pvMMUContextList)
+ {
+ IMG_UINT32 j;
+ /* Allocate dummy page */
+ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sSysPAddr.uiAddr))!= IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
+ }
+
+ /* derive the CPU virtual address */
+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+ psDevInfo->sBRN31620DummyPageDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
+ psDevInfo->pvBRN31620DummyPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &psDevInfo->hBRN31620DummyPageOSMemHandle);
+ if(!psDevInfo->pvBRN31620DummyPageCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
+ return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
+ }
+
+ MakeKernelPageReadWrite(psDevInfo->pvBRN31620DummyPageCpuVAddr);
+ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvBRN31620DummyPageCpuVAddr;
+ for(j=0; j<(SGX_MMU_PAGE_SIZE/4); j++)
+ {
+ pui32Tmp[j] = BRN31620_DUMMY_PAGE_SIGNATURE;
+ }
+ MakeKernelPageReadOnly(psDevInfo->pvBRN31620DummyPageCpuVAddr);
+ PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, 0, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+
+ /* Allocate dummy PT */
+ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sSysPAddr.uiAddr))!= IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
+ }
+
+ /* derive the CPU virtual address */
+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+ psDevInfo->sBRN31620DummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
+ psDevInfo->pvBRN31620DummyPTCpuVAddr = OSMapPhysToLin(sCpuPAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &psDevInfo->hBRN31620DummyPTOSMemHandle);
+
+ if(!psDevInfo->pvBRN31620DummyPTCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
+ return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
+ }
+
+ OSMemSet(psDevInfo->pvBRN31620DummyPTCpuVAddr,0,SGX_MMU_PAGE_SIZE);
+ PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, 0, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+ }
+#endif /* #if defined(FIX_HW_BRN_31620) */
+ }
+
+#if defined(FIX_HW_BRN_31620)
+ if (!psDevInfo->pvMMUContextList)
+ {
+ /* Save the kernel MMU context which is always the 1st to be created */
+ psDevInfo->hKernelMMUContext = psMMUContext;
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: saving kernel mmu context: %p", psMMUContext));
+ }
+#endif
+
+#if defined(PDUMP)
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ /* Find out if this context is for the active pdump client.
+ * If it is, need to ensure PD entries are pdumped whenever another
+ * process allocates from a shared heap. */
+ {
+ PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData();
+ if(psPerProc == IMG_NULL)
+ {
+ /* changes to the kernel context PD/PTs should be pdumped */
+ psMMUContext->bPDumpActive = IMG_TRUE;
+ }
+ else
+ {
+ psMMUContext->bPDumpActive = psPerProc->bPDumpActive;
+ }
+ }
+#endif /* SUPPORT_PDUMP_MULTI_PROCESS */
+ /* pdump the PD malloc */
+#if IMG_ADDRSPACE_PHYSADDR_BITS == 32
+ PDUMPCOMMENT("Alloc page directory for new MMU context (PDDevPAddr == 0x%08x)",
+ sPDDevPAddr.uiAddr);
+#else
+ PDUMPCOMMENT("Alloc page directory for new MMU context, 64-bit arch detected (PDDevPAddr == 0x%08x%08x)",
+ sPDDevPAddr.uiHighAddr, sPDDevPAddr.uiAddr);
+#endif
+ PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, hPDOSMemHandle, 0, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PD_UNIQUETAG);
+#endif /* PDUMP */
+
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ EnableHostAccess(psMMUContext);
+#endif
+
+ if (pvPDCpuVAddr)
+ {
+ pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: pvPDCpuVAddr invalid"));
+ return PVRSRV_ERROR_INVALID_CPU_ADDR;
+ }
+
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ MakeKernelPageReadWrite(pvPDCpuVAddr);
+ /* wire-up the new PD to the dummy PT */
+ for(i=0; i<SGX_MMU_PD_SIZE; i++)
+ {
+ pui32Tmp[i] = (psDevInfo->sDummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_VALID;
+ }
+ MakeKernelPageReadOnly(pvPDCpuVAddr);
+
+ if(!psDevInfo->pvMMUContextList)
+ {
+ /*
+ if we've just allocated the dummy pages
+ wire up the dummy PT to the dummy data page
+ */
+ MakeKernelPageReadWrite(psDevInfo->pvDummyPTPageCpuVAddr);
+ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyPTPageCpuVAddr;
+ for(i=0; i<SGX_MMU_PT_SIZE; i++)
+ {
+ pui32Tmp[i] = (psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_VALID;
+ }
+ MakeKernelPageReadOnly(psDevInfo->pvDummyPTPageCpuVAddr);
+ /* pdump the Dummy PT Page */
+ PDUMPCOMMENT("Dummy Page table contents");
+ PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hDummyPTPageOSMemHandle, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+
+ /*
+ write a signature to the dummy data page
+ */
+ MakeKernelPageReadWrite(psDevInfo->pvDummyDataPageCpuVAddr);
+ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyDataPageCpuVAddr;
+ for(i=0; i<(SGX_MMU_PAGE_SIZE/4); i++)
+ {
+ pui32Tmp[i] = DUMMY_DATA_PAGE_SIGNATURE;
+ }
+ MakeKernelPageReadOnly(psDevInfo->pvDummyDataPageCpuVAddr);
+ /* pdump the Dummy Data Page */
+ PDUMPCOMMENT("Dummy Data Page contents");
+ PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hDummyDataPageOSMemHandle, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+#else /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */
+ /* initialise the PD to invalid address state */
+ MakeKernelPageReadWrite(pvPDCpuVAddr);
+ for(i=0; i<SGX_MMU_PD_SIZE; i++)
+ {
+ /* invalid, no read, no write, no cache consistency */
+ pui32Tmp[i] = 0;
+ }
+ MakeKernelPageReadOnly(pvPDCpuVAddr);
+#endif /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */
+
+#if defined(PDUMP)
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ if(psMMUContext->bPDumpActive)
+#endif /* SUPPORT_PDUMP_MULTI_PROCESS */
+ {
+ /* pdump the PD Page */
+ PDUMPCOMMENT("Page directory contents");
+ PDUMPPDENTRIES(&sMMUAttrib, hPDOSMemHandle, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+#endif
+#if defined(FIX_HW_BRN_31620)
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 ui32PDCount = 0;
+ IMG_UINT32 *pui32PT;
+ pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr;
+
+ PDUMPCOMMENT("BRN31620 Set up dummy PT");
+
+ MakeKernelPageReadWrite(psDevInfo->pvBRN31620DummyPTCpuVAddr);
+ pui32PT = (IMG_UINT32 *) psDevInfo->pvBRN31620DummyPTCpuVAddr;
+ pui32PT[BRN31620_DUMMY_PTE_INDEX] = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_DUMMY_PAGE
+ | SGX_MMU_PTE_READONLY
+ | SGX_MMU_PTE_VALID;
+ MakeKernelPageReadOnly(psDevInfo->pvBRN31620DummyPTCpuVAddr);
+
+#if defined(PDUMP)
+ /* Dump initial contents */
+ PDUMPCOMMENT("BRN31620 Dump dummy PT contents");
+ PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ PDUMPCOMMENT("BRN31620 Dump dummy page contents");
+ PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+
+ /* Dump the wiring */
+ for(i=0;i<SGX_MMU_PT_SIZE;i++)
+ {
+ PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPTOSMemHandle, &pui32PT[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+#endif
+ PDUMPCOMMENT("BRN31620 Dump PDE wire up");
+ /* Walk the PD, wiring up the PTs */
+ for(i=0;i<SGX_MMU_PD_SIZE;i++)
+ {
+ pui32Tmp[i] = 0;
+
+ if (ui32PDCount == BRN31620_DUMMY_PDE_INDEX)
+ {
+ MakeKernelPageReadWrite(pvPDCpuVAddr);
+ pui32Tmp[i] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_DUMMY_PAGE
+ | SGX_MMU_PDE_VALID;
+ MakeKernelPageReadOnly(pvPDCpuVAddr);
+ }
+ PDUMPMEMPTENTRIES(&sMMUAttrib, hPDOSMemHandle, (IMG_VOID *) &pui32Tmp[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ ui32PDCount++;
+ if (ui32PDCount == BRN31620_PDES_PER_CACHE_LINE_SIZE)
+ {
+ /* Reset the per-cache-line PDE count */
+ ui32PDCount = 0;
+ }
+ }
+
+
+ /* pdump the BRN31620 dummy page */
+ PDUMPCOMMENT("BRN31620 dummy page contents");
+ PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+#endif
+#if defined(PDUMP)
+ /* pdump set MMU context */
+ {
+ PVRSRV_ERROR eError;
+ /* default MMU type is 1, 4k page */
+ IMG_UINT32 ui32MMUType = 1;
+
+ #if defined(SGX_FEATURE_36BIT_MMU)
+ ui32MMUType = 3;
+ #else
+ #if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
+ ui32MMUType = 2;
+ #endif
+ #endif
+
+ eError = PDumpSetMMUContext(PVRSRV_DEVICE_TYPE_SGX,
+ psDeviceNode->sDevId.pszPDumpDevName,
+ &psMMUContext->ui32PDumpMMUContextID,
+ ui32MMUType,
+ PDUMP_PT_UNIQUETAG,
+ hPDOSMemHandle,
+ pvPDCpuVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to PDumpSetMMUContext failed"));
+ return eError;
+ }
+ }
+
+ /* PDump the context ID */
+ PDUMPCOMMENT("Set MMU context complete (MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID);
+#endif
+
+#if defined(FIX_HW_BRN_31620)
+ for(i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
+ {
+ psMMUContext->ui32PDChangeMask[i] = 0;
+ }
+
+ for(i=0;i<BRN31620_CACHE_FLUSH_SIZE;i++)
+ {
+ psMMUContext->ui32PDCacheRangeRefCount[i] = 0;
+ }
+
+ for(i=0;i<SGX_MAX_PD_ENTRIES;i++)
+ {
+ psMMUContext->apsPTInfoListSave[i] = IMG_NULL;
+ }
+#endif
+ /* store PD info in the MMU context */
+ psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
+ psMMUContext->sPDDevPAddr = sPDDevPAddr;
+ psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
+
+ /* Get some process information to aid debug */
+ psMMUContext->ui32PID = OSGetCurrentProcessIDKM();
+ psMMUContext->szName[0] = '\0';
+ OSGetCurrentProcessNameKM(psMMUContext->szName, MMU_CONTEXT_NAME_SIZE);
+
+ /* return context */
+ *ppsMMUContext = psMMUContext;
+
+ /* return the PD DevPAddr */
+ *psPDDevPAddr = sPDDevPAddr;
+
+
+ /* add the new MMU context onto the list of MMU contexts */
+ psMMUContext->psNext = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
+ psDevInfo->pvMMUContextList = (IMG_VOID*)psMMUContext;
+
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ DisableHostAccess(psMMUContext);
+#endif
+
+ return PVRSRV_OK;
+}
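+
+/*
+ A minimal sketch (kept out of the build with #if 0) of the PDE encoding
+ used by MMU_Initialise above when wiring a PD entry to a page table:
+ the PT's device physical address is shifted down by the PDE alignment
+ and ORed with the page size and valid control bits.
+*/
+#if 0
+static IMG_UINT32 ExamplePDEEncode(IMG_DEV_PHYADDR sPTDevPAddr)
+{
+	/* drop the alignment bits freed up for the control flags */
+	return (sPTDevPAddr.uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+			| SGX_MMU_PDE_PAGE_SIZE_4K
+			| SGX_MMU_PDE_VALID;
+}
+#endif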
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_Finalise
+
+ PURPOSE: Finalise an MMU context, deallocating all its resources.
+
+ PARAMETERS: In: psMMUContext - MMU context to deallocate
+ RETURNS: None.
+******************************************************************************/
+IMG_VOID
+MMU_Finalise (MMU_CONTEXT *psMMUContext)
+{
+ IMG_UINT32 *pui32Tmp, i;
+ SYS_DATA *psSysData;
+ MMU_CONTEXT **ppsMMUContext;
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) || defined(FIX_HW_BRN_31620)
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo;
+ MMU_CONTEXT *psMMUContextList = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
+#endif
+
+ SysAcquireData(&psSysData);
+
+#if defined(PDUMP)
+ /* pdump the MMU context clear */
+ PDUMPCOMMENT("Clear MMU context (MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID);
+ PDUMPCLEARMMUCONTEXT(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->psDeviceNode->sDevId.pszPDumpDevName, psMMUContext->ui32PDumpMMUContextID, 2);
+
+ /* pdump the PD free */
+#if IMG_ADDRSPACE_PHYSADDR_BITS == 32
+ PDUMPCOMMENT("Free page directory (PDDevPAddr == 0x%08x)",
+ psMMUContext->sPDDevPAddr.uiAddr);
+#else
+ PDUMPCOMMENT("Free page directory, 64-bit arch detected (PDDevPAddr == 0x%08x%08x)",
+ psMMUContext->sPDDevPAddr.uiHighAddr, psMMUContext->sPDDevPAddr.uiAddr);
+#endif
+#endif /* PDUMP */
+
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psMMUContext->hPDOSMemHandle, psMMUContext->pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hDummyPTPageOSMemHandle, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hDummyDataPageOSMemHandle, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+#endif
+
+ pui32Tmp = (IMG_UINT32 *)psMMUContext->pvPDCpuVAddr;
+
+ MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
+ /* initialise the PD to invalid address state */
+ for(i=0; i<SGX_MMU_PD_SIZE; i++)
+ {
+ /* invalid, no read, no write, no cache consistency */
+ pui32Tmp[i] = 0;
+ }
+ MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
+
+ /*
+ free the PD:
+ depending on the specific system, the PD is allocated from system memory
+ or device local memory. For now, just check for a valid local heap/arena
+ */
+ if(psMMUContext->psDeviceNode->psLocalDevMemArena == IMG_NULL)
+ {
+#if defined(FIX_HW_BRN_31620)
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo;
+#endif
+ MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ psMMUContext->pvPDCpuVAddr,
+ psMMUContext->hPDOSMemHandle);
+
+#if defined(FIX_HW_BRN_31620)
+ /* If this is the _last_ MMU context it must be the uKernel */
+ if (!psMMUContextList->psNext)
+ {
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ psDevInfo->pvBRN31620DummyPageCpuVAddr,
+ psDevInfo->hBRN31620DummyPageOSMemHandle);
+
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ psDevInfo->pvBRN31620DummyPTCpuVAddr,
+ psDevInfo->hBRN31620DummyPTOSMemHandle);
+
+ }
+#endif
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ /* if this is the last context free the dummy pages too */
+ if(!psMMUContextList->psNext)
+ {
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ psDevInfo->pvDummyPTPageCpuVAddr,
+ psDevInfo->hDummyPTPageOSMemHandle);
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ psDevInfo->pvDummyDataPageCpuVAddr,
+ psDevInfo->hDummyDataPageOSMemHandle);
+ }
+#endif
+ }
+ else
+ {
+ IMG_SYS_PHYADDR sSysPAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+
+ /* derive the system physical address */
+ sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->hPDOSMemHandle,
+ psMMUContext->pvPDCpuVAddr);
+ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
+
+ /* unmap the CPU mapping */
+ OSUnMapPhysToLin(psMMUContext->pvPDCpuVAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psMMUContext->hPDOSMemHandle);
+ /* and free the memory */
+ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ /* if this is the last context free the dummy pages too */
+ if(!psMMUContextList->psNext)
+ {
+ /* free the Dummy PT Page */
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle,
+ psDevInfo->pvDummyPTPageCpuVAddr);
+ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
+
+ /* unmap the CPU mapping */
+ OSUnMapPhysToLin(psDevInfo->pvDummyPTPageCpuVAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psDevInfo->hDummyPTPageOSMemHandle);
+ /* and free the memory */
+ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+
+ /* free the Dummy Data Page */
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyDataPageOSMemHandle,
+ psDevInfo->pvDummyDataPageCpuVAddr);
+ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
+
+ /* unmap the CPU mapping */
+ OSUnMapPhysToLin(psDevInfo->pvDummyDataPageCpuVAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psDevInfo->hDummyDataPageOSMemHandle);
+ /* and free the memory */
+ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+ }
+#endif
+#if defined(FIX_HW_BRN_31620)
+ /* if this is the last context free the dummy pages too */
+ if(!psMMUContextList->psNext)
+ {
+ /* free the Page */
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPageOSMemHandle,
+ psDevInfo->pvBRN31620DummyPageCpuVAddr);
+ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
+
+ /* unmap the CPU mapping */
+ OSUnMapPhysToLin(psDevInfo->pvBRN31620DummyPageCpuVAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psDevInfo->hBRN31620DummyPageOSMemHandle);
+ /* and free the memory */
+ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+
+ /* free the Dummy PT */
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPTOSMemHandle,
+ psDevInfo->pvBRN31620DummyPTCpuVAddr);
+ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
+
+ /* unmap the CPU mapping */
+ OSUnMapPhysToLin(psDevInfo->pvBRN31620DummyPTCpuVAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psDevInfo->hBRN31620DummyPTOSMemHandle);
+ /* and free the memory */
+ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+ }
+#endif
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Finalise"));
+
+ /* remove the MMU context from the list of MMU contexts */
+ ppsMMUContext = (MMU_CONTEXT**)&psMMUContext->psDevInfo->pvMMUContextList;
+ while(*ppsMMUContext)
+ {
+ if(*ppsMMUContext == psMMUContext)
+ {
+ /* remove item from the list */
+ *ppsMMUContext = psMMUContext->psNext;
+ break;
+ }
+
+ /* advance to the next entry */
+ ppsMMUContext = &((*ppsMMUContext)->psNext);
+ }
+
+ /* free the context itself. */
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_CONTEXT), psMMUContext, IMG_NULL);
+ /*not nulling pointer, copy on stack*/
+}
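+
+/*
+ A minimal sketch (kept out of the build with #if 0) of the
+ pointer-to-pointer unlink idiom used at the end of MMU_Finalise:
+ walking with a MMU_CONTEXT** means removing the list head needs no
+ special case.
+*/
+#if 0
+static IMG_VOID ExampleUnlinkContext(MMU_CONTEXT **ppsHead, MMU_CONTEXT *psVictim)
+{
+	MMU_CONTEXT **ppsWalker;
+
+	for (ppsWalker = ppsHead; *ppsWalker; ppsWalker = &(*ppsWalker)->psNext)
+	{
+		if (*ppsWalker == psVictim)
+		{
+			/* redirect the previous link (or the head) past the victim */
+			*ppsWalker = psVictim->psNext;
+			break;
+		}
+	}
+}
+#endif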
+
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_InsertHeap
+
+ PURPOSE: Copies PDEs from shared/exported heap into current MMU context.
+
+ PARAMETERS: In: psMMUContext - the mmu
+ In: psMMUHeap - a shared/exported heap
+
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap)
+{
+ IMG_UINT32 *pui32PDCpuVAddr = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
+ IMG_UINT32 *pui32KernelPDCpuVAddr = (IMG_UINT32 *) psMMUHeap->psMMUContext->pvPDCpuVAddr;
+ IMG_UINT32 ui32PDEntry;
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
+#endif
+
+ /* advance to the first entry */
+ pui32PDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
+ pui32KernelPDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
+
+ /*
+ update the PD range relating to the heap's
+ device virtual address range
+ */
+#if defined(PDUMP)
+ PDUMPCOMMENT("Page directory shared heap range copy");
+ PDUMPCOMMENT(" (Source heap MMU Context ID == %u, PT count == 0x%x)",
+ psMMUHeap->psMMUContext->ui32PDumpMMUContextID,
+ psMMUHeap->ui32PageTableCount);
+ PDUMPCOMMENT(" (Destination MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID);
+#endif /* PDUMP */
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ EnableHostAccess(psMMUContext);
+#endif
+
+ for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PageTableCount; ui32PDEntry++)
+ {
+#if (!defined(SUPPORT_SGX_MMU_DUMMY_PAGE)) && (!defined(FIX_HW_BRN_31620))
+ /* check we have invalidated target PDEs */
+ PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
+#endif
+ MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
+ /* copy over the PDEs */
+ pui32PDCpuVAddr[ui32PDEntry] = pui32KernelPDCpuVAddr[ui32PDEntry];
+ MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
+ if (pui32PDCpuVAddr[ui32PDEntry])
+ {
+ /* Ensure the shared heap allocation is mapped into the context/PD
+ * for the active pdump process/app. The PTs and backing physical
+ * should also be pdumped (elsewhere).
+ * MALLOC (PT)
+ * LDB (init PT)
+ * MALLOC (data page)
+ * WRW (PTE->data page)
+ * LDB (init data page) -- could be useful to ensure page is initialised
+ */
+ #if defined(PDUMP)
+ //PDUMPCOMMENT("MMU_InsertHeap: Mapping shared heap to new context %d (%s)", psMMUContext->ui32PDumpMMUContextID, (psMMUContext->bPDumpActive) ? "active" : "");
+ #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ if(psMMUContext->bPDumpActive)
+ #endif /* SUPPORT_PDUMP_MULTI_PROCESS */
+ {
+ PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID *) &pui32PDCpuVAddr[ui32PDEntry], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+ #endif
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ bInvalidateDirectoryCache = IMG_TRUE;
+#endif
+ }
+ }
+
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ DisableHostAccess(psMMUContext);
+#endif
+
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ if (bInvalidateDirectoryCache)
+ {
+ /* This is actually not to do with multiple mem contexts, but to do with the directory cache.
+ In the 1 context implementation of the MMU, the directory "cache" is actually a copy of the
+ page directory memory, and requires updating whenever the page directory changes, even if there
+ was no previous value in a particular entry
+ */
+ MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
+ }
+#endif
+}
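+
+/*
+ A minimal sketch (kept out of the build with #if 0) of the PD range
+ arithmetic MMU_InsertHeap relies on: the heap's base device virtual
+ address selects the first PDE to copy, and ui32PageTableCount gives
+ the number of PDEs, one per page table backing the heap.
+*/
+#if 0
+static IMG_VOID ExampleHeapPDRange(MMU_HEAP *psMMUHeap,
+                                   IMG_UINT32 *pui32FirstPDE,
+                                   IMG_UINT32 *pui32NumPDEs)
+{
+	/* index of the first PDE covering the heap */
+	*pui32FirstPDE = psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
+	/* one PDE per page table backing the heap */
+	*pui32NumPDEs = psMMUHeap->ui32PageTableCount;
+}
+#endif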
+
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_UnmapPagesAndFreePTs
+
+ PURPOSE: Unmap pages, invalidate the virtual address range and try to free the PTs
+
+ PARAMETERS: In: psMMUHeap - the mmu.
+ In: sDevVAddr - the device virtual address.
+ In: ui32PageCount - page count
+ In: hUniqueTag - A unique ID for use as a tag identifier
+
+ RETURNS: None
+******************************************************************************/
+static IMG_VOID
+MMU_UnmapPagesAndFreePTs (MMU_HEAP *psMMUHeap,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 ui32PTIndex;
+ IMG_UINT32 *pui32Tmp;
+ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
+
+#if !defined (PDUMP)
+ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
+#endif
+ /* setup tmp devvaddr to base of allocation */
+ sTmpDevVAddr = sDevVAddr;
+
+ for(i=0; i<ui32PageCount; i++)
+ {
+ MMU_PT_INFO **ppsPTInfoList;
+
+ /* find the index/offset in PD entries */
+ ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
+
+ /* and advance to the first PT info list */
+ ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
+
+ {
+ /* find the index/offset of the first PT in the first PT page */
+ ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
+
+ /* Is the PT page valid? */
+ if (!ppsPTInfoList[0])
+ {
+ /*
+ With sparse mappings we expect that the PT could be freed
+ before we reach the end of it as the unmapped pages don't
+ bump ui32ValidPTECount so it can reach zero before we reach
+ the end of the PT.
+ */
+ if (!psMMUHeap->bHasSparseMappings)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Invalid PT for alloc at VAddr:0x%08X (VaddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
+ }
+
+ /* advance the sTmpDevVAddr by one page */
+ sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
+
+ /* Try to unmap the remaining allocation pages */
+ continue;
+ }
+
+ /* setup pointer to the first entry in the PT page */
+ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
+
+ /* Is PTPageCpuVAddr valid ? */
+ if (!pui32Tmp)
+ {
+ continue;
+ }
+
+ CheckPT(ppsPTInfoList[0]);
+
+ /* Decrement the valid page count only if the current page is valid*/
+ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
+ {
+ ppsPTInfoList[0]->ui32ValidPTECount--;
+ }
+ else
+ {
+ if (!psMMUHeap->bHasSparseMappings)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Page is already invalid for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
+ }
+ }
+
+ /* The valid PTE count should not go below zero */
+ PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
+ MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr);
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ /* point the PT entry to the dummy data page */
+ pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_VALID;
+#else
+ /* invalidate entry */
+#if defined(FIX_HW_BRN_31620)
+ BRN31620InvalidatePageTableEntry(psMMUHeap->psMMUContext, ui32PDIndex, ui32PTIndex, &pui32Tmp[ui32PTIndex]);
+#else
+ pui32Tmp[ui32PTIndex] = 0;
+#endif
+#endif
+ MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr);
+ CheckPT(ppsPTInfoList[0]);
+ }
+
+ /*
+ Free a page table if we can.
+ */
+ if (ppsPTInfoList[0] && (ppsPTInfoList[0]->ui32ValidPTECount == 0)
+ )
+ {
+#if defined(FIX_HW_BRN_31620)
+ if (BRN31620FreePageTable(psMMUHeap, ui32PDIndex) == IMG_TRUE)
+ {
+ bInvalidateDirectoryCache = IMG_TRUE;
+ }
+#else
+ _DeferredFreePageTable(psMMUHeap, ui32PDIndex - psMMUHeap->ui32PDBaseIndex, IMG_TRUE);
+ bInvalidateDirectoryCache = IMG_TRUE;
+#endif
+ }
+
+ /* advance the sTmpDevVAddr by one page */
+ sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
+ }
+
+ if(bInvalidateDirectoryCache)
+ {
+ MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->psDevInfo);
+ }
+ else
+ {
+ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
+ }
+
+#if defined(PDUMP)
+ MMU_PDumpPageTables(psMMUHeap,
+ sDevVAddr,
+ psMMUHeap->ui32DataPageSize * ui32PageCount,
+ IMG_TRUE,
+ hUniqueTag);
+#endif /* #if defined(PDUMP) */
+}
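+
+/*
+ A minimal sketch (kept out of the build with #if 0) of the address
+ split performed in the unmap loop above: the top bits of a device
+ virtual address select the PD entry and the middle bits select the
+ PT entry, using the per-heap shifts and masks set up in MMU_Create.
+*/
+#if 0
+static IMG_VOID ExampleSplitDevVAddr(MMU_HEAP *psMMUHeap,
+                                     IMG_DEV_VIRTADDR sDevVAddr,
+                                     IMG_UINT32 *pui32PDIndex,
+                                     IMG_UINT32 *pui32PTIndex)
+{
+	/* top bits: page directory entry */
+	*pui32PDIndex = sDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
+	/* middle bits: entry within that page table */
+	*pui32PTIndex = (sDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
+}
+#endif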
+
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_FreePageTables
+
+ PURPOSE: Call back from RA_Free to zero page table entries used by freed
+ spans.
+
+ PARAMETERS: In: pvMMUHeap
+ In: ui32Start
+ In: ui32End
+ In: hUniqueTag - A unique ID for use as a tag identifier
+ RETURNS: None
+******************************************************************************/
+static IMG_VOID MMU_FreePageTables(IMG_PVOID pvMMUHeap,
+ IMG_SIZE_T ui32Start,
+ IMG_SIZE_T ui32End,
+ IMG_HANDLE hUniqueTag)
+{
+ MMU_HEAP *pMMUHeap = (MMU_HEAP*)pvMMUHeap;
+ IMG_DEV_VIRTADDR Start;
+
+ Start.uiAddr = (IMG_UINT32)ui32Start;
+
+ MMU_UnmapPagesAndFreePTs(pMMUHeap, Start, (IMG_UINT32)((ui32End - ui32Start) >> pMMUHeap->ui32PTShift), hUniqueTag);
+}
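+
+/*
+ A minimal sketch (kept out of the build with #if 0) of the span-to-page
+ conversion above: with 4K data pages (ui32PTShift == 12) a freed span
+ of 0x10000 bytes unmaps 16 pages.
+*/
+#if 0
+static IMG_UINT32 ExampleSpanToPageCount(MMU_HEAP *psMMUHeap,
+                                         IMG_SIZE_T uiStart,
+                                         IMG_SIZE_T uiEnd)
+{
+	/* byte span shifted down by the data page shift */
+	return (IMG_UINT32)((uiEnd - uiStart) >> psMMUHeap->ui32PTShift);
+}
+#endif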
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_Create
+
+ PURPOSE: Create an mmu device virtual heap.
+
+ PARAMETERS: In: psMMUContext - MMU context
+ In: psDevArena - device memory resource arena
+ Out: ppsVMArena - virtual mapping arena
+ RETURNS: MMU_HEAP* - the new heap, or IMG_NULL on failure
+******************************************************************************/
+MMU_HEAP *
+MMU_Create (MMU_CONTEXT *psMMUContext,
+ DEV_ARENA_DESCRIPTOR *psDevArena,
+ RA_ARENA **ppsVMArena,
+ PDUMP_MMU_ATTRIB **ppsMMUAttrib)
+{
+ MMU_HEAP *pMMUHeap;
+ IMG_UINT32 ui32ScaleSize;
+
+ PVR_UNREFERENCED_PARAMETER(ppsMMUAttrib);
+
+ PVR_ASSERT (psDevArena != IMG_NULL);
+
+ if (psDevArena == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid parameter"));
+ return IMG_NULL;
+ }
+
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof (MMU_HEAP),
+ (IMG_VOID **)&pMMUHeap, IMG_NULL,
+ "MMU Heap");
+ if (pMMUHeap == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to OSAllocMem failed"));
+ return IMG_NULL;
+ }
+
+ pMMUHeap->psMMUContext = psMMUContext;
+ pMMUHeap->psDevArena = psDevArena;
+
+ /*
+ generate page table and data page mask and shift values
+ based on the data page size
+ */
+ switch(pMMUHeap->psDevArena->ui32DataPageSize)
+ {
+ case 0x1000:
+ ui32ScaleSize = 0;
+ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4K;
+ break;
+#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
+ case 0x4000:
+ ui32ScaleSize = 2;
+ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_16K;
+ break;
+ case 0x10000:
+ ui32ScaleSize = 4;
+ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_64K;
+ break;
+ case 0x40000:
+ ui32ScaleSize = 6;
+ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_256K;
+ break;
+ case 0x100000:
+ ui32ScaleSize = 8;
+ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_1M;
+ break;
+ case 0x400000:
+ ui32ScaleSize = 10;
+ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4M;
+ break;
+#endif /* #if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE) */
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid data page size"));
+ goto ErrorFreeHeap;
+ }
+
+ /* number of bits of address offset into the data page */
+ pMMUHeap->ui32DataPageSize = psDevArena->ui32DataPageSize;
+ pMMUHeap->ui32DataPageBitWidth = SGX_MMU_PAGE_SHIFT + ui32ScaleSize;
+ pMMUHeap->ui32DataPageMask = pMMUHeap->ui32DataPageSize - 1;
+ /* number of bits of address indexing into a pagetable */
+ pMMUHeap->ui32PTShift = pMMUHeap->ui32DataPageBitWidth;
+ pMMUHeap->ui32PTBitWidth = SGX_MMU_PT_SHIFT - ui32ScaleSize;
+ pMMUHeap->ui32PTMask = SGX_MMU_PT_MASK & (SGX_MMU_PT_MASK<<ui32ScaleSize);
+ pMMUHeap->ui32PTSize = (IMG_UINT32)(1UL<<pMMUHeap->ui32PTBitWidth) * sizeof(IMG_UINT32);
+
+ /* note: PT size must be at least 4 entries, even for 4MB data page size */
+ if(pMMUHeap->ui32PTSize < 4 * sizeof(IMG_UINT32))
+ {
+ pMMUHeap->ui32PTSize = 4 * sizeof(IMG_UINT32);
+ }
+ pMMUHeap->ui32PTNumEntriesAllocated = pMMUHeap->ui32PTSize >> 2;
+
+ /* find the number of actual PT entries per PD entry range. For 4MB data
+ * pages we only use the first entry although the PT has 16 byte allocation/alignment
+ * (since the 4 LSBs of the PDE are reserved for control) */
+ pMMUHeap->ui32PTNumEntriesUsable = (IMG_UINT32)(1UL << pMMUHeap->ui32PTBitWidth);
+
+ /* number of bits of address indexing into a page directory */
+ pMMUHeap->ui32PDShift = pMMUHeap->ui32PTBitWidth + pMMUHeap->ui32PTShift;
+ pMMUHeap->ui32PDBitWidth = SGX_FEATURE_ADDRESS_SPACE_SIZE - pMMUHeap->ui32PTBitWidth - pMMUHeap->ui32DataPageBitWidth;
+ pMMUHeap->ui32PDMask = SGX_MMU_PD_MASK & (SGX_MMU_PD_MASK>>(32-SGX_FEATURE_ADDRESS_SPACE_SIZE));
+
+ /* External system cache violates this rule */
+#if !defined (SUPPORT_EXTERNAL_SYSTEM_CACHE)
+ /*
+ The heap must start on a PT boundary to avoid PT sharing across heaps
+ The only exception is the first heap which can start at any address
+ from 0 to the end of the first PT boundary
+ */
+ if(psDevArena->BaseDevVAddr.uiAddr > (pMMUHeap->ui32DataPageMask | pMMUHeap->ui32PTMask))
+ {
+ /*
+ if for some reason the first heap starts after the end of the first PT boundary
+ but is not aligned to a PT boundary then the assert will trigger unnecessarily
+ */
+ PVR_ASSERT ((psDevArena->BaseDevVAddr.uiAddr
+ & (pMMUHeap->ui32DataPageMask
+ | pMMUHeap->ui32PTMask)) == 0);
+ }
+#endif
+ /* how many PT entries do we need? */
+ pMMUHeap->ui32PTETotalUsable = pMMUHeap->psDevArena->ui32Size >> pMMUHeap->ui32PTShift;
+
+ /* calculate the PD Base index for the Heap (required for page mapping) */
+ pMMUHeap->ui32PDBaseIndex = (pMMUHeap->psDevArena->BaseDevVAddr.uiAddr & pMMUHeap->ui32PDMask) >> pMMUHeap->ui32PDShift;
+
+ /*
+ how many page tables?
+ round the entry count up to the nearest page-table-sized block
+ */
+ pMMUHeap->ui32PageTableCount = (pMMUHeap->ui32PTETotalUsable + pMMUHeap->ui32PTNumEntriesUsable - 1)
+ >> pMMUHeap->ui32PTBitWidth;
+ PVR_ASSERT(pMMUHeap->ui32PageTableCount > 0);
+
+ /* Create the arena */
+ pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
+ psDevArena->BaseDevVAddr.uiAddr,
+ psDevArena->ui32Size,
+ IMG_NULL,
+ MAX(HOST_PAGESIZE(), pMMUHeap->ui32DataPageSize),
+ IMG_NULL,
+ IMG_NULL,
+ &MMU_FreePageTables,
+ pMMUHeap);
+
+ if (pMMUHeap->psVMArena == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to RA_Create failed"));
+ goto ErrorFreePagetables;
+ }
+
+#if defined(PDUMP)
+ /* setup per-heap PDUMP MMU attributes */
+ MMU_SetPDumpAttribs(&pMMUHeap->sMMUAttrib,
+ psMMUContext->psDeviceNode,
+ pMMUHeap->ui32DataPageMask,
+ pMMUHeap->ui32PTSize);
+ *ppsMMUAttrib = &pMMUHeap->sMMUAttrib;
+
+ PDUMPCOMMENT("Create MMU device from arena %s (Size == 0x%x, DataPageSize == 0x%x, BaseDevVAddr == 0x%x)",
+ psDevArena->pszName,
+ psDevArena->ui32Size,
+ pMMUHeap->ui32DataPageSize,
+ psDevArena->BaseDevVAddr.uiAddr);
+#endif /* PDUMP */
+
+ /*
+ And return the RA for VM arena management
+ */
+ *ppsVMArena = pMMUHeap->psVMArena;
+
+ return pMMUHeap;
+
+ /* error exit: fall through to here on failure */
+ErrorFreePagetables:
+ _DeferredFreePageTables (pMMUHeap);
+
+ErrorFreeHeap:
+ OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
+ /*not nulling pointer, out of scope*/
+
+ return IMG_NULL;
+}
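+
+/*
+ A worked example (kept out of the build with #if 0) of the shift
+ derivation above for the default 4K data page, assuming the usual SGX
+ values SGX_MMU_PAGE_SHIFT == 12 and SGX_MMU_PT_SHIFT == 10: bits [11:0]
+ of a device virtual address offset into the data page, bits [21:12]
+ index the page table, and the bits above index the page directory.
+*/
+#if 0
+static IMG_VOID ExampleDerive4KShifts(IMG_UINT32 *pui32PTShift, IMG_UINT32 *pui32PDShift)
+{
+	IMG_UINT32 ui32ScaleSize = 0;						/* 4K data pages */
+	IMG_UINT32 ui32DataPageBitWidth = SGX_MMU_PAGE_SHIFT + ui32ScaleSize;	/* 12 */
+	IMG_UINT32 ui32PTBitWidth = SGX_MMU_PT_SHIFT - ui32ScaleSize;		/* 10, assumed */
+
+	*pui32PTShift = ui32DataPageBitWidth;					/* 12 */
+	*pui32PDShift = ui32PTBitWidth + *pui32PTShift;				/* 22 */
+}
+#endif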
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_Delete
+
+ PURPOSE: Delete an MMU device virtual heap.
+
+ PARAMETERS: In: pMMUHeap - The MMU heap to delete.
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_Delete (MMU_HEAP *pMMUHeap)
+{
+ if (pMMUHeap != IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Delete"));
+
+ if(pMMUHeap->psVMArena)
+ {
+ RA_Delete (pMMUHeap->psVMArena);
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Delete MMU device from arena %s (BaseDevVAddr == 0x%x, PT count for deferred free == 0x%x)",
+ pMMUHeap->psDevArena->pszName,
+ pMMUHeap->psDevArena->BaseDevVAddr.uiAddr,
+ pMMUHeap->ui32PageTableCount);
+#endif /* PDUMP */
+
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ EnableHostAccess(pMMUHeap->psMMUContext);
+#endif
+ _DeferredFreePageTables (pMMUHeap);
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ DisableHostAccess(pMMUHeap->psMMUContext);
+#endif
+
+ OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
+ /*not nulling pointer, copy on stack*/
+ }
+}
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_Alloc
+ PURPOSE: Allocate space in an mmu's virtual address space.
+ PARAMETERS: In: pMMUHeap - MMU to allocate on.
+ In: uSize - Size in bytes to allocate.
+ Out: pActualSize - If non null receives actual size allocated.
+ In: uFlags - Allocation flags.
+ In: uDevVAddrAlignment - Required alignment.
+ Out: DevVAddr - Receives base address of allocation.
+ RETURNS: IMG_TRUE - Success
+ IMG_FALSE - Failure
+******************************************************************************/
+IMG_BOOL
+MMU_Alloc (MMU_HEAP *pMMUHeap,
+ IMG_SIZE_T uSize,
+ IMG_SIZE_T *pActualSize,
+ IMG_UINT32 uFlags,
+ IMG_UINT32 uDevVAddrAlignment,
+ IMG_DEV_VIRTADDR *psDevVAddr)
+{
+ IMG_BOOL bStatus;
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
+ uSize, uFlags, uDevVAddrAlignment));
+
+ /*
+ Only allocate a VM address if the caller did not supply one
+ */
+ if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
+ {
+ IMG_UINTPTR_T uiAddr;
+
+ bStatus = RA_Alloc (pMMUHeap->psVMArena,
+ uSize,
+ pActualSize,
+ IMG_NULL,
+ 0,
+ uDevVAddrAlignment,
+ 0,
+ IMG_NULL,
+ 0,
+ &uiAddr);
+ if(!bStatus)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: RA_Alloc of VMArena failed"));
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Alloc of DevVAddr failed from heap %s ID%d",
+ pMMUHeap->psDevArena->pszName,
+ pMMUHeap->psDevArena->ui32HeapID));
+ return bStatus;
+ }
+
+ psDevVAddr->uiAddr = IMG_CAST_TO_DEVVADDR_UINT(uiAddr);
+ }
+
+ #ifdef SUPPORT_SGX_MMU_BYPASS
+ EnableHostAccess(pMMUHeap->psMMUContext);
+ #endif
+
+ /* allocate page tables to cover allocation as required */
+ bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, (IMG_UINT32)uSize);
+
+ #ifdef SUPPORT_SGX_MMU_BYPASS
+ DisableHostAccess(pMMUHeap->psMMUContext);
+ #endif
+
+ if (!bStatus)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _DeferredAllocPagetables failed"));
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Failed to alloc pagetable(s) for DevVAddr 0x%8.8x from heap %s ID%d",
+ psDevVAddr->uiAddr,
+ pMMUHeap->psDevArena->pszName,
+ pMMUHeap->psDevArena->ui32HeapID));
+ if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
+ {
+ /* free the VM address */
+ RA_Free (pMMUHeap->psVMArena, psDevVAddr->uiAddr, IMG_FALSE);
+ }
+ }
+
+ return bStatus;
+}
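+
+/*
+ A minimal usage sketch (kept out of the build with #if 0) of MMU_Alloc,
+ letting the heap pick the device virtual address (no
+ PVRSRV_MEM_USER_SUPPLIED_DEVVADDR flag). The heap pointer is assumed to
+ come from an earlier MMU_Create call.
+*/
+#if 0
+static IMG_BOOL ExampleAllocDevVAddr(MMU_HEAP *psMMUHeap,
+                                     IMG_SIZE_T uSize,
+                                     IMG_DEV_VIRTADDR *psDevVAddr)
+{
+	/* NULL actual size, no flags, data-page alignment; page tables are
+	   allocated on success and the VM range is freed again on failure */
+	return MMU_Alloc(psMMUHeap, uSize, IMG_NULL, 0,
+	                 psMMUHeap->ui32DataPageSize, psDevVAddr);
+}
+#endif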
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_Free
+ PURPOSE: Free space in an mmu's virtual address space.
+ PARAMETERS: In: pMMUHeap - MMU to deallocate on.
+ In: DevVAddr - Base address to deallocate.
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_Free (MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
+{
+ PVR_ASSERT (pMMUHeap != IMG_NULL);
+
+ if (pMMUHeap == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter"));
+ return;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "MMU_Free: Freeing DevVAddr 0x%08X from heap %s ID%d",
+ DevVAddr.uiAddr,
+ pMMUHeap->psDevArena->pszName,
+ pMMUHeap->psDevArena->ui32HeapID));
+
+ if((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
+ (DevVAddr.uiAddr + ui32Size <= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr + pMMUHeap->psDevArena->ui32Size))
+ {
+ RA_Free (pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
+ return;
+ }
+
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Free: Couldn't free DevVAddr %08X from heap %s ID%d (not in range of heap))",
+ DevVAddr.uiAddr,
+ pMMUHeap->psDevArena->pszName,
+ pMMUHeap->psDevArena->ui32HeapID));
+}
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_Enable
+
+ PURPOSE: Enable an MMU. Establishes page tables, takes the MMU out
+ of bypass and waits for the MMU to acknowledge that it is enabled.
+
+ PARAMETERS: In: pMMUHeap - the mmu
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_Enable (MMU_HEAP *pMMUHeap)
+{
+ PVR_UNREFERENCED_PARAMETER(pMMUHeap);
+ /* SGX mmu is always enabled (stub function) */
+}
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_Disable
+
+ PURPOSE: Disable an MMU, taking it into bypass.
+
+ PARAMETERS: In: pMMUHeap - the mmu
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_Disable (MMU_HEAP *pMMUHeap)
+{
+ PVR_UNREFERENCED_PARAMETER(pMMUHeap);
+ /* SGX mmu is always enabled (stub function) */
+}
+
+#if defined(FIX_HW_BRN_31620)
+/*!
+******************************************************************************
+ FUNCTION: MMU_GetCacheFlushRange
+
+ PURPOSE: Returns (and clears) the bit mask showing which PD cache
+ lines have changed since the last query.
+
+ PARAMETERS: In: pMMUContext - the mmu context
+ Out: pui32RangeMask - Bit mask showing which PD cache
+ lines have changed
+ RETURNS: None
+******************************************************************************/
+
+IMG_VOID MMU_GetCacheFlushRange(MMU_CONTEXT *pMMUContext, IMG_UINT32 *pui32RangeMask)
+{
+ IMG_UINT32 i;
+
+ for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
+ {
+ pui32RangeMask[i] = pMMUContext->ui32PDChangeMask[i];
+
+ /* Clear bit mask for the next set of allocations */
+ pMMUContext->ui32PDChangeMask[i] = 0;
+ }
+}
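+
+/*
+ A minimal sketch (kept out of the build with #if 0) of how a set bit
+ (word i, bit j) in the returned range mask maps back to a PD index,
+ mirroring the arithmetic of the BRN31620 cache-line walk earlier in
+ this file.
+*/
+#if 0
+static IMG_UINT32 ExampleFlushBitToPDIndex(IMG_UINT32 i, IMG_UINT32 j)
+{
+	return (((i * BRN31620_CACHE_FLUSH_BITS_SIZE) + j)
+			* BRN31620_PDES_PER_CACHE_LINE_SIZE) + BRN31620_DUMMY_PDE_INDEX;
+}
+#endif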
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_GetPDPhysAddr
+
+ PURPOSE: Gets the device physical address of the MMU context's PD.
+
+ PARAMETERS: In: pMMUContext - the mmu context
+ Out: psDevPAddr - Address of PD
+ RETURNS: None
+******************************************************************************/
+
+IMG_VOID MMU_GetPDPhysAddr(MMU_CONTEXT *pMMUContext, IMG_DEV_PHYADDR *psDevPAddr)
+{
+ *psDevPAddr = pMMUContext->sPDDevPAddr;
+}
+
+#endif
+#if defined(PDUMP)
+/*!
+******************************************************************************
+ FUNCTION: MMU_PDumpPageTables
+
+ PURPOSE: PDump the linear mapping for a range of pages at a specified
+ virtual address.
+
+ PARAMETERS: In: pMMUHeap - the mmu.
+ In: DevVAddr - the device virtual address.
+ In: uSize - size of memory range in bytes
+ In: bForUnmap - IMG_TRUE if dumping for an unmap
+ In: hUniqueTag - A unique ID for use as a tag identifier
+ RETURNS: None
+******************************************************************************/
+static IMG_VOID
+MMU_PDumpPageTables (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SIZE_T uSize,
+ IMG_BOOL bForUnmap,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_UINT32 ui32NumPTEntries;
+ IMG_UINT32 ui32PTIndex;
+ IMG_UINT32 *pui32PTEntry;
+
+ MMU_PT_INFO **ppsPTInfoList;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 ui32PTDumpCount;
+
+#if defined(FIX_HW_BRN_31620)
+ PVRSRV_SGXDEV_INFO *psDevInfo = pMMUHeap->psMMUContext->psDevInfo;
+#endif
+ /* find number of PT entries to dump */
+ ui32NumPTEntries = (IMG_UINT32)((uSize + pMMUHeap->ui32DataPageMask) >> pMMUHeap->ui32PTShift);
+
+ /* find the index/offset in PD entries */
+ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+ /* set the base PT info */
+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
+
+ /* find the index/offset of the first PT entry in the first PT page */
+ ui32PTIndex = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
+
+ /* pdump the PT Page modification */
+ PDUMPCOMMENT("Page table mods (num entries == %08X) %s", ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
+
+ /* walk the PT pages, dumping as we go */
+ while(ui32NumPTEntries > 0)
+ {
+ MMU_PT_INFO* psPTInfo = *ppsPTInfoList++;
+
+ if(ui32NumPTEntries <= pMMUHeap->ui32PTNumEntriesUsable - ui32PTIndex)
+ {
+ ui32PTDumpCount = ui32NumPTEntries;
+ }
+ else
+ {
+ ui32PTDumpCount = pMMUHeap->ui32PTNumEntriesUsable - ui32PTIndex;
+ }
+
+ if (psPTInfo)
+ {
+#if defined(FIX_HW_BRN_31620)
+ IMG_UINT32 i;
+#endif
+ IMG_UINT32 ui32Flags = 0;
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? PDUMP_FLAGS_PERSISTENT : 0;
+#endif
+ pui32PTEntry = (IMG_UINT32*)psPTInfo->PTPageCpuVAddr;
+#if defined(FIX_HW_BRN_31620)
+ if ((ui32PDIndex % (BRN31620_PDE_CACHE_FILL_SIZE/BRN31620_PT_ADDRESS_RANGE_SIZE)) == BRN31620_DUMMY_PDE_INDEX)
+ {
+ for (i=ui32PTIndex;i<(ui32PTIndex + ui32PTDumpCount);i++)
+ {
+ if (pui32PTEntry[i] == ((psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_DUMMY_PAGE
+ | SGX_MMU_PTE_READONLY
+ | SGX_MMU_PTE_VALID))
+ {
+ PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[i], sizeof(IMG_UINT32), ui32Flags, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
+ }
+ else
+ {
+ PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[i], sizeof(IMG_UINT32), ui32Flags, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
+ }
+ }
+ }
+ else
+#endif
+ {
+ PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[ui32PTIndex], ui32PTDumpCount * sizeof(IMG_UINT32), ui32Flags, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
+ }
+ }
+
+ /* decrement PT entries left */
+ ui32NumPTEntries -= ui32PTDumpCount;
+
+ /* reset offset in page */
+ ui32PTIndex = 0;
+
+#if defined(FIX_HW_BRN_31620)
+ /* For 31620 we need to know which PD index we're working on */
+ ui32PDIndex++;
+#endif
+ }
+
+ PDUMPCOMMENT("Finished page table mods %s", bForUnmap ? "(for unmap)" : "");
+}
+#endif /* #if defined(PDUMP) */
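+
+/*
+ A minimal sketch (kept out of the build with #if 0) of the per-PT clamp
+ used by the dump loop above: each pass covers either all remaining
+ entries or the rest of the current page table, whichever is smaller.
+*/
+#if 0
+static IMG_UINT32 ExampleDumpChunk(MMU_HEAP *psMMUHeap,
+                                   IMG_UINT32 ui32Remaining,
+                                   IMG_UINT32 ui32PTIndex)
+{
+	IMG_UINT32 ui32Space = psMMUHeap->ui32PTNumEntriesUsable - ui32PTIndex;
+
+	return (ui32Remaining <= ui32Space) ? ui32Remaining : ui32Space;
+}
+#endif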
+
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_MapPage
+
+ PURPOSE: Create a mapping for one page at a specified virtual address.
+
+ PARAMETERS: In: pMMUHeap - the mmu.
+ In: DevVAddr - the device virtual address.
+ In: DevPAddr - the device physical address of the page to map.
+ In: ui32MemFlags - BM r/w/cache flags
+ RETURNS: None
+******************************************************************************/
+static IMG_VOID
+MMU_MapPage (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_DEV_PHYADDR DevPAddr,
+ IMG_UINT32 ui32MemFlags)
+{
+ IMG_UINT32 ui32Index;
+ IMG_UINT32 *pui32Tmp;
+ IMG_UINT32 ui32MMUFlags = 0;
+ MMU_PT_INFO **ppsPTInfoList;
+
+ /* check the physical alignment of the memory to map */
+ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
+
+ /*
+ unravel the read/write/cache flags
+ */
+ if(((PVRSRV_MEM_READ|PVRSRV_MEM_WRITE) & ui32MemFlags) == (PVRSRV_MEM_READ|PVRSRV_MEM_WRITE))
+ {
+ /* read/write */
+ ui32MMUFlags = 0;
+ }
+ else if(PVRSRV_MEM_READ & ui32MemFlags)
+ {
+ /* read only */
+ ui32MMUFlags |= SGX_MMU_PTE_READONLY;
+ }
+ else if(PVRSRV_MEM_WRITE & ui32MemFlags)
+ {
+ /* write only */
+ ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
+ }
+
+ /* cache coherency */
+ if(PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
+ {
+ ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
+ }
+
+#if !defined(FIX_HW_BRN_25503)
+ /* EDM protection */
+ if(PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
+ {
+ ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
+ }
+#endif
+
+ /*
+ we receive a device physical address for the page that is to be mapped
+ and a device virtual address representing where it should be mapped to
+ */
+
+ /* find the index/offset in PD entries */
+ ui32Index = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+ /* and advance to the first PT info list */
+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
+
+ CheckPT(ppsPTInfoList[0]);
+
+ /* find the index/offset of the first PT in the first PT page */
+ ui32Index = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
+
+ /* setup pointer to the first entry in the PT page */
+ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
+
+#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ {
+ IMG_UINT32 uTmp = pui32Tmp[ui32Index];
+
+ /* Is the current page already valid? (it should not be, unless it was mapped and never unmapped) */
+#if defined(FIX_HW_BRN_31620)
+ if ((uTmp & SGX_MMU_PTE_VALID) && ((DevVAddr.uiAddr & BRN31620_PDE_CACHE_FILL_MASK) != BRN31620_DUMMY_PAGE_OFFSET))
+#else
+ if ((uTmp & SGX_MMU_PTE_VALID) != 0)
+#endif
+
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page is already valid for alloc at VAddr:0x%08X PDIdx:%u PTIdx:%u",
+ DevVAddr.uiAddr,
+ DevVAddr.uiAddr >> pMMUHeap->ui32PDShift,
+ ui32Index ));
+ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page table entry value: 0x%08X", uTmp));
+ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Physical page to map: 0x%08X", DevPAddr.uiAddr));
+#if PT_DUMP
+ DumpPT(ppsPTInfoList[0]);
+#endif
+ }
+#if !defined(FIX_HW_BRN_31620)
+ PVR_ASSERT((uTmp & SGX_MMU_PTE_VALID) == 0);
+#endif
+ }
+#endif
+
+ /* One more valid entry in the page table. */
+ ppsPTInfoList[0]->ui32ValidPTECount++;
+
+ MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr);
+ /* map in the physical page */
+ pui32Tmp[ui32Index] = ((DevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ & ((~pMMUHeap->ui32DataPageMask)>>SGX_MMU_PTE_ADDR_ALIGNSHIFT))
+ | SGX_MMU_PTE_VALID
+ | ui32MMUFlags;
+ MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr);
+ CheckPT(ppsPTInfoList[0]);
+}
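+
+/*
+	Illustrative sketch (compiled out; not part of the driver build): how
+	MMU_MapPage decomposes a device virtual address and packs a PTE. All
+	shift/mask values come from the heap at run time; the example values
+	in the comments assume 4kB data pages and are hypothetical.
+*/
+#if 0
+static IMG_VOID MMU_MapPageSketch(MMU_HEAP *pMMUHeap,
+								  IMG_DEV_VIRTADDR DevVAddr,
+								  IMG_DEV_PHYADDR DevPAddr)
+{
+	/* PD index: selects the page table covering this address
+	   (e.g. 0x00403000 >> 22 == 1, assuming 4kB pages) */
+	IMG_UINT32 ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+	/* PT index: selects the entry within that page table
+	   (e.g. (0x00403000 & 0x3FF000) >> 12 == 3) */
+	IMG_UINT32 ui32PTIndex = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask)
+								>> pMMUHeap->ui32PTShift;
+
+	/* PTE: page-aligned physical address, right-aligned by
+	   SGX_MMU_PTE_ADDR_ALIGNSHIFT, OR'd with the flag bits */
+	IMG_UINT32 ui32PTE = ((DevPAddr.uiAddr >> SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+						& ((~pMMUHeap->ui32DataPageMask) >> SGX_MMU_PTE_ADDR_ALIGNSHIFT))
+						| SGX_MMU_PTE_VALID;
+
+	PVR_UNREFERENCED_PARAMETER(ui32PDIndex);
+	PVR_UNREFERENCED_PARAMETER(ui32PTIndex);
+	PVR_UNREFERENCED_PARAMETER(ui32PTE);
+}
+#endif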
+
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_MapScatter
+
+ PURPOSE: Create a linear mapping for a range of pages at a specified
+ virtual address.
+
+ PARAMETERS: In: pMMUHeap - the mmu.
+ In: DevVAddr - the device virtual address.
+ In: psSysAddr - the device physical address of the page to
+ map.
+ In: uSize - size of memory range in bytes
+ In: ui32MemFlags - page table flags.
+ In: hUniqueTag - A unique ID for use as a tag identifier
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_MapScatter (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SYS_PHYADDR *psSysAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag)
+{
+#if defined(PDUMP)
+ IMG_DEV_VIRTADDR MapBaseDevVAddr;
+#endif /*PDUMP*/
+ IMG_UINT32 uCount, i;
+ IMG_DEV_PHYADDR DevPAddr;
+
+ PVR_ASSERT (pMMUHeap != IMG_NULL);
+
+#if defined(PDUMP)
+ MapBaseDevVAddr = DevVAddr;
+#else
+ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
+#endif /*PDUMP*/
+
+ for (i=0, uCount=0; uCount<uSize; i++, uCount+=pMMUHeap->ui32DataPageSize)
+ {
+ IMG_SYS_PHYADDR sSysAddr;
+
+ sSysAddr = psSysAddr[i];
+
+ /* check the physical alignment of the memory to map */
+ PVR_ASSERT((sSysAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
+
+ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
+
+		PVR_DPF ((PVR_DBG_MESSAGE,
+				"MMU_MapScatter: devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
+				DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize));
+
+		MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
+		DevVAddr.uiAddr += pMMUHeap->ui32DataPageSize;
+ }
+
+#if defined(PDUMP)
+ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
+#endif /* #if defined(PDUMP) */
+}
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_MapPages
+
+ PURPOSE: Create a linear mapping for a range of pages at a specified
+ virtual address.
+
+ PARAMETERS: In: pMMUHeap - the mmu.
+ In: DevVAddr - the device virtual address.
+ In: SysPAddr - the system physical address of the page to
+ map.
+ In: uSize - size of memory range in bytes
+ In: ui32MemFlags - page table flags.
+ In: hUniqueTag - A unique ID for use as a tag identifier
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_MapPages (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SYS_PHYADDR SysPAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_DEV_PHYADDR DevPAddr;
+#if defined(PDUMP)
+ IMG_DEV_VIRTADDR MapBaseDevVAddr;
+#endif /*PDUMP*/
+ IMG_UINT32 uCount;
+ IMG_UINT32 ui32VAdvance;
+ IMG_UINT32 ui32PAdvance;
+
+ PVR_ASSERT (pMMUHeap != IMG_NULL);
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_MapPages: heap:%s, heap_id:%d devVAddr=%08X, SysPAddr=%08X, size=0x%x",
+ pMMUHeap->psDevArena->pszName,
+ pMMUHeap->psDevArena->ui32HeapID,
+ DevVAddr.uiAddr,
+ SysPAddr.uiAddr,
+ uSize));
+
+ /* set the virtual and physical advance */
+ ui32VAdvance = pMMUHeap->ui32DataPageSize;
+ ui32PAdvance = pMMUHeap->ui32DataPageSize;
+
+#if defined(PDUMP)
+ MapBaseDevVAddr = DevVAddr;
+#else
+ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
+#endif /*PDUMP*/
+
+ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
+
+ /* check the physical alignment of the memory to map */
+ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
+
+ /*
+ for dummy allocations there is only one physical
+ page backing the virtual range
+ */
+ if(ui32MemFlags & PVRSRV_MEM_DUMMY)
+ {
+ ui32PAdvance = 0;
+ }
+
+ for (uCount=0; uCount<uSize; uCount+=ui32VAdvance)
+ {
+ MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
+ DevVAddr.uiAddr += ui32VAdvance;
+ DevPAddr.uiAddr += ui32PAdvance;
+ }
+
+#if defined(PDUMP)
+ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
+#endif /* #if defined(PDUMP) */
+}
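+
+/*
+	Illustrative sketch (compiled out): the effect of PVRSRV_MEM_DUMMY in
+	MMU_MapPages above. With the flag set, ui32PAdvance is zero, so every
+	virtual page in the range is backed by the same single physical page.
+	V, P and S below are hypothetical placeholders.
+*/
+#if 0
+	/* uSize = 3 pages, base virtual V, base physical P, page size S:   */
+	/* normal:           V+0*S -> P+0*S, V+1*S -> P+1*S, V+2*S -> P+2*S */
+	/* PVRSRV_MEM_DUMMY: V+0*S -> P,     V+1*S -> P,     V+2*S -> P     */
+#endif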
+
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_MapPagesSparse
+
+ PURPOSE: Create a sparse mapping for a range of pages at a specified
+ virtual address, mapping only the chunks flagged in pabMapChunk.
+
+ PARAMETERS: In: pMMUHeap - the mmu.
+ In: DevVAddr - the device virtual address.
+ In: SysPAddr - the system physical address of the page to
+ map.
+ In: ui32ChunkSize - Size of the chunk (must be page multiple)
+ In: ui32NumVirtChunks - Number of virtual chunks
+ In: ui32NumPhysChunks - Number of physical chunks
+ In: pabMapChunk - Mapping array
+ In: ui32MemFlags - page table flags.
+ In: hUniqueTag - A unique ID for use as a tag identifier
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_MapPagesSparse (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SYS_PHYADDR SysPAddr,
+ IMG_UINT32 ui32ChunkSize,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_BOOL *pabMapChunk,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_DEV_PHYADDR DevPAddr;
+#if defined(PDUMP)
+ IMG_DEV_VIRTADDR MapBaseDevVAddr;
+#endif /*PDUMP*/
+ IMG_UINT32 uCount;
+ IMG_UINT32 ui32VAdvance;
+ IMG_UINT32 ui32PAdvance;
+ IMG_SIZE_T uSizeVM = ui32ChunkSize * ui32NumVirtChunks;
+#if !defined(PVRSRV_NEED_PVR_DPF)
+ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+#endif
+
+ PVR_ASSERT (pMMUHeap != IMG_NULL);
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_MapPagesSparse: heap:%s, heap_id:%d devVAddr=%08X, SysPAddr=%08X, VM space=0x%x, PHYS space=0x%x",
+ pMMUHeap->psDevArena->pszName,
+ pMMUHeap->psDevArena->ui32HeapID,
+ DevVAddr.uiAddr,
+ SysPAddr.uiAddr,
+ uSizeVM,
+ ui32ChunkSize * ui32NumPhysChunks));
+
+ /* set the virtual and physical advance */
+ ui32VAdvance = pMMUHeap->ui32DataPageSize;
+ ui32PAdvance = pMMUHeap->ui32DataPageSize;
+
+#if defined(PDUMP)
+ MapBaseDevVAddr = DevVAddr;
+#else
+ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
+#endif /*PDUMP*/
+
+ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
+
+ /* check the physical alignment of the memory to map */
+ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
+
+ /*
+ for dummy allocations there is only one physical
+ page backing the virtual range
+ */
+ if(ui32MemFlags & PVRSRV_MEM_DUMMY)
+ {
+ ui32PAdvance = 0;
+ }
+
+ for (uCount=0; uCount<uSizeVM; uCount+=ui32VAdvance)
+ {
+ if (pabMapChunk[uCount/ui32ChunkSize])
+ {
+ MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
+ DevPAddr.uiAddr += ui32PAdvance;
+ }
+ DevVAddr.uiAddr += ui32VAdvance;
+ }
+ pMMUHeap->bHasSparseMappings = IMG_TRUE;
+
+#if defined(PDUMP)
+ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSizeVM, IMG_FALSE, hUniqueTag);
+#endif /* #if defined(PDUMP) */
+}
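+
+/*
+	Illustrative sketch (compiled out): chunk selection in
+	MMU_MapPagesSparse above. The byte offset uCount picks the chunk via
+	uCount / ui32ChunkSize; only chunks flagged in pabMapChunk receive
+	physical backing, and DevPAddr only advances for pages actually
+	mapped, while the virtual cursor always advances. Values below are
+	hypothetical.
+*/
+#if 0
+	/* ui32ChunkSize = 2 pages, ui32NumVirtChunks = 3,
+	   pabMapChunk = { IMG_TRUE, IMG_FALSE, IMG_TRUE }:
+	     virtual pages 0-1 -> physical pages 0-1
+	     virtual pages 2-3 -> left unmapped
+	     virtual pages 4-5 -> physical pages 2-3 */
+#endif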
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_MapShadow
+
+ PURPOSE: Create a mapping for a range of pages from either a CPU
+ virtual address (or, if that is NULL, an hOSMemHandle) to a specified
+ device virtual address.
+
+ PARAMETERS: In: pMMUHeap - the mmu.
+ In: MapBaseDevVAddr - A page aligned device virtual address
+ to start mapping from.
+ In: uByteSize - A page aligned mapping length in bytes.
+ In: CpuVAddr - A page aligned CPU virtual address.
+ In: hOSMemHandle - An alternative OS specific memory handle
+ for mapping RAM without a CPU virtual
+ address
+ Out: pDevVAddr - deprecated - It used to return a byte aligned
+ device virtual address corresponding to the
+ cpu virtual address (When CpuVAddr wasn't
+ constrained to be page aligned.) Now it just
+ returns MapBaseDevVAddr. Unaligned semantics
+ can easily be handled above this API if required.
+ In: hUniqueTag - A unique ID for use as a tag identifier
+ In: ui32MemFlags - page table flags.
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_MapShadow (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR MapBaseDevVAddr,
+ IMG_SIZE_T uByteSize,
+ IMG_CPU_VIRTADDR CpuVAddr,
+ IMG_HANDLE hOSMemHandle,
+ IMG_DEV_VIRTADDR *pDevVAddr,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_UINT32 i;
+ IMG_UINT32 uOffset = 0;
+ IMG_DEV_VIRTADDR MapDevVAddr;
+ IMG_UINT32 ui32VAdvance;
+ IMG_UINT32 ui32PAdvance;
+
+#if !defined (PDUMP)
+ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
+#endif
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "MMU_MapShadow: DevVAddr:%08X, Bytes:0x%x, CPUVAddr:%08X",
+ MapBaseDevVAddr.uiAddr,
+ uByteSize,
+ (IMG_UINTPTR_T)CpuVAddr));
+
+ /* set the virtual and physical advance */
+ ui32VAdvance = pMMUHeap->ui32DataPageSize;
+ ui32PAdvance = pMMUHeap->ui32DataPageSize;
+
+ /* note: can't do useful check on the CPU Addr other than it being at least 4k alignment */
+ PVR_ASSERT(((IMG_UINTPTR_T)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
+ PVR_ASSERT(((IMG_UINT32)uByteSize & pMMUHeap->ui32DataPageMask) == 0);
+ pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
+
+ /*
+ for dummy allocations there is only one physical
+ page backing the virtual range
+ */
+ if(ui32MemFlags & PVRSRV_MEM_DUMMY)
+ {
+ ui32PAdvance = 0;
+ }
+
+ /* Loop through cpu memory and map page by page */
+ MapDevVAddr = MapBaseDevVAddr;
+ for (i=0; i<uByteSize; i+=ui32VAdvance)
+ {
+ IMG_CPU_PHYADDR CpuPAddr;
+ IMG_DEV_PHYADDR DevPAddr;
+
+ if(CpuVAddr)
+ {
+ CpuPAddr = OSMapLinToCPUPhys (hOSMemHandle,
+ (IMG_VOID *)((IMG_UINTPTR_T)CpuVAddr + uOffset));
+ }
+ else
+ {
+ CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
+ }
+ DevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
+
+ /* check the physical alignment of the memory to map */
+ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "Offset=0x%x: CpuVAddr=%08X, CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
+ uOffset,
+ (IMG_UINTPTR_T)CpuVAddr + uOffset,
+ CpuPAddr.uiAddr,
+ MapDevVAddr.uiAddr,
+ DevPAddr.uiAddr));
+
+ MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
+
+ /* loop update */
+ MapDevVAddr.uiAddr += ui32VAdvance;
+ uOffset += ui32PAdvance;
+ }
+
+#if defined(PDUMP)
+ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE, hUniqueTag);
+#endif /* #if defined(PDUMP) */
+}
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_MapShadowSparse
+
+ PURPOSE: Create a mapping for a range of pages from either a CPU
+ virtual address (or, if that is NULL, an hOSMemHandle) to a specified
+ device virtual address.
+
+ PARAMETERS: In: pMMUHeap - the mmu.
+ In: MapBaseDevVAddr - A page aligned device virtual address
+ to start mapping from.
+ In: ui32ChunkSize - Size of the chunk (must be page multiple)
+ In: ui32NumVirtChunks - Number of virtual chunks
+ In: ui32NumPhysChunks - Number of physical chunks
+ In: pabMapChunk - Mapping array
+ In: CpuVAddr - A page aligned CPU virtual address.
+ In: hOSMemHandle - An alternative OS specific memory handle
+ for mapping RAM without a CPU virtual
+ address
+ Out: pDevVAddr - deprecated - It used to return a byte aligned
+ device virtual address corresponding to the
+ cpu virtual address (When CpuVAddr wasn't
+ constrained to be page aligned.) Now it just
+ returns MapBaseDevVAddr. Unaligned semantics
+ can easily be handled above this API if required.
+ In: hUniqueTag - A unique ID for use as a tag identifier
+ In: ui32MemFlags - page table flags.
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_MapShadowSparse (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR MapBaseDevVAddr,
+ IMG_UINT32 ui32ChunkSize,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_BOOL *pabMapChunk,
+ IMG_CPU_VIRTADDR CpuVAddr,
+ IMG_HANDLE hOSMemHandle,
+ IMG_DEV_VIRTADDR *pDevVAddr,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_UINT32 i;
+ IMG_UINT32 uOffset = 0;
+ IMG_DEV_VIRTADDR MapDevVAddr;
+ IMG_UINT32 ui32VAdvance;
+ IMG_UINT32 ui32PAdvance;
+ IMG_SIZE_T uiSizeVM = ui32ChunkSize * ui32NumVirtChunks;
+ IMG_UINT32 ui32ChunkIndex = 0;
+ IMG_UINT32 ui32ChunkOffset = 0;
+#if !defined(PVRSRV_NEED_PVR_DPF)
+ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+#endif
+#if !defined (PDUMP)
+ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
+#endif
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "MMU_MapShadowSparse: DevVAddr:%08X, VM space:0x%x, CPUVAddr:%08X PHYS space:0x%x",
+ MapBaseDevVAddr.uiAddr,
+ uiSizeVM,
+ (IMG_UINTPTR_T)CpuVAddr,
+ ui32ChunkSize * ui32NumPhysChunks));
+
+ /* set the virtual and physical advance */
+ ui32VAdvance = pMMUHeap->ui32DataPageSize;
+ ui32PAdvance = pMMUHeap->ui32DataPageSize;
+
+ /* note: can't do useful check on the CPU Addr other than it being at least 4k alignment */
+ PVR_ASSERT(((IMG_UINTPTR_T)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
+ PVR_ASSERT(((IMG_UINT32)uiSizeVM & pMMUHeap->ui32DataPageMask) == 0);
+ pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
+
+ /* Shouldn't come through the sparse interface */
+ PVR_ASSERT((ui32MemFlags & PVRSRV_MEM_DUMMY) == 0);
+
+ /* Loop through cpu memory and map page by page */
+ MapDevVAddr = MapBaseDevVAddr;
+ for (i=0; i<uiSizeVM; i+=ui32VAdvance)
+ {
+ IMG_CPU_PHYADDR CpuPAddr;
+ IMG_DEV_PHYADDR DevPAddr;
+
+ if (pabMapChunk[i/ui32ChunkSize])
+ /*if (pabMapChunk[ui32ChunkIndex])*/
+ {
+ if(CpuVAddr)
+ {
+ CpuPAddr = OSMapLinToCPUPhys (hOSMemHandle,
+ (IMG_VOID *)((IMG_UINTPTR_T)CpuVAddr + uOffset));
+ }
+ else
+ {
+ CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
+ }
+ DevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
+
+ /* check the physical alignment of the memory to map */
+ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "Offset=0x%x: CpuVAddr=%08X, CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
+ uOffset,
+ (IMG_UINTPTR_T)CpuVAddr + uOffset,
+ CpuPAddr.uiAddr,
+ MapDevVAddr.uiAddr,
+ DevPAddr.uiAddr));
+
+ MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
+ uOffset += ui32PAdvance;
+ }
+
+		/* loop update */
+		MapDevVAddr.uiAddr += ui32VAdvance;
+		ui32ChunkOffset += ui32VAdvance;
+
+		/* keep the chunk bookkeeping in step with the virtual cursor */
+		if (ui32ChunkOffset == ui32ChunkSize)
+		{
+			ui32ChunkIndex++;
+			ui32ChunkOffset = 0;
+		}
+ }
+
+ pMMUHeap->bHasSparseMappings = IMG_TRUE;
+#if defined(PDUMP)
+ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uiSizeVM, IMG_FALSE, hUniqueTag);
+#endif /* #if defined(PDUMP) */
+}
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_UnmapPages
+
+ PURPOSE: unmap pages and invalidate virtual address
+
+ PARAMETERS: In: psMMUHeap - the mmu.
+ In: sDevVAddr - the device virtual address.
+ In: ui32PageCount - page count
+ In: hUniqueTag - A unique ID for use as a tag identifier
+
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_UnmapPages (MMU_HEAP *psMMUHeap,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_UINT32 uPageSize = psMMUHeap->ui32DataPageSize;
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 ui32PTIndex;
+ IMG_UINT32 *pui32Tmp;
+
+#if !defined (PDUMP)
+ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
+#endif
+
+ /* setup tmp devvaddr to base of allocation */
+ sTmpDevVAddr = sDevVAddr;
+
+ for(i=0; i<ui32PageCount; i++)
+ {
+ MMU_PT_INFO **ppsPTInfoList;
+
+ /* find the index/offset in PD entries */
+ ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
+
+ /* and advance to the first PT info list */
+ ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
+
+ /* find the index/offset of the first PT in the first PT page */
+ ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
+
+		/* Is the PT page valid? If not, skip this page: sparse heaps
+		   legitimately contain unmapped pages, so only report an error
+		   for non-sparse heaps (and never dereference a NULL PT). */
+		if (!ppsPTInfoList[0])
+		{
+			if (!psMMUHeap->bHasSparseMappings)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: ERROR Invalid PT for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",
+						sTmpDevVAddr.uiAddr,
+						sDevVAddr.uiAddr,
+						i,
+						ui32PDIndex,
+						ui32PTIndex));
+			}
+
+			/* advance the sTmpDevVAddr by one page */
+			sTmpDevVAddr.uiAddr += uPageSize;
+
+			/* Try to unmap the remaining allocation pages */
+			continue;
+		}
+
+ CheckPT(ppsPTInfoList[0]);
+
+ /* setup pointer to the first entry in the PT page */
+ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
+
+ /* Decrement the valid page count only if the current page is valid*/
+ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
+ {
+ ppsPTInfoList[0]->ui32ValidPTECount--;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page is already invalid for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",
+ sTmpDevVAddr.uiAddr,
+ sDevVAddr.uiAddr,
+ i,
+ ui32PDIndex,
+ ui32PTIndex));
+ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page table entry value: 0x%08X", pui32Tmp[ui32PTIndex]));
+ }
+
+ /* The page table count should not go below zero */
+ PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
+
+ MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr);
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ /* point the PT entry to the dummy data page */
+ pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_VALID;
+#else
+ /* invalidate entry */
+#if defined(FIX_HW_BRN_31620)
+ BRN31620InvalidatePageTableEntry(psMMUHeap->psMMUContext, ui32PDIndex, ui32PTIndex, &pui32Tmp[ui32PTIndex]);
+#else
+ pui32Tmp[ui32PTIndex] = 0;
+#endif
+#endif
+ MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr);
+
+ CheckPT(ppsPTInfoList[0]);
+
+ /* advance the sTmpDevVAddr by one page */
+ sTmpDevVAddr.uiAddr += uPageSize;
+ }
+
+ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
+
+#if defined(PDUMP)
+ MMU_PDumpPageTables (psMMUHeap, sDevVAddr, uPageSize*ui32PageCount, IMG_TRUE, hUniqueTag);
+#endif /* #if defined(PDUMP) */
+}
+
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_GetPhysPageAddr
+
+ PURPOSE: extracts physical address from MMU page tables
+
+ PARAMETERS: In: pMMUHeap - the mmu
+ PARAMETERS: In: sDevVPageAddr - the virtual address to extract physical
+ page mapping from
+ RETURNS: IMG_DEV_PHYADDR - the physical page address (0 if not mapped)
+******************************************************************************/
+IMG_DEV_PHYADDR
+MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr)
+{
+ IMG_UINT32 *pui32PageTable;
+ IMG_UINT32 ui32Index;
+ IMG_DEV_PHYADDR sDevPAddr;
+ MMU_PT_INFO **ppsPTInfoList;
+
+ /* find the index/offset in PD entries */
+ ui32Index = sDevVPageAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+ /* and advance to the first PT info list */
+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
+ if (!ppsPTInfoList[0])
+ {
+ /* Heaps with sparse mappings are allowed invalid pages */
+ if (!pMMUHeap->bHasSparseMappings)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"MMU_GetPhysPageAddr: Not mapped in at 0x%08x", sDevVPageAddr.uiAddr));
+ }
+ sDevPAddr.uiAddr = 0;
+ return sDevPAddr;
+ }
+
+ /* find the index/offset of the first PT in the first PT page */
+ ui32Index = (sDevVPageAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
+
+ /* setup pointer to the first entry in the PT page */
+ pui32PageTable = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
+
+ /* read back physical page */
+ sDevPAddr.uiAddr = pui32PageTable[ui32Index];
+
+ /* Mask off non-address bits */
+ sDevPAddr.uiAddr &= ~(pMMUHeap->ui32DataPageMask>>SGX_MMU_PTE_ADDR_ALIGNSHIFT);
+
+ /* and align the address */
+ sDevPAddr.uiAddr <<= SGX_MMU_PTE_ADDR_ALIGNSHIFT;
+
+ return sDevPAddr;
+}
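+
+/*
+	Illustrative worked example (compiled out) of the PTE decode above,
+	assuming 4kB data pages (ui32DataPageMask == 0xFFF) and
+	SGX_MMU_PTE_ADDR_ALIGNSHIFT == 0; the PTE value is hypothetical.
+*/
+#if 0
+	/* PTE read back from the page table:      0x10234003 (addr|flags)  */
+	/* mask off non-address bits (& ~0xFFF):   0x10234000               */
+	/* shift left by ALIGNSHIFT (0):           0x10234000               */
+	/* -> physical page address                0x10234000               */
+#endif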
+
+
+IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext)
+{
+ return (pMMUContext->sPDDevPAddr);
+}
+
+
+/*!
+******************************************************************************
+ FUNCTION: SGXGetPhysPageAddr
+
+ PURPOSE: Gets DEV and CPU physical address of sDevVAddr
+
+ PARAMETERS: In: hDevMemHeap - device mem heap handle
+ PARAMETERS: In: sDevVAddr - the base virtual address to unmap from
+ PARAMETERS: Out: pDevPAddr - DEV physical address
+ PARAMETERS: Out: pCpuPAddr - CPU physical address
+ RETURNS: PVRSRV_OK, or PVRSRV_ERROR_INVALID_PARAMS if no valid mapping
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR SGXGetPhysPageAddrKM (IMG_HANDLE hDevMemHeap,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEV_PHYADDR *pDevPAddr,
+ IMG_CPU_PHYADDR *pCpuPAddr)
+{
+ MMU_HEAP *pMMUHeap;
+ IMG_DEV_PHYADDR DevPAddr;
+
+ /*
+ Get MMU Heap From hDevMemHeap
+ */
+ pMMUHeap = (MMU_HEAP*)BM_GetMMUHeap(hDevMemHeap);
+
+ DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
+ pCpuPAddr->uiAddr = DevPAddr.uiAddr; /* SysDevPAddrToCPUPAddr(DevPAddr) */
+ pDevPAddr->uiAddr = DevPAddr.uiAddr;
+
+ return (pDevPAddr->uiAddr != 0) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+
+/*!
+******************************************************************************
+ FUNCTION: SGXGetMMUPDAddrKM
+
+ PURPOSE: Gets PD device physical address of hDevMemContext
+
+ PARAMETERS: In: hDevCookie - device cookie
+ PARAMETERS: In: hDevMemContext - memory context
+ PARAMETERS: Out: psPDDevPAddr - MMU PD address
+ RETURNS: PVRSRV_OK or PVRSRV_ERROR_INVALID_PARAMS
+******************************************************************************/
+PVRSRV_ERROR SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_PHYADDR *psPDDevPAddr)
+{
+ if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* return the address */
+ *psPDDevPAddr = ((BM_CONTEXT*)hDevMemContext)->psMMUContext->sPDDevPAddr;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_BIFResetPDAlloc
+
+ PURPOSE: Allocate a dummy Page Directory, Page Table and Page which can
+ be used for dynamic dummy page mapping during SGX reset.
+ Note: since this is only used for hardware recovery, no
+ pdumping is performed.
+
+ PARAMETERS: In: psDevInfo - device info
+ RETURNS: PVRSRV_OK or error
+******************************************************************************/
+PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ PVRSRV_ERROR eError;
+ SYS_DATA *psSysData;
+ RA_ARENA *psLocalDevMemArena;
+ IMG_HANDLE hOSMemHandle = IMG_NULL;
+ IMG_BYTE *pui8MemBlock = IMG_NULL;
+ IMG_SYS_PHYADDR sMemBlockSysPAddr;
+ IMG_CPU_PHYADDR sMemBlockCpuPAddr;
+
+ SysAcquireData(&psSysData);
+
+ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
+
+ /* allocate 3 pages - for the PD, PT and dummy page */
+ if(psLocalDevMemArena == IMG_NULL)
+ {
+ /* UMA system */
+ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ 3 * SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ IMG_NULL,
+ (IMG_VOID **)&pui8MemBlock,
+ &hOSMemHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to OSAllocPages failed"));
+ return eError;
+ }
+
+ /* translate address to device physical */
+ if(pui8MemBlock)
+ {
+ sMemBlockCpuPAddr = OSMapLinToCPUPhys(hOSMemHandle,
+ pui8MemBlock);
+ }
+ else
+ {
+ /* This isn't used in all cases since not all ports currently support
+ * OSMemHandleToCpuPAddr() */
+ sMemBlockCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, 0);
+ }
+ }
+ else
+ {
+ /* non-UMA system */
+
+ if(RA_Alloc(psLocalDevMemArena,
+ 3 * SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to RA_Alloc failed"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* derive the CPU virtual address */
+ sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
+ pui8MemBlock = OSMapPhysToLin(sMemBlockCpuPAddr,
+ SGX_MMU_PAGE_SIZE * 3,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &hOSMemHandle);
+ if(!pui8MemBlock)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR failed to map page tables"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ }
+
+ psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
+ psDevInfo->sBIFResetPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
+ psDevInfo->sBIFResetPTDevPAddr.uiAddr = psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
+ psDevInfo->sBIFResetPageDevPAddr.uiAddr = psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
+ /* override pointer cast warnings */
+ /* PRQA S 3305,509 2 */
+ psDevInfo->pui32BIFResetPD = (IMG_UINT32 *)pui8MemBlock;
+ psDevInfo->pui32BIFResetPT = (IMG_UINT32 *)(pui8MemBlock + SGX_MMU_PAGE_SIZE);
+
+ /* Invalidate entire PD and PT. */
+ OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
+ OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
+ /* Fill dummy page with markers. */
+ OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB, SGX_MMU_PAGE_SIZE);
+
+ return PVRSRV_OK;
+}
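+
+/*
+	Illustrative layout sketch (compiled out) of the three contiguous
+	pages allocated above, with B = sBIFResetPDDevPAddr:
+*/
+#if 0
+	/* B + 0*SGX_MMU_PAGE_SIZE : reset PD   (zeroed - all PDEs invalid) */
+	/* B + 1*SGX_MMU_PAGE_SIZE : reset PT   (zeroed - all PTEs invalid) */
+	/* B + 2*SGX_MMU_PAGE_SIZE : dummy page (filled with 0xDB markers)  */
+#endif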
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_BIFResetPDFree
+
+ PURPOSE: Free resources allocated in MMU_BIFResetPDAlloc.
+
+ PARAMETERS: In: psDevInfo - device info
+ RETURNS: None
+******************************************************************************/
+IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ SYS_DATA *psSysData;
+ RA_ARENA *psLocalDevMemArena;
+ IMG_SYS_PHYADDR sPDSysPAddr;
+
+ SysAcquireData(&psSysData);
+
+ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
+
+ /* free the page directory */
+ if(psLocalDevMemArena == IMG_NULL)
+ {
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ 3 * SGX_MMU_PAGE_SIZE,
+ psDevInfo->pui32BIFResetPD,
+ psDevInfo->hBIFResetPDOSMemHandle);
+ }
+ else
+ {
+ OSUnMapPhysToLin(psDevInfo->pui32BIFResetPD,
+ 3 * SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psDevInfo->hBIFResetPDOSMemHandle);
+
+ sPDSysPAddr = SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->sBIFResetPDDevPAddr);
+ RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
+ }
+}
+
+IMG_VOID MMU_CheckFaultAddr(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDDevPAddr, IMG_UINT32 ui32FaultAddr)
+{
+ MMU_CONTEXT *psMMUContext = psDevInfo->pvMMUContextList;
+
+ while (psMMUContext && (psMMUContext->sPDDevPAddr.uiAddr != ui32PDDevPAddr))
+ {
+ psMMUContext = psMMUContext->psNext;
+ }
+
+ if (psMMUContext)
+ {
+ IMG_UINT32 ui32PTIndex;
+ IMG_UINT32 ui32PDIndex;
+
+ PVR_LOG(("Found MMU context for page fault 0x%08x", ui32FaultAddr));
+ PVR_LOG(("GPU memory context is for PID=%d (%s)", psMMUContext->ui32PID, psMMUContext->szName));
+
+ ui32PTIndex = (ui32FaultAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
+ ui32PDIndex = (ui32FaultAddr & SGX_MMU_PD_MASK) >> (SGX_MMU_PT_SHIFT + SGX_MMU_PAGE_SHIFT);
+
+ if (psMMUContext->apsPTInfoList[ui32PDIndex])
+ {
+ if (psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr)
+ {
+ IMG_UINT32 *pui32Ptr = psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr;
+ IMG_UINT32 ui32PTE = pui32Ptr[ui32PTIndex];
+
+ PVR_LOG(("PDE valid: PTE = 0x%08x (PhysAddr = 0x%08x, %s)",
+ ui32PTE,
+ ui32PTE & SGX_MMU_PTE_ADDR_MASK,
+				 (ui32PTE & SGX_MMU_PTE_VALID) ? "valid" : "invalid"));
+ }
+ else
+ {
+ PVR_LOG(("Found PT info but no CPU address"));
+ }
+ }
+ else
+ {
+ PVR_LOG(("No PDE found"));
+ }
+ }
+}
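+
+/*
+	Illustrative sketch (compiled out): the fault-address decode used in
+	MMU_CheckFaultAddr above, assuming 4kB pages (SGX_MMU_PAGE_SHIFT == 12,
+	SGX_MMU_PT_SHIFT == 10); the fault address is hypothetical.
+*/
+#if 0
+	/* ui32FaultAddr = 0x00C07000                                        */
+	/* ui32PDIndex   = (addr & SGX_MMU_PD_MASK) >> 22 == 3               */
+	/* ui32PTIndex   = (addr & SGX_MMU_PT_MASK) >> 12 == 7               */
+#endif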
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+/*!
+******************************************************************************
+ FUNCTION: MMU_MapExtSystemCacheRegs
+
+ PURPOSE: maps external system cache control registers into SGX MMU
+
+ PARAMETERS: In: psDeviceNode - device node
+ RETURNS: PVRSRV_OK or error
+******************************************************************************/
+PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ IMG_UINT32 *pui32PT;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 ui32PTIndex;
+ PDUMP_MMU_ATTRIB sMMUAttrib;
+
+ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+
+ sMMUAttrib = psDevInfo->sMMUAttrib;
+#if defined(PDUMP)
+ MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode,
+ SGX_MMU_PAGE_MASK,
+ SGX_MMU_PT_SIZE * sizeof(IMG_UINT32));
+#endif
+
+#if defined(PDUMP)
+ {
+ IMG_CHAR szScript[128];
+
+ sprintf(szScript, "MALLOC :EXTSYSCACHE:PA_%08X%08X %u %u 0x%08X\r\n", 0, psDevInfo->sExtSysCacheRegsDevPBase.uiAddr, SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, psDevInfo->sExtSysCacheRegsDevPBase.uiAddr);
+ PDumpOSWriteString2(szScript, PDUMP_FLAGS_CONTINUOUS);
+ }
+#endif
+
+ ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
+ ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
+
+ pui32PT = (IMG_UINT32 *) psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr;
+
+ MakeKernelPageReadWrite(pui32PT);
+ /* map the PT to the registers */
+ pui32PT[ui32PTIndex] = (psDevInfo->sExtSysCacheRegsDevPBase.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_VALID;
+ MakeKernelPageReadOnly(pui32PT);
+#if defined(PDUMP)
+	/* Add the entry to the PT */
+ {
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_UINT32 ui32PageMask;
+ IMG_UINT32 ui32PTE;
+ PVRSRV_ERROR eErr;
+
+ PDUMP_GET_SCRIPT_AND_FILE_STRING();
+
+ ui32PageMask = sMMUAttrib.ui32PTSize - 1;
+ sCpuPAddr = OSMapLinToCPUPhys(psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->hPTPageOSMemHandle, &pui32PT[ui32PTIndex]);
+ sDevPAddr = SysCpuPAddrToDevPAddr(sMMUAttrib.sDevId.eDeviceType, sCpuPAddr);
+ ui32PTE = *((IMG_UINT32 *) (&pui32PT[ui32PTIndex]));
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "WRW :%s:PA_%08X%08X:0x%08X :%s:PA_%08X%08X:0x%08X\r\n",
+ sMMUAttrib.sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)PDUMP_PT_UNIQUETAG,
+ (sDevPAddr.uiAddr) & ~ui32PageMask,
+ (sDevPAddr.uiAddr) & ui32PageMask,
+ "EXTSYSCACHE",
+ (IMG_UINT32)(IMG_UINTPTR_T)PDUMP_PD_UNIQUETAG,
+ (ui32PTE & sMMUAttrib.ui32PDEMask) << sMMUAttrib.ui32PTEAlignShift,
+ ui32PTE & ~sMMUAttrib.ui32PDEMask);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+ FUNCTION: MMU_UnmapExtSystemCacheRegs
+
+ PURPOSE: unmaps external system cache control registers
+
+ PARAMETERS: In: psDeviceNode - device node
+ RETURNS: PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ SYS_DATA *psSysData;
+ RA_ARENA *psLocalDevMemArena;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 ui32PTIndex;
+	IMG_UINT32 *pui32PT = IMG_NULL;
+ PDUMP_MMU_ATTRIB sMMUAttrib;
+
+ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+
+ sMMUAttrib = psDevInfo->sMMUAttrib;
+
+#if defined(PDUMP)
+ MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode,
+ SGX_MMU_PAGE_MASK,
+ SGX_MMU_PT_SIZE * sizeof(IMG_UINT32));
+#endif
+ SysAcquireData(&psSysData);
+
+ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
+
+ /* unmap the MMU page table from the PD */
+ ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
+ ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
+
+ /* Only unmap it if the PT hasn't already been freed */
+ if (psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex])
+ {
+ if (psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr)
+ {
+ pui32PT = (IMG_UINT32 *) psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr;
+ }
+ }
+
+	/* pui32PT stays NULL if the PT was already freed; don't touch it then */
+	if (pui32PT != IMG_NULL)
+	{
+		MakeKernelPageReadWrite(pui32PT);
+		pui32PT[ui32PTIndex] = 0;
+		MakeKernelPageReadOnly(pui32PT);
+
+		PDUMPMEMPTENTRIES(&sMMUAttrib, psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->hPDOSMemHandle, &pui32PT[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+	}
+
+ return PVRSRV_OK;
+}
+#endif
+
+
+#if PAGE_TEST
+/*!
+******************************************************************************
+ FUNCTION: PageTest
+
+ PURPOSE: Tests page table memory, for use during device bring-up.
+
+ PARAMETERS: In: void* pMem - page address (CPU mapped)
+ PARAMETERS: In: IMG_DEV_PHYADDR sDevPAddr - page device phys address
+ RETURNS: None, provides debug output and breaks if an error is detected.
+******************************************************************************/
+static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr)
+{
+ volatile IMG_UINT32 ui32WriteData;
+ volatile IMG_UINT32 ui32ReadData;
+ volatile IMG_UINT32 *pMem32 = (volatile IMG_UINT32 *)pMem;
+ IMG_INT n;
+ IMG_BOOL bOK=IMG_TRUE;
+
+ ui32WriteData = 0xffffffff;
+
+ for (n=0; n<1024; n++)
+ {
+ pMem32[n] = ui32WriteData;
+ ui32ReadData = pMem32[n];
+
+ if (ui32WriteData != ui32ReadData)
+ {
+			/* Mem fault */
+ PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
+ PVR_DBG_BREAK;
+ bOK = IMG_FALSE;
+ }
+ }
+
+ ui32WriteData = 0;
+
+ for (n=0; n<1024; n++)
+ {
+ pMem32[n] = ui32WriteData;
+ ui32ReadData = pMem32[n];
+
+ if (ui32WriteData != ui32ReadData)
+ {
+			/* Mem fault */
+ PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
+ PVR_DBG_BREAK;
+ bOK = IMG_FALSE;
+ }
+ }
+
+ if (bOK)
+ {
+ PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X is OK", sDevPAddr.uiAddr));
+ }
+ else
+ {
+ PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X *** FAILED ***", sDevPAddr.uiAddr));
+ }
+}
+#endif
+
+/******************************************************************************
+ End of file (mmu.c)
+******************************************************************************/
+
+
diff --git a/pvr-source/services4/srvkm/devices/sgx/mmu.h b/pvr-source/services4/srvkm/devices/sgx/mmu.h
new file mode 100644
index 0000000..3c849fc
--- /dev/null
+++ b/pvr-source/services4/srvkm/devices/sgx/mmu.h
@@ -0,0 +1,501 @@
+/*************************************************************************/ /*!
+@Title MMU Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements basic low level control of MMU.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _MMU_H_
+#define _MMU_H_
+
+#include "sgxinfokm.h"
+
+/*
+******************************************************************************
+ FUNCTION: MMU_Initialise
+
+ PURPOSE: Initialise the mmu module.
+
+ PARAMETERS: In: psDeviceNode - device node
+ Out: ppsMMUContext - receives the new MMU context
+ Out: psPDDevPAddr - receives the PD device physical address
+ RETURNS: PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR
+MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_Finalise
+
+ PURPOSE: Finalise the mmu module, deallocate all resources.
+
+ PARAMETERS: In: psMMUContext - the MMU context to tear down.
+ RETURNS: None.
+******************************************************************************/
+IMG_VOID
+MMU_Finalise (MMU_CONTEXT *psMMUContext);
+
+
+/*
+******************************************************************************
+ FUNCTION: MMU_InsertHeap
+
+ PURPOSE: Inserts shared heap into the specified context
+ from the kernel context
+
+ PARAMETERS: In: psMMUContext - the context to insert into
+ In: psMMUHeap - the shared heap to insert
+ RETURNS: None.
+******************************************************************************/
+IMG_VOID
+MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_Create
+
+ PURPOSE: Create an mmu device.
+
+ PARAMETERS: In: psMMUContext - the MMU context
+ In: psDevArena - device memory arena descriptor
+ Out: ppsVMArena - receives the VM arena
+ Out: ppsMMUAttrib - receives the PDump MMU attributes
+ RETURNS: MMU_HEAP pointer, or IMG_NULL on failure
+******************************************************************************/
+MMU_HEAP *
+MMU_Create (MMU_CONTEXT *psMMUContext,
+ DEV_ARENA_DESCRIPTOR *psDevArena,
+ RA_ARENA **ppsVMArena,
+ PDUMP_MMU_ATTRIB **ppsMMUAttrib);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_Delete
+
+ PURPOSE: Delete an mmu device.
+
+ PARAMETERS: In: pMMUHeap - The mmu to delete.
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_Delete (MMU_HEAP *pMMUHeap);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_Alloc
+ PURPOSE: Allocate space in an mmu's virtual address space.
+ PARAMETERS: In: pMMUHeap - MMU to allocate on.
+ In: uSize - Size in bytes to allocate.
+ Out: pActualSize - If non null receives actual size allocated.
+ In: uFlags - Allocation flags.
+ In: uDevVAddrAlignment - Required alignment.
+ Out: pDevVAddr - Receives base address of allocation.
+ RETURNS: IMG_TRUE - Success
+ IMG_FALSE - Failure
+******************************************************************************/
+IMG_BOOL
+MMU_Alloc (MMU_HEAP *pMMUHeap,
+ IMG_SIZE_T uSize,
+ IMG_SIZE_T *pActualSize,
+ IMG_UINT32 uFlags,
+ IMG_UINT32 uDevVAddrAlignment,
+ IMG_DEV_VIRTADDR *pDevVAddr);
+
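+/*
+	Example usage (illustrative only; the flags, sizes and error handling
+	below are hypothetical):
+
+		IMG_DEV_VIRTADDR sDevVAddr;
+		if (MMU_Alloc(pMMUHeap, 4096, IMG_NULL, 0, 4096, &sDevVAddr))
+		{
+			... map pages at sDevVAddr with MMU_MapPages, and later
+			release the range with MMU_Free(pMMUHeap, sDevVAddr, 4096) ...
+		}
+*/
+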
+/*
+******************************************************************************
+ FUNCTION: MMU_Free
+ PURPOSE: Frees space in an mmu's virtual address space.
+ PARAMETERS: In: pMMUHeap - MMU to free on.
+ In: DevVAddr - Base address of allocation.
+ In: ui32Size - Size of allocation in bytes.
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_Free (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_UINT32 ui32Size);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_Enable
+
+ PURPOSE: Enable an mmu. Establishes page tables and takes the mmu out
+ of bypass and waits for the mmu to acknowledge enabled.
+
+ PARAMETERS: In: pMMUHeap - the mmu
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_Enable (MMU_HEAP *pMMUHeap);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_Disable
+
+ PURPOSE: Disable an mmu, takes the mmu into bypass.
+
+ PARAMETERS: In: pMMUHeap - the mmu
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_Disable (MMU_HEAP *pMMUHeap);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_MapPages
+
+ PURPOSE: Create a mapping for a range of pages from a device physical
+ address to a specified device virtual address.
+
+ PARAMETERS: In: pMMUHeap - the mmu.
+ In: DevVAddr - the device virtual address.
+ In: SysPAddr - the system physical address of the page to map.
+ In: uSize - size of memory range in bytes
+ In: ui32MemFlags - page table flags.
+ In: hUniqueTag - A unique ID for use as a tag identifier
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_MapPages (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SYS_PHYADDR SysPAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_MapPagesSparse
+
+ PURPOSE: Create a mapping for a range of pages from a device physical
+ address to a specified device virtual address.
+
+ PARAMETERS: In: pMMUHeap - the mmu.
+ In: DevVAddr - the device virtual address.
+ In: SysPAddr - the system physical address of the page to map.
+ In: ui32ChunkSize - Size of the chunk (must be page multiple)
+ In: ui32NumVirtChunks - Number of virtual chunks
+ In: ui32NumPhysChunks - Number of physical chunks
+ In: pabMapChunk - Mapping array
+ In: ui32MemFlags - page table flags.
+ In: hUniqueTag - A unique ID for use as a tag identifier
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_MapPagesSparse (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SYS_PHYADDR SysPAddr,
+ IMG_UINT32 ui32ChunkSize,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_BOOL *pabMapChunk,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_MapShadow
+
+ PURPOSE: Create a mapping for a range of pages from a CPU virtual
+ address to a specified device virtual address.
+
+ PARAMETERS: In: pMMUHeap - the mmu.
+ In: MapBaseDevVAddr - A page aligned device virtual address
+ to start mapping from.
+ In: uByteSize - A page aligned mapping length in bytes.
+ In: CpuVAddr - A page aligned CPU virtual address.
+ In: hOSMemHandle - An alternative OS specific memory handle
+ for mapping RAM without a CPU virtual
+ address
+ Out: pDevVAddr - deprecated
+ In: hUniqueTag - A unique ID for use as a tag identifier
+ In: ui32MemFlags - page table flags.
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_MapShadow (MMU_HEAP * pMMUHeap,
+ IMG_DEV_VIRTADDR MapBaseDevVAddr,
+ IMG_SIZE_T uByteSize,
+ IMG_CPU_VIRTADDR CpuVAddr,
+ IMG_HANDLE hOSMemHandle,
+ IMG_DEV_VIRTADDR * pDevVAddr,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_MapShadowSparse
+
+ PURPOSE: Create a mapping for a range of pages from a CPU virtual
+ address to a specified device virtual address.
+
+ PARAMETERS: In: pMMUHeap - the mmu.
+ In: MapBaseDevVAddr - A page aligned device virtual address
+ to start mapping from.
+ In: ui32ChunkSize - Size of the chunk (must be page multiple)
+ In: ui32NumVirtChunks - Number of virtual chunks
+ In: ui32NumPhysChunks - Number of physical chunks
+ In: pabMapChunk - Mapping array
+ In: CpuVAddr - A page aligned CPU virtual address.
+ In: hOSMemHandle - An alternative OS specific memory handle
+ for mapping RAM without a CPU virtual
+ address
+ Out: pDevVAddr - deprecated
+ In: hUniqueTag - A unique ID for use as a tag identifier
+ In: ui32MemFlags - page table flags.
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_MapShadowSparse (MMU_HEAP * pMMUHeap,
+ IMG_DEV_VIRTADDR MapBaseDevVAddr,
+ IMG_UINT32 ui32ChunkSize,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_BOOL * pabMapChunk,
+ IMG_CPU_VIRTADDR CpuVAddr,
+ IMG_HANDLE hOSMemHandle,
+ IMG_DEV_VIRTADDR * pDevVAddr,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_UnmapPages
+
+ PURPOSE: unmaps pages and invalidates virtual address.
+
+ PARAMETERS: In: psMMUHeap - the mmu.
+ In: sDevVAddr - the device virtual address.
+ In: ui32PageCount - page count.
+ In: hUniqueTag - A unique ID for use as a tag identifier
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_UnmapPages (MMU_HEAP *psMMUHeap,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount,
+ IMG_HANDLE hUniqueTag);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_MapScatter
+
+ PURPOSE: Create a mapping for a list of pages to a specified device
+ virtual address.
+
+ PARAMETERS: In: pMMUHeap - the mmu.
+ In: DevVAddr - the device virtual address.
+ In: psSysAddr - the list of physical addresses of the pages to
+ map.
+ In: uSize - size of memory range in bytes
+ In: ui32MemFlags - page table flags.
+ In: hUniqueTag - A unique ID for use as a tag identifier
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+MMU_MapScatter (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SYS_PHYADDR *psSysAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag);
+
+
+/*
+******************************************************************************
+ FUNCTION: MMU_GetPhysPageAddr
+
+ PURPOSE: extracts physical address from MMU page tables
+
+ PARAMETERS: In: pMMUHeap - the mmu
+ PARAMETERS: In: sDevVPageAddr - the virtual address to extract physical
+ page mapping from
+ RETURNS: IMG_DEV_PHYADDR
+******************************************************************************/
+IMG_DEV_PHYADDR
+MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
+
+
+/*
+******************************************************************************
+ FUNCTION: MMU_GetPDDevPAddr
+
+ PURPOSE: returns the PD device physical address for the given MMU context (SGX to MMU API)
+
+ PARAMETERS: In: pMMUContext - the mmu
+ RETURNS: IMG_DEV_PHYADDR
+******************************************************************************/
+IMG_DEV_PHYADDR
+MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext);
+
+
+#ifdef SUPPORT_SGX_MMU_BYPASS
+/*
+******************************************************************************
+ FUNCTION: EnableHostAccess
+
+ PURPOSE: Enables Host accesses to device memory, bypassing the device
+ MMU address translation
+
+ PARAMETERS: In: psMMUContext
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+EnableHostAccess (MMU_CONTEXT *psMMUContext);
+
+
+/*
+******************************************************************************
+ FUNCTION: DisableHostAccess
+
+ PURPOSE: Disables Host accesses to device memory, restoring the device
+ MMU address translation
+
+ PARAMETERS: In: psMMUContext
+ RETURNS: None
+******************************************************************************/
+IMG_VOID
+DisableHostAccess (MMU_CONTEXT *psMMUContext);
+#endif
+
+/*
+******************************************************************************
+ FUNCTION: MMU_InvalidateDirectoryCache
+
+ PURPOSE: Invalidates the page directory cache
+
+ PARAMETERS: In: psDevInfo
+ RETURNS: None
+******************************************************************************/
+IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_BIFResetPDAlloc
+
+ PURPOSE: Allocate a dummy Page Directory (plus PT and page) which
+ causes all virtual addresses to page fault during SGX reset.
+
+ PARAMETERS: In: psDevInfo - device info
+ RETURNS: PVRSRV_OK or error
+******************************************************************************/
+PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_BIFResetPDFree
+
+ PURPOSE: Free resources allocated in MMU_BIFResetPDAlloc.
+
+ PARAMETERS: In: psDevInfo - device info
+ RETURNS: None
+******************************************************************************/
+IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo);
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+/*
+******************************************************************************
+ FUNCTION: MMU_MapExtSystemCacheRegs
+
+ PURPOSE: maps external system cache control registers into SGX MMU
+
+ PARAMETERS: In: psDeviceNode - device node
+ RETURNS: PVRSRV_OK or error
+******************************************************************************/
+PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_UnmapExtSystemCacheRegs
+
+ PURPOSE: unmaps external system cache control registers
+
+ PARAMETERS: In: psDeviceNode - device node
+ RETURNS: PVRSRV_OK or error
+******************************************************************************/
+PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode);
+#endif /* #if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) */
+
+/*
+******************************************************************************
+ FUNCTION: MMU_IsHeapShared
+
+ PURPOSE: Is this heap shared?
+ PARAMETERS: In: pMMU_Heap
+ RETURNS: IMG_TRUE if the heap is shared
+******************************************************************************/
+IMG_BOOL MMU_IsHeapShared(MMU_HEAP* pMMU_Heap);
+
+#if defined(FIX_HW_BRN_31620)
+/*
+******************************************************************************
+ FUNCTION: MMU_GetCacheFlushRange
+
+ PURPOSE: Gets the set of PD cache lines that have changed and need flushing.
+
+ PARAMETERS: In: pMMUContext - the mmu context
+ Out: pui32RangeMask - Bit mask showing which PD cache
+ lines have changed
+ RETURNS: None
+******************************************************************************/
+IMG_VOID MMU_GetCacheFlushRange(MMU_CONTEXT *pMMUContext, IMG_UINT32 *pui32RangeMask);
+
+/*
+******************************************************************************
+ FUNCTION: MMU_GetPDPhysAddr
+
+ PURPOSE: Gets the device physical address of the mmu context's PD.
+
+ PARAMETERS: In: pMMUContext - the mmu context
+ Out: psDevPAddr - Address of PD
+ RETURNS: None
+******************************************************************************/
+IMG_VOID MMU_GetPDPhysAddr(MMU_CONTEXT *pMMUContext, IMG_DEV_PHYADDR *psDevPAddr);
+
+#endif
+
+
+IMG_VOID MMU_CheckFaultAddr(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDDevPAddr, IMG_UINT32 ui32RegVal);
+
+#if defined(PDUMP)
+/*
+******************************************************************************
+ FUNCTION: MMU_GetPDumpContextID
+
+ PURPOSE: translates device mem context to unique pdump identifier
+
+ PARAMETERS: In: hDevMemContext - device memory per-process context
+ RETURNS: context identifier used internally in pdump
+******************************************************************************/
+IMG_UINT32 MMU_GetPDumpContextID(IMG_HANDLE hDevMemContext);
+#endif /* #ifdef PDUMP */
+
+#endif /* #ifndef _MMU_H_ */
diff --git a/pvr-source/services4/srvkm/devices/sgx/pb.c b/pvr-source/services4/srvkm/devices/sgx/pb.c
new file mode 100644
index 0000000..4ed18bb
--- /dev/null
+++ b/pvr-source/services4/srvkm/devices/sgx/pb.c
@@ -0,0 +1,493 @@
+/*************************************************************************/ /*!
+@Title Parameter Buffer management functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "services_headers.h"
+#include "sgx_bridge_km.h"
+#include "sgxapi_km.h"
+#include "sgxinfo.h"
+#include "sgxinfokm.h"
+#include "pvr_bridge_km.h"
+#include "pdump_km.h"
+#include "sgxutils.h"
+
+#if !defined(__linux__) && !defined(__QNXNTO__)
+#pragma message("FIXME: Review use of OS_PAGEABLE vs OS_NON_PAGEABLE")
+#endif
+
+#include "lists.h"
+
+static IMPLEMENT_LIST_INSERT(PVRSRV_STUB_PBDESC)
+static IMPLEMENT_LIST_REMOVE(PVRSRV_STUB_PBDESC)
+
+static PRESMAN_ITEM psResItemCreateSharedPB = IMG_NULL;
+static PVRSRV_PER_PROCESS_DATA *psPerProcCreateSharedPB = IMG_NULL;
+
+static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy);
+static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy);
+
+/* override level pointer indirection */
+/* PRQA S 5102 12 */
+IMG_EXPORT PVRSRV_ERROR
+SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevCookie,
+ IMG_BOOL bLockOnFailure,
+ IMG_UINT32 ui32TotalPBSize,
+ IMG_HANDLE *phSharedPBDesc,
+ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
+ IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount)
+{
+ PVRSRV_STUB_PBDESC *psStubPBDesc;
+ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos=IMG_NULL;
+ PVRSRV_SGXDEV_INFO *psSGXDevInfo;
+ PVRSRV_ERROR eError;
+
+ psSGXDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
+
+ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
+ if (psStubPBDesc != IMG_NULL)
+ {
+ IMG_UINT32 i;
+ PRESMAN_ITEM psResItem;
+
+ if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "SGXFindSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
+ ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize));
+ }
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO *)
+ * psStubPBDesc->ui32SubKernelMemInfosCount,
+ (IMG_VOID **)&ppsSharedPBDescSubKernelMemInfos,
+ IMG_NULL,
+ "Array of Kernel Memory Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: OSAllocMem failed"));
+
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ExitNotFound;
+ }
+
+ psResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_SHARED_PB_DESC,
+ psStubPBDesc,
+ 0,
+ &SGXCleanupSharedPBDescCallback);
+
+ if (psResItem == IMG_NULL)
+ {
+			/* Free with the same (pageable) heap type that was used for the allocation above. */
+			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+					  sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDesc->ui32SubKernelMemInfosCount,
+					  ppsSharedPBDescSubKernelMemInfos,
+					  0);
+ /*not nulling pointer, out of scope*/
+
+ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
+
+ eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE;
+ goto ExitNotFound;
+ }
+
+ *ppsSharedPBDescKernelMemInfo = psStubPBDesc->psSharedPBDescKernelMemInfo;
+ *ppsHWPBDescKernelMemInfo = psStubPBDesc->psHWPBDescKernelMemInfo;
+ *ppsBlockKernelMemInfo = psStubPBDesc->psBlockKernelMemInfo;
+ *ppsHWBlockKernelMemInfo = psStubPBDesc->psHWBlockKernelMemInfo;
+
+ *ui32SharedPBDescSubKernelMemInfosCount =
+ psStubPBDesc->ui32SubKernelMemInfosCount;
+
+ *pppsSharedPBDescSubKernelMemInfos = ppsSharedPBDescSubKernelMemInfos;
+
+ for(i=0; i<psStubPBDesc->ui32SubKernelMemInfosCount; i++)
+ {
+ ppsSharedPBDescSubKernelMemInfos[i] =
+ psStubPBDesc->ppsSubKernelMemInfos[i];
+ }
+
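+		/* Found a suitable shared PB: take a reference on it on behalf of the caller. */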
+ psStubPBDesc->ui32RefCount++;
+ *phSharedPBDesc = (IMG_HANDLE)psResItem;
+ return PVRSRV_OK;
+ }
+
+ eError = PVRSRV_OK;
+ if (bLockOnFailure)
+ {
+ if (psResItemCreateSharedPB == IMG_NULL)
+ {
+ psResItemCreateSharedPB = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
+ psPerProc,
+ 0,
+ &SGXCleanupSharedPBDescCreateLockCallback);
+
+ if (psResItemCreateSharedPB == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
+
+ eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE;
+ goto ExitNotFound;
+ }
+ PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL);
+ psPerProcCreateSharedPB = psPerProc;
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_PROCESSING_BLOCKED;
+ }
+ }
+ExitNotFound:
+ *phSharedPBDesc = IMG_NULL;
+
+ return eError;
+}
+
+
+static PVRSRV_ERROR
+SGXCleanupSharedPBDescKM(PVRSRV_STUB_PBDESC *psStubPBDescIn)
+{
+ /*PVRSRV_STUB_PBDESC **ppsStubPBDesc;*/
+ IMG_UINT32 i;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE*)psStubPBDescIn->hDevCookie;
+
+ psStubPBDescIn->ui32RefCount--;
+ if (psStubPBDescIn->ui32RefCount == 0)
+ {
+ IMG_DEV_VIRTADDR sHWPBDescDevVAddr = psStubPBDescIn->sHWPBDescDevVAddr;
+ List_PVRSRV_STUB_PBDESC_Remove(psStubPBDescIn);
+ for(i=0 ; i<psStubPBDescIn->ui32SubKernelMemInfosCount; i++)
+ {
+ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie,
+ psStubPBDescIn->ppsSubKernelMemInfos[i]);
+ }
+
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDescIn->ui32SubKernelMemInfosCount,
+ psStubPBDescIn->ppsSubKernelMemInfos,
+ 0);
+ psStubPBDescIn->ppsSubKernelMemInfos = IMG_NULL;
+
+ PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psBlockKernelMemInfo);
+
+ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWBlockKernelMemInfo);
+
+ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWPBDescKernelMemInfo);
+
+ PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psSharedPBDescKernelMemInfo);
+
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_STUB_PBDESC),
+ psStubPBDescIn,
+ 0);
+ /*not nulling pointer, copy on stack*/
+
+ /* signal the microkernel to clear its sTAHWPBDesc and s3DHWPBDesc values in sTA3DCtl */
+ SGXCleanupRequest(psDeviceNode,
+ &sHWPBDescDevVAddr,
+ PVRSRV_CLEANUPCMD_PB,
+ CLEANUP_WITH_POLL);
+ }
+ return PVRSRV_OK;
+ /*return PVRSRV_ERROR_INVALID_PARAMS;*/
+}
+
+static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy)
+{
+ PVRSRV_STUB_PBDESC *psStubPBDesc = (PVRSRV_STUB_PBDESC *)pvParam;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ return SGXCleanupSharedPBDescKM(psStubPBDesc);
+}
+
+static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy)
+{
+#ifdef DEBUG
+ PVRSRV_PER_PROCESS_DATA *psPerProc = (PVRSRV_PER_PROCESS_DATA *)pvParam;
+ PVR_ASSERT(psPerProc == psPerProcCreateSharedPB);
+#else
+ PVR_UNREFERENCED_PARAMETER(pvParam);
+#endif
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ psPerProcCreateSharedPB = IMG_NULL;
+ psResItemCreateSharedPB = IMG_NULL;
+
+ return PVRSRV_OK;
+}
+
+
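+/*
+ * Descriptive note (added for clarity): releasing a shared PB reference is
+ * routed through the resource manager so that the same cleanup path
+ * (SGXCleanupSharedPBDescCallback) runs whether the reference is dropped
+ * explicitly here or implicitly when the owning process exits.
+ */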
+IMG_EXPORT PVRSRV_ERROR
+SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc)
+{
+ PVR_ASSERT(hSharedPBDesc != IMG_NULL);
+
+ return ResManFreeResByPtr(hSharedPBDesc, CLEANUP_WITH_POLL);
+}
+
+
+IMG_EXPORT PVRSRV_ERROR
+SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevCookie,
+ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo,
+ IMG_UINT32 ui32TotalPBSize,
+ IMG_HANDLE *phSharedPBDesc,
+ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos,
+ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount,
+ IMG_DEV_VIRTADDR sHWPBDescDevVAddr)
+{
+ PVRSRV_STUB_PBDESC *psStubPBDesc=IMG_NULL;
+ PVRSRV_ERROR eRet = PVRSRV_ERROR_INVALID_PERPROC;
+ IMG_UINT32 i;
+ PVRSRV_SGXDEV_INFO *psSGXDevInfo;
+ PRESMAN_ITEM psResItem;
+
+	/*
+	 * The caller must have previously called SGXFindSharedPBDescKM with
+	 * bLockOnFailure set, and not managed to find a suitable shared PB.
+	 */
+ if (psPerProcCreateSharedPB != psPerProc)
+ {
+ goto NoAdd;
+ }
+ else
+ {
+ PVR_ASSERT(psResItemCreateSharedPB != IMG_NULL);
+
+ ResManFreeResByPtr(psResItemCreateSharedPB, CLEANUP_WITH_POLL);
+
+ PVR_ASSERT(psResItemCreateSharedPB == IMG_NULL);
+ PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL);
+ }
+
+ psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
+
+ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
+ if (psStubPBDesc != IMG_NULL)
+ {
+ if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "SGXAddSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
+ ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize));
+
+ }
+
+ /*
+ * We make the caller think the add was successful,
+ * but return the existing shared PB desc rather than
+ * a new one.
+ */
+ psResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_SHARED_PB_DESC,
+ psStubPBDesc,
+ 0,
+ &SGXCleanupSharedPBDescCallback);
+ if (psResItem == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "SGXAddSharedPBDescKM: "
+ "Failed to register existing shared "
+ "PBDesc with the resource manager"));
+ goto NoAddKeepPB;
+ }
+
+ /*
+ * The caller will unreference the PB desc after
+ * a successful add, so up the reference count.
+ */
+ psStubPBDesc->ui32RefCount++;
+
+ *phSharedPBDesc = (IMG_HANDLE)psResItem;
+ eRet = PVRSRV_OK;
+ goto NoAddKeepPB;
+ }
+
+ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_STUB_PBDESC),
+ (IMG_VOID **)&psStubPBDesc,
+ 0,
+ "Stub Parameter Buffer Description") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: Failed to alloc "
+ "StubPBDesc"));
+ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto NoAdd;
+ }
+
+
+ psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL;
+
+ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO *)
+ * ui32SharedPBDescSubKernelMemInfosCount,
+ (IMG_VOID **)&psStubPBDesc->ppsSubKernelMemInfos,
+ 0,
+ "Array of Kernel Memory Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
+ "Failed to alloc "
+ "StubPBDesc->ppsSubKernelMemInfos"));
+ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto NoAdd;
+ }
+
+ if(PVRSRVDissociateMemFromResmanKM(psSharedPBDescKernelMemInfo)
+ != PVRSRV_OK)
+ {
+ goto NoAdd;
+ }
+
+ if(PVRSRVDissociateMemFromResmanKM(psHWPBDescKernelMemInfo)
+ != PVRSRV_OK)
+ {
+ goto NoAdd;
+ }
+
+ if(PVRSRVDissociateMemFromResmanKM(psBlockKernelMemInfo)
+ != PVRSRV_OK)
+ {
+ goto NoAdd;
+ }
+
+ if(PVRSRVDissociateMemFromResmanKM(psHWBlockKernelMemInfo)
+ != PVRSRV_OK)
+ {
+ goto NoAdd;
+ }
+
+ psStubPBDesc->ui32RefCount = 1;
+ psStubPBDesc->ui32TotalPBSize = ui32TotalPBSize;
+ psStubPBDesc->psSharedPBDescKernelMemInfo = psSharedPBDescKernelMemInfo;
+ psStubPBDesc->psHWPBDescKernelMemInfo = psHWPBDescKernelMemInfo;
+ psStubPBDesc->psBlockKernelMemInfo = psBlockKernelMemInfo;
+ psStubPBDesc->psHWBlockKernelMemInfo = psHWBlockKernelMemInfo;
+
+ psStubPBDesc->ui32SubKernelMemInfosCount =
+ ui32SharedPBDescSubKernelMemInfosCount;
+ for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
+ {
+ psStubPBDesc->ppsSubKernelMemInfos[i] = ppsSharedPBDescSubKernelMemInfos[i];
+ if(PVRSRVDissociateMemFromResmanKM(ppsSharedPBDescSubKernelMemInfos[i])
+ != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
+ "Failed to dissociate shared PBDesc "
+ "from process"));
+ goto NoAdd;
+ }
+ }
+
+ psStubPBDesc->sHWPBDescDevVAddr = sHWPBDescDevVAddr;
+
+ psResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_SHARED_PB_DESC,
+ psStubPBDesc,
+ 0,
+ &SGXCleanupSharedPBDescCallback);
+ if (psResItem == IMG_NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
+				 "Failed to register shared PBDesc "
+				 "with the resource manager"));
+ goto NoAdd;
+ }
+ psStubPBDesc->hDevCookie = hDevCookie;
+
+ /* Finally everything was prepared successfully so link the new
+ * PB in to place. */
+ List_PVRSRV_STUB_PBDESC_Insert(&(psSGXDevInfo->psStubPBDescListKM),
+ psStubPBDesc);
+
+ *phSharedPBDesc = (IMG_HANDLE)psResItem;
+
+ return PVRSRV_OK;
+
+NoAdd:
+ if(psStubPBDesc)
+ {
+ if(psStubPBDesc->ppsSubKernelMemInfos)
+ {
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount,
+ psStubPBDesc->ppsSubKernelMemInfos,
+ 0);
+ psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL;
+ }
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_STUB_PBDESC),
+ psStubPBDesc,
+ 0);
+ /*not nulling pointer, out of scope*/
+ }
+
+NoAddKeepPB:
+ for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++)
+ {
+ PVRSRVFreeDeviceMemKM(hDevCookie, ppsSharedPBDescSubKernelMemInfos[i]);
+ }
+
+ PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo);
+ PVRSRVFreeDeviceMemKM(hDevCookie, psHWPBDescKernelMemInfo);
+
+ PVRSRVFreeSharedSysMemoryKM(psBlockKernelMemInfo);
+ PVRSRVFreeDeviceMemKM(hDevCookie, psHWBlockKernelMemInfo);
+
+ return eRet;
+}
+
+/******************************************************************************
+ End of file (pb.c)
+******************************************************************************/
diff --git a/pvr-source/services4/srvkm/devices/sgx/sgx_bridge_km.h b/pvr-source/services4/srvkm/devices/sgx/sgx_bridge_km.h
new file mode 100644
index 0000000..f281c4e
--- /dev/null
+++ b/pvr-source/services4/srvkm/devices/sgx/sgx_bridge_km.h
@@ -0,0 +1,279 @@
+/*************************************************************************/ /*!
+@Title SGX Bridge Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the SGX Bridge code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SGX_BRIDGE_KM_H__)
+#define __SGX_BRIDGE_KM_H__
+
+#include "sgxapi_km.h"
+#include "sgxinfo.h"
+#include "sgxinfokm.h"
+#include "sgx_bridge.h"
+#include "pvr_bridge.h"
+#include "perproc.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+IMG_IMPORT
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK_KM *psKick);
+#else
+PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick);
+#endif
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+IMG_IMPORT
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK_KM *psKick);
+#else
+PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick);
+#endif
+#endif
+
+IMG_IMPORT
+PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle,
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_CCB_KICK_KM *psCCBKick);
+#else
+ SGX_CCB_KICK *psCCBKick);
+#endif
+
+IMG_IMPORT
+PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEV_PHYADDR *pDevPAddr,
+ IMG_CPU_PHYADDR *pCpuPAddr);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_PHYADDR *psPDDevPAddr);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie,
+ SGX_CLIENT_INFO* psClientInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo,
+ SGX_MISC_INFO *psMiscInfo,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hDevMemContext);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32ArraySize,
+ PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData,
+ IMG_UINT32 *pui32DataCount,
+ IMG_UINT32 *pui32ClockSpeed,
+ IMG_UINT32 *pui32HostTimeStamp);
+
+IMG_IMPORT
+PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo,
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
+ IMG_BOOL bWaitForComplete);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle,
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_HEAP_INFO_KM *pasHeapInfo,
+ IMG_DEV_PHYADDR *psPDDevPAddr);
+#else
+ SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo);
+#endif
+
+IMG_IMPORT
+PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevHandle,
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_BRIDGE_INIT_INFO_KM *psInitInfo);
+#else
+ SGX_BRIDGE_INIT_INFO *psInitInfo);
+#endif
+
+/*!
+ * *****************************************************************************
+ * @brief Looks for a parameter buffer description that corresponds to
+ * a buffer of size ui32TotalPBSize, optionally taking the lock
+ * needed for shared PB creation on failure.
+ *
+ * Note that if a PB desc is found, its internal reference counter
+ * is automatically incremented. It is your responsibility to call
+ * SGXUnrefSharedPBDescKM to decrement this reference and free associated
+ * resources when you are done.
+ *
+ * If bLockOnFailure is set and a suitable shared PB isn't found,
+ * an internal flag is set, allowing this process to create a
+ * shared PB. Any other process calling this function with
+ * bLockOnFailure set will receive the return code
+ * PVRSRV_ERROR_PROCESSING_BLOCKED, indicating that it needs
+ * to retry the function call. The internal flag is cleared
+ * when this process creates a shared PB.
+ *
+ * Note: You are responsible for freeing the list returned in
+ * pppsSharedPBDescSubKernelMemInfos
+ * via OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ *               sizeof(PVRSRV_KERNEL_MEM_INFO *)
+ *                 * ui32SharedPBDescSubKernelMemInfosCount,
+ *               ppsSharedPBDescSubKernelMemInfos,
+ *               NULL);
+ *
+ * @param[in] psPerProc
+ * @param[in] hDevCookie
+ * @param[in] bLockOnFailure
+ * @param[in] ui32TotalPBSize
+ * @param[out] phSharedPBDesc
+ * @param[out] ppsSharedPBDescKernelMemInfo
+ * @param[out] ppsHWPBDescKernelMemInfo
+ * @param[out] ppsBlockKernelMemInfo
+ * @param[out] ppsHWBlockKernelMemInfo
+ * @param[out] pppsSharedPBDescSubKernelMemInfos The list of sub-meminfos that
+ *             make up the shared PB description.
+ * @param[out] ui32SharedPBDescSubKernelMemInfosCount
+ *
+ * @return PVRSRV_ERROR
+ ********************************************************************************/
+/* disable QAC pointer level check for over 2 */
+/* PRQA S 5102++ */
+IMG_IMPORT PVRSRV_ERROR
+SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevCookie,
+ IMG_BOOL bLockOnFailure,
+ IMG_UINT32 ui32TotalPBSize,
+ IMG_HANDLE *phSharedPBDesc,
+ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
+ IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount);
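+
+/*
+ * Usage sketch (illustrative only; the loop structure is an assumption drawn
+ * from the bLockOnFailure semantics described above, with the remaining
+ * out-parameters and error handling elided):
+ *
+ *     IMG_HANDLE hSharedPBDesc = IMG_NULL;
+ *     PVRSRV_ERROR eError;
+ *
+ *     do
+ *     {
+ *         eError = SGXFindSharedPBDescKM(psPerProc, hDevCookie,
+ *                                        IMG_TRUE, ui32TotalPBSize,
+ *                                        &hSharedPBDesc, ...);
+ *     } while (eError == PVRSRV_ERROR_PROCESSING_BLOCKED);
+ *
+ * On PVRSRV_OK with a non-NULL hSharedPBDesc an existing shared PB was found
+ * and must eventually be released with SGXUnrefSharedPBDescKM. On PVRSRV_OK
+ * with hSharedPBDesc == IMG_NULL this process now holds the creation lock and
+ * is expected to allocate the PB and publish it via SGXAddSharedPBDescKM
+ * (see below).
+ */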
+
+/*!
+ * *****************************************************************************
+ * @brief Decrements the reference counter and frees all userspace resources
+ * associated with a SharedPBDesc.
+ *
+ * @param hSharedPBDesc
+ *
+ * @return PVRSRV_ERROR
+ ********************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc);
+
+/*!
+ * *****************************************************************************
+ * @brief Links a new SharedPBDesc into a kernel managed list that can
+ * then be queried by other clients.
+ *
+ * As a side effect this function also dissociates the SharedPBDesc
+ * from the calling process so that the memory won't be freed if the
+ * process dies/exits. (The kernel assumes responsibility for the
+ * memory at the same time.)
+ *
+ * As well as the psSharedPBDescKernelMemInfo you must also pass
+ * a complete list of other meminfos that are integral to the
+ * shared PB description. (Although the kernel doesn't have direct
+ * access to the shared PB desc it still needs to be able to
+ * clean up all the associated resources when it is no longer
+ * in use.)
+ *
+ * If the dissociation fails then all the memory associated with
+ * psSharedPBDescKernelMemInfo and all entries in
+ * ppsSharedPBDescSubKernelMemInfos will be freed by kernel services!
+ * Because of this, you are responsible for freeing the corresponding
+ * client meminfos _before_ calling SGXAddSharedPBDescKM.
+ *
+ * This function will return an error unless a successful call to
+ * SGXFindSharedPBDescKM, with bLockOnFailure set, has been made.
+ *
+ * @param psPerProc
+ * @param hDevCookie
+ * @param psSharedPBDescKernelMemInfo
+ * @param psHWPBDescKernelMemInfo
+ * @param psBlockKernelMemInfo
+ * @param psHWBlockKernelMemInfo
+ * @param ui32TotalPBSize The size of the associated parameter buffer
+ * @param phSharedPBDesc Receives a handle to the registered shared PB desc
+ * @param ppsSharedPBDescSubKernelMemInfos A list of other meminfos integral to
+ *                                         the shared PB description.
+ * @param ui32SharedPBDescSubKernelMemInfosCount The number of entries in
+ *                                               ppsSharedPBDescSubKernelMemInfos
+ * @param sHWPBDescDevVAddr The device virtual address of the HWPBDesc
+ *
+ * @return PVRSRV_ERROR
+ ********************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevCookie,
+ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo,
+ IMG_UINT32 ui32TotalPBSize,
+ IMG_HANDLE *phSharedPBDesc,
+					 PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos,
+ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount,
+ IMG_DEV_VIRTADDR sHWPBDescDevVAddr);
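+
+/*
+ * Usage sketch (illustrative only; allocation of the individual meminfos is
+ * elided and the variable names are placeholders): once SGXFindSharedPBDescKM
+ * has returned PVRSRV_OK with a NULL handle (i.e. this process holds the
+ * creation lock), the caller allocates the PB memory and publishes it:
+ *
+ *     eError = SGXAddSharedPBDescKM(psPerProc, hDevCookie,
+ *                                   psSharedPBDescKernelMemInfo,
+ *                                   psHWPBDescKernelMemInfo,
+ *                                   psBlockKernelMemInfo,
+ *                                   psHWBlockKernelMemInfo,
+ *                                   ui32TotalPBSize, &hSharedPBDesc,
+ *                                   ppsSubKernelMemInfos, ui32SubCount,
+ *                                   sHWPBDescDevVAddr);
+ *
+ * A successful add hands ownership of the meminfos to the kernel and clears
+ * the creation lock; the returned handle is later dropped with
+ * SGXUnrefSharedPBDescKM(hSharedPBDesc).
+ */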
+
+
+/*!
+ * *****************************************************************************
+ * @brief Gets device information that is not intended to be passed
+ *        on beyond the srvclient libs.
+ *
+ * @param[in] hDevCookie
+ * @param[out] psSGXInternalDevInfo
+ *
+ * @return PVRSRV_ERROR
+ ********************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_INTERNAL_DEVINFO_KM *psSGXInternalDevInfo);
+#else
+ SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo);
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __SGX_BRIDGE_KM_H__ */
+
+/******************************************************************************
+ End of file (sgx_bridge_km.h)
+******************************************************************************/
diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxconfig.h b/pvr-source/services4/srvkm/devices/sgx/sgxconfig.h
new file mode 100644
index 0000000..b9ebab9
--- /dev/null
+++ b/pvr-source/services4/srvkm/devices/sgx/sgxconfig.h
@@ -0,0 +1,481 @@
+/*************************************************************************/ /*!
+@Title device configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __SGXCONFIG_H__
+#define __SGXCONFIG_H__
+
+#include "sgxdefs.h"
+
+#define DEV_DEVICE_TYPE PVRSRV_DEVICE_TYPE_SGX
+#define DEV_DEVICE_CLASS PVRSRV_DEVICE_CLASS_3D
+
+#define DEV_MAJOR_VERSION 1
+#define DEV_MINOR_VERSION 0
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+#define SGX_KERNEL_DATA_HEAP_OFFSET 0x00001000
+#else
+#define SGX_KERNEL_DATA_HEAP_OFFSET 0x00000000
+#endif
+
+#if !defined(ION_HEAP_SIZE) && defined(SUPPORT_ION)
+ /* Default the Ion heap to 16MB */
+ #define ION_HEAP_SIZE 0x01000000
+#else
+ #define ION_HEAP_SIZE 0
+#endif
+
+
+#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 32
+#if defined(FIX_HW_BRN_31620)
+ #if defined(SGX_FEATURE_2D_HARDWARE)
+ #define SGX_2D_HEAP_BASE 0x04000000
+ #define SGX_2D_HEAP_SIZE (0x08000000-0x04000000-0x00001000)
+ #endif
+
+ #define SGX_GENERAL_HEAP_BASE 0x08000000
+ #define SGX_GENERAL_HEAP_SIZE (0xB8000000-0x00001000)
+
+	/*
+	 * For hybrid PB we have to split the virtual PB range between the
+	 * shared PB and the per-context PB, because we only have one heap
+	 * config per device.
+	 * If hybrid PB is enabled we split the space according to
+	 * HYBRID_SHARED_PB_SIZE, i.e. HYBRID_SHARED_PB_SIZE defines the size
+	 * of the shared PB and the remainder is the size of the per-context PB.
+	 * If hybrid PB is not enabled then we still create both heaps (this
+	 * helps keep the code clean) and define the size of the unused one as 0.
+	 */
+
+ #define SGX_3DPARAMETERS_HEAP_SIZE 0x10000000
+
+ /* By default we split the PB 50/50 */
+#if !defined(HYBRID_SHARED_PB_SIZE)
+ #define HYBRID_SHARED_PB_SIZE (SGX_3DPARAMETERS_HEAP_SIZE >> 1)
+#endif
+#if defined(SUPPORT_HYBRID_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE (HYBRID_SHARED_PB_SIZE)
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (HYBRID_SHARED_PB_SIZE-0x00001000)
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - SGX_SHARED_3DPARAMETERS_SIZE - 0x00001000)
+#else
+#if defined(SUPPORT_PERCONTEXT_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE 0
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE 0
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000)
+#endif
+#if defined(SUPPORT_SHARED_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE SGX_3DPARAMETERS_HEAP_SIZE
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000)
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE 0
+#endif
+#endif
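+
+	/*
+	 * Worked example (illustrative): with SGX_3DPARAMETERS_HEAP_SIZE = 0x10000000
+	 * and the default 50/50 split, HYBRID_SHARED_PB_SIZE = 0x08000000. Under
+	 * SUPPORT_HYBRID_PB this yields a shared heap of 0x08000000 - 0x00001000 =
+	 * 0x07FFF000 bytes and a per-context heap of 0x10000000 - 0x08000000 -
+	 * 0x00001000 = 0x07FFF000 bytes. (The 0x00001000 withheld from each size
+	 * appears to reserve a guard page between heaps, consistent with the other
+	 * heap sizes in this file.)
+	 */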
+
+ #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0xC0000000
+	/* Size is defined above */
+
+	#define SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE (SGX_SHARED_3DPARAMETERS_HEAP_BASE + SGX_SHARED_3DPARAMETERS_SIZE)
+	/* Size is defined above */
+
+ #define SGX_TADATA_HEAP_BASE 0xD0000000
+ #define SGX_TADATA_HEAP_SIZE (0x0D000000-0x00001000)
+
+ #define SGX_SYNCINFO_HEAP_BASE 0xE0000000
+ #define SGX_SYNCINFO_HEAP_SIZE (0x01000000-0x00001000)
+
+ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0xE4000000
+ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x02000000-0x00001000)
+
+ #define SGX_KERNEL_CODE_HEAP_BASE 0xE8000000
+ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
+
+ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0xEC000000
+ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x01C00000-0x00001000)
+
+ #define SGX_KERNEL_DATA_HEAP_BASE (0xF0000000+SGX_KERNEL_DATA_HEAP_OFFSET)
+ #define SGX_KERNEL_DATA_HEAP_SIZE (0x03000000-(0x00001000+SGX_KERNEL_DATA_HEAP_OFFSET))
+
+ /* Actual Pixel and Vertex shared heaps sizes may be reduced by
+ * override - see SGX_USE_CODE_SEGMENT_RANGE_BITS.*/
+ #define SGX_PIXELSHADER_HEAP_BASE 0xF4000000
+ #define SGX_PIXELSHADER_HEAP_SIZE (0x05000000-0x00001000)
+
+ #define SGX_VERTEXSHADER_HEAP_BASE 0xFC000000
+ #define SGX_VERTEXSHADER_HEAP_SIZE (0x02000000-0x00001000)
+#else /* FIX_HW_BRN_31620 */
+ #if defined(SGX_FEATURE_2D_HARDWARE)
+ #define SGX_2D_HEAP_BASE 0x00100000
+ #define SGX_2D_HEAP_SIZE (0x08000000-0x00100000-0x00001000)
+ #endif
+
+ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+ #define SGX_GENERAL_MAPPING_HEAP_BASE 0x08000000
+ #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x08000000-0x00001000)
+ #endif
+
+ #if !defined(SUPPORT_MEMORY_TILING)
+ #if defined (SUPPORT_ION)
+ #define SGX_GENERAL_HEAP_BASE 0x10000000
+ #define SGX_GENERAL_HEAP_SIZE (0xC2000000-ION_HEAP_SIZE-0x00001000)
+
+ #define SGX_ION_HEAP_BASE (SGX_GENERAL_HEAP_BASE+SGX_GENERAL_HEAP_SIZE+0x00001000)
+ #define SGX_ION_HEAP_SIZE (ION_HEAP_SIZE-0x00001000)
+ #else
+ #define SGX_GENERAL_HEAP_BASE 0x10000000
+ #define SGX_GENERAL_HEAP_SIZE (0xC2000000-0x00001000)
+ #endif
+ #else
+ #include <sgx_msvdx_defs.h>
+ /* Create heaps with memory tiling enabled.
+ * SGX HW limit is 10 heaps.
+ */
+ /* Tiled heap space is taken from general heap */
+ #define SGX_GENERAL_HEAP_BASE 0x10000000
+ #define SGX_GENERAL_HEAP_SIZE (0xB5000000-0x00001000)
+
+ #define SGX_VPB_TILED_HEAP_STRIDE TILING_TILE_STRIDE_2K
+ #define SGX_VPB_TILED_HEAP_BASE 0xC5000000
+ #define SGX_VPB_TILED_HEAP_SIZE (0x0D000000-0x00001000)
+
+ /* Check tiled heap base alignment */
+ #if((SGX_VPB_TILED_HEAP_BASE & SGX_BIF_TILING_ADDR_INV_MASK) != 0)
+ #error "sgxconfig.h: SGX_VPB_TILED_HEAP has insufficient alignment"
+ #endif
+
+ #endif /* SUPPORT_MEMORY_TILING */
+
+	/*
+	 * For hybrid PB we have to split the virtual PB range between the
+	 * shared PB and the per-context PB, because we only have one heap
+	 * config per device.
+	 * If hybrid PB is enabled we split the space according to
+	 * HYBRID_SHARED_PB_SIZE, i.e. HYBRID_SHARED_PB_SIZE defines the size
+	 * of the shared PB and the remainder is the size of the per-context PB.
+	 * If hybrid PB is not enabled then we still create both heaps (this
+	 * helps keep the code clean) and define the size of the unused one as 0.
+	 */
+
+ #define SGX_3DPARAMETERS_HEAP_SIZE 0x10000000
+
+ /* By default we split the PB 50/50 */
+#if !defined(HYBRID_SHARED_PB_SIZE)
+ #define HYBRID_SHARED_PB_SIZE (SGX_3DPARAMETERS_HEAP_SIZE >> 1)
+#endif
+#if defined(SUPPORT_HYBRID_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE (HYBRID_SHARED_PB_SIZE)
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (HYBRID_SHARED_PB_SIZE-0x00001000)
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - SGX_SHARED_3DPARAMETERS_SIZE - 0x00001000)
+#else
+#if defined(SUPPORT_PERCONTEXT_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE 0
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE 0
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000)
+#endif
+#if defined(SUPPORT_SHARED_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE SGX_3DPARAMETERS_HEAP_SIZE
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000)
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE 0
+#endif
+#endif
+
+ #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0xD2000000
+	/* Size is defined above */
+
+	#define SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE (SGX_SHARED_3DPARAMETERS_HEAP_BASE + SGX_SHARED_3DPARAMETERS_SIZE)
+	/* Size is defined above */
+
+ #define SGX_TADATA_HEAP_BASE 0xE2000000
+ #define SGX_TADATA_HEAP_SIZE (0x0D000000-0x00001000)
+
+ #define SGX_SYNCINFO_HEAP_BASE 0xEF000000
+ #define SGX_SYNCINFO_HEAP_SIZE (0x01000000-0x00001000)
+
+ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0xF0000000
+ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x02000000-0x00001000)
+
+ #define SGX_KERNEL_CODE_HEAP_BASE 0xF2000000
+ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
+
+ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0xF2400000
+ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x01C00000-0x00001000)
+
+ #define SGX_KERNEL_DATA_HEAP_BASE (0xF4000000+SGX_KERNEL_DATA_HEAP_OFFSET)
+ #define SGX_KERNEL_DATA_HEAP_SIZE (0x05000000-(0x00001000+SGX_KERNEL_DATA_HEAP_OFFSET))
+
+ /* Actual Pixel and Vertex shared heaps sizes may be reduced by
+ * override - see SGX_USE_CODE_SEGMENT_RANGE_BITS.*/
+ #define SGX_PIXELSHADER_HEAP_BASE 0xF9000000
+ #define SGX_PIXELSHADER_HEAP_SIZE (0x05000000-0x00001000)
+
+ #define SGX_VERTEXSHADER_HEAP_BASE 0xFE000000
+ #define SGX_VERTEXSHADER_HEAP_SIZE (0x02000000-0x00001000)
+#endif /* FIX_HW_BRN_31620 */
+ /* signal we've identified the core by the build */
+ #define SGX_CORE_IDENTIFIED
+#endif /* SGX_FEATURE_ADDRESS_SPACE_SIZE == 32 */
+
+#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 28
+
+#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+ #define SGX_GENERAL_MAPPING_HEAP_BASE 0x00001000
+ #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x01800000-0x00001000-0x00001000)
+
+ #define SGX_GENERAL_HEAP_BASE 0x01800000
+ #define SGX_GENERAL_HEAP_SIZE (0x07000000-ION_HEAP_SIZE-0x00001000)
+
+#else
+ #define SGX_GENERAL_HEAP_BASE 0x00001000
+#if defined(SUPPORT_LARGE_GENERAL_HEAP)
+ #define SGX_GENERAL_HEAP_SIZE (0x0B800000-ION_HEAP_SIZE-0x00001000-0x00001000)
+#else
+ #define SGX_GENERAL_HEAP_SIZE (0x08800000-ION_HEAP_SIZE-0x00001000-0x00001000)
+#endif
+#endif
+
+#if defined(SUPPORT_ION)
+ #define SGX_ION_HEAP_BASE (SGX_GENERAL_HEAP_BASE+SGX_GENERAL_HEAP_SIZE+0x00001000)
+ #define SGX_ION_HEAP_SIZE (ION_HEAP_SIZE-0x00001000)
+#endif
+	/*
+	 * For hybrid PB we have to split the virtual PB range between the
+	 * shared PB and the per-context PB, because we only have one heap
+	 * config per device.
+	 * If hybrid PB is enabled we split the space according to
+	 * HYBRID_SHARED_PB_SIZE, i.e. HYBRID_SHARED_PB_SIZE defines the size
+	 * of the shared PB and the remainder is the size of the per-context PB.
+	 * If hybrid PB is not enabled then we still create both heaps (this
+	 * helps keep the code clean) and define the size of the unused one as 0.
+	 */
+#if defined(SUPPORT_LARGE_GENERAL_HEAP)
+ #define SGX_3DPARAMETERS_HEAP_SIZE 0x01000000
+#else
+ #define SGX_3DPARAMETERS_HEAP_SIZE 0x04000000
+#endif
+
+ /* By default we split the PB 50/50 */
+#if !defined(HYBRID_SHARED_PB_SIZE)
+ #define HYBRID_SHARED_PB_SIZE (SGX_3DPARAMETERS_HEAP_SIZE >> 1)
+#endif
+#if defined(SUPPORT_HYBRID_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE (HYBRID_SHARED_PB_SIZE)
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (HYBRID_SHARED_PB_SIZE-0x00001000)
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - SGX_SHARED_3DPARAMETERS_SIZE - 0x00001000)
+#else
+#if defined(SUPPORT_PERCONTEXT_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE 0
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE 0
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000)
+#endif
+#if defined(SUPPORT_SHARED_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE SGX_3DPARAMETERS_HEAP_SIZE
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000)
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE 0
+#endif
+#endif
+
+#if defined(SUPPORT_LARGE_GENERAL_HEAP)
+ #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0x0B800000
+#else
+ #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0x08800000
+#endif
+
+ /* Size is defined above */
+
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE (SGX_SHARED_3DPARAMETERS_HEAP_BASE + SGX_SHARED_3DPARAMETERS_SIZE)
+ /* Size is defined above */
+
+ #define SGX_TADATA_HEAP_BASE 0x0C800000
+ #define SGX_TADATA_HEAP_SIZE (0x01000000-0x00001000)
+
+ #define SGX_SYNCINFO_HEAP_BASE 0x0D800000
+ #define SGX_SYNCINFO_HEAP_SIZE (0x00400000-0x00001000)
+
+ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0x0DC00000
+ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
+
+ #define SGX_KERNEL_CODE_HEAP_BASE 0x0E400000
+ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
+
+ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0x0E800000
+ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
+
+ #define SGX_KERNEL_DATA_HEAP_BASE (0x0F000000+SGX_KERNEL_DATA_HEAP_OFFSET)
+ #define SGX_KERNEL_DATA_HEAP_SIZE (0x00400000-(0x00001000+SGX_KERNEL_DATA_HEAP_OFFSET))
+
+ #define SGX_PIXELSHADER_HEAP_BASE 0x0F400000
+ #define SGX_PIXELSHADER_HEAP_SIZE (0x00500000-0x00001000)
+
+ #define SGX_VERTEXSHADER_HEAP_BASE 0x0FC00000
+ #define SGX_VERTEXSHADER_HEAP_SIZE (0x00200000-0x00001000)
+
+ /* signal we've identified the core by the build */
+ #define SGX_CORE_IDENTIFIED
+
+#endif /* SGX_FEATURE_ADDRESS_SPACE_SIZE == 28 */
+
+#if !defined(SGX_CORE_IDENTIFIED)
+ #error "sgxconfig.h: ERROR: unspecified SGX Core version"
+#endif
+
+/*********************************************************************************
+ *
+ * SGX_PDSPIXEL_CODEDATA_HEAP_BASE + 64MB range must include PDSVERTEX_CODEDATA and KERNEL_CODE heaps
+ *
+ ********************************************************************************/
+#if !defined (SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE)
+ #if ((SGX_KERNEL_CODE_HEAP_BASE + SGX_KERNEL_CODE_HEAP_SIZE - SGX_PDSPIXEL_CODEDATA_HEAP_BASE) > 0x4000000)
+ #error "sgxconfig.h: ERROR: SGX_KERNEL_CODE_HEAP_BASE out of range of SGX_PDSPIXEL_CODEDATA_HEAP_BASE"
+ #endif
+
+ #if ((SGX_PDSVERTEX_CODEDATA_HEAP_BASE + SGX_PDSVERTEX_CODEDATA_HEAP_SIZE - SGX_PDSPIXEL_CODEDATA_HEAP_BASE) > 0x4000000)
+ #error "sgxconfig.h: ERROR: SGX_PDSVERTEX_CODEDATA_HEAP_BASE out of range of SGX_PDSPIXEL_CODEDATA_HEAP_BASE"
+ #endif
+#endif
+
+/*********************************************************************************
+ *
+ * The General Mapping heap must be within the 2D requestor range of the 2D heap base
+ *
+ ********************************************************************************/
+#if defined(SGX_FEATURE_2D_HARDWARE) && defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+ #if ((SGX_GENERAL_MAPPING_HEAP_BASE + SGX_GENERAL_MAPPING_HEAP_SIZE - SGX_2D_HEAP_BASE) >= EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK)
+		#error "sgxconfig.h: ERROR: SGX_GENERAL_MAPPING_HEAP inaccessible by 2D requestor"
+ #endif
+#endif
+
+/*********************************************************************************
+ *
+ * The kernel code heap base must be aligned to a USSE code page
+ *
+ ********************************************************************************/
+#if defined (EURASIA_USE_CODE_PAGE_SIZE)
+ #if ((SGX_KERNEL_CODE_HEAP_BASE & (EURASIA_USE_CODE_PAGE_SIZE - 1)) != 0)
+ #error "sgxconfig.h: ERROR: Kernel code heap base misalignment"
+ #endif
+#endif
+
+/*********************************************************************************
+ *
+ * Heap overlap check
+ *
+ ********************************************************************************/
+#if defined(SGX_FEATURE_2D_HARDWARE)
+ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+ #if ((SGX_2D_HEAP_BASE + SGX_2D_HEAP_SIZE) >= SGX_GENERAL_MAPPING_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_2D_HEAP overlaps SGX_GENERAL_MAPPING_HEAP"
+ #endif
+ #else
+ #if ((SGX_2D_HEAP_BASE + SGX_2D_HEAP_SIZE) >= SGX_GENERAL_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_2D_HEAP overlaps SGX_GENERAL_HEAP_BASE"
+ #endif
+ #endif
+#endif
+
+#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+ #if ((SGX_GENERAL_MAPPING_HEAP_BASE + SGX_GENERAL_MAPPING_HEAP_SIZE) >= SGX_GENERAL_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_GENERAL_MAPPING_HEAP overlaps SGX_GENERAL_HEAP"
+ #endif
+#endif
+
+#if defined(SUPPORT_HYBRID_PB)
+	#if ((HYBRID_SHARED_PB_SIZE + 0x00001000) > SGX_3DPARAMETERS_HEAP_SIZE)
+ #error "sgxconfig.h: ERROR: HYBRID_SHARED_PB_SIZE too large"
+ #endif
+#endif
+
+#if defined(SUPPORT_MEMORY_TILING)
+ #if ((SGX_GENERAL_HEAP_BASE + SGX_GENERAL_HEAP_SIZE) >= SGX_VPB_TILED_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_GENERAL_HEAP overlaps SGX_VPB_TILED_HEAP"
+ #endif
+ #if ((SGX_VPB_TILED_HEAP_BASE + SGX_VPB_TILED_HEAP_SIZE) >= SGX_SHARED_3DPARAMETERS_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_VPB_TILED_HEAP overlaps SGX_3DPARAMETERS_HEAP"
+ #endif
+#else
+ #if defined(SUPPORT_ION)
+ #if ((SGX_ION_HEAP_BASE + SGX_ION_HEAP_SIZE) >= SGX_SHARED_3DPARAMETERS_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_ION_HEAP overlaps SGX_3DPARAMETERS_HEAP"
+ #endif
+ #endif
+ #if ((SGX_GENERAL_HEAP_BASE + SGX_GENERAL_HEAP_SIZE) >= SGX_SHARED_3DPARAMETERS_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_GENERAL_HEAP overlaps SGX_3DPARAMETERS_HEAP"
+ #endif
+#endif
+
+#if (((SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE + SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE) >= SGX_TADATA_HEAP_BASE) && (SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE > 0))
+ #error "sgxconfig.h: ERROR: SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE overlaps SGX_TADATA_HEAP"
+#endif
+
+#if ((SGX_TADATA_HEAP_BASE + SGX_TADATA_HEAP_SIZE) >= SGX_SYNCINFO_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_TADATA_HEAP overlaps SGX_SYNCINFO_HEAP"
+#endif
+
+#if ((SGX_SYNCINFO_HEAP_BASE + SGX_SYNCINFO_HEAP_SIZE) >= SGX_PDSPIXEL_CODEDATA_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_SYNCINFO_HEAP overlaps SGX_PDSPIXEL_CODEDATA_HEAP"
+#endif
+
+#if ((SGX_PDSPIXEL_CODEDATA_HEAP_BASE + SGX_PDSPIXEL_CODEDATA_HEAP_SIZE) >= SGX_KERNEL_CODE_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_PDSPIXEL_CODEDATA_HEAP overlaps SGX_KERNEL_CODE_HEAP"
+#endif
+
+#if ((SGX_KERNEL_CODE_HEAP_BASE + SGX_KERNEL_CODE_HEAP_SIZE) >= SGX_PDSVERTEX_CODEDATA_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_KERNEL_CODE_HEAP overlaps SGX_PDSVERTEX_CODEDATA_HEAP"
+#endif
+
+#if ((SGX_PDSVERTEX_CODEDATA_HEAP_BASE + SGX_PDSVERTEX_CODEDATA_HEAP_SIZE) >= SGX_KERNEL_DATA_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_PDSVERTEX_CODEDATA_HEAP overlaps SGX_KERNEL_DATA_HEAP"
+#endif
+
+#if ((SGX_KERNEL_DATA_HEAP_BASE + SGX_KERNEL_DATA_HEAP_SIZE) >= SGX_PIXELSHADER_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_KERNEL_DATA_HEAP overlaps SGX_PIXELSHADER_HEAP"
+#endif
+
+#if ((SGX_PIXELSHADER_HEAP_BASE + SGX_PIXELSHADER_HEAP_SIZE) >= SGX_VERTEXSHADER_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_PIXELSHADER_HEAP overlaps SGX_VERTEXSHADER_HEAP"
+#endif
+
+#if ((SGX_VERTEXSHADER_HEAP_BASE + SGX_VERTEXSHADER_HEAP_SIZE) < SGX_VERTEXSHADER_HEAP_BASE)
+	#error "sgxconfig.h: ERROR: SGX_VERTEXSHADER_HEAP size causes wraparound"
+#endif
+
+#endif /* __SGXCONFIG_H__ */
+
+/*****************************************************************************
+ End of file (sgxconfig.h)
+*****************************************************************************/
diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxinfokm.h b/pvr-source/services4/srvkm/devices/sgx/sgxinfokm.h
new file mode 100644
index 0000000..125da09
--- /dev/null
+++ b/pvr-source/services4/srvkm/devices/sgx/sgxinfokm.h
@@ -0,0 +1,610 @@
+/*************************************************************************/ /*!
+@Title          SGX kernel services structures/functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Structures and inline functions for KM services component
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __SGXINFOKM_H__
+#define __SGXINFOKM_H__
+
+#include "sgxdefs.h"
+#include "device.h"
+#include "power.h"
+#include "sysconfig.h"
+#include "sgxscript.h"
+#include "sgxinfo.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/****************************************************************************/
+/* kernel only defines: */
+/****************************************************************************/
+/* SGXDeviceMap Flag defines */
+#define SGX_HOSTPORT_PRESENT 0x00000001UL
+
+
+/*
+ SGX PDUMP register bank name (prefix)
+*/
+#define SGX_PDUMPREG_NAME "SGXREG"
+
+/****************************************************************************/
+/* kernel only structures: */
+/****************************************************************************/
+
+/*Forward declaration*/
+typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
+
+
+typedef struct _PVRSRV_SGX_CCB_INFO_ *PPVRSRV_SGX_CCB_INFO;
+
+typedef struct _PVRSRV_SGXDEV_INFO_
+{
+ PVRSRV_DEVICE_TYPE eDeviceType;
+ PVRSRV_DEVICE_CLASS eDeviceClass;
+
+ IMG_UINT8 ui8VersionMajor;
+ IMG_UINT8 ui8VersionMinor;
+ IMG_UINT32 ui32CoreConfig;
+ IMG_UINT32 ui32CoreFlags;
+
+ /* Kernel mode linear address of device registers */
+ IMG_PVOID pvRegsBaseKM;
+
+#if defined(SGX_FEATURE_HOST_PORT)
+ /* Kernel mode linear address of host port */
+ IMG_PVOID pvHostPortBaseKM;
+ /* HP size */
+ IMG_UINT32 ui32HPSize;
+ /* HP syspaddr */
+ IMG_SYS_PHYADDR sHPSysPAddr;
+#endif
+
+ /* FIXME: The alloc for this should go through OSAllocMem in future */
+ IMG_HANDLE hRegMapping;
+
+ /* System physical address of device registers*/
+ IMG_SYS_PHYADDR sRegsPhysBase;
+ /* Register region size in bytes */
+ IMG_UINT32 ui32RegSize;
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+ /* external system cache register region size in bytes */
+ IMG_UINT32 ui32ExtSysCacheRegsSize;
+ /* external system cache register device relative physical address */
+ IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase;
+ /* ptr to page table */
+ IMG_UINT32 *pui32ExtSystemCacheRegsPT;
+ /* handle to page table alloc/mapping */
+ IMG_HANDLE hExtSystemCacheRegsPTPageOSMemHandle;
+ /* sys phys addr of PT */
+ IMG_SYS_PHYADDR sExtSystemCacheRegsPTSysPAddr;
+#endif
+
+ /* SGX clock speed */
+ IMG_UINT32 ui32CoreClockSpeed;
+ IMG_UINT32 ui32uKernelTimerClock;
+ IMG_BOOL bSGXIdle;
+
+ PVRSRV_STUB_PBDESC *psStubPBDescListKM;
+
+
+ /* kernel memory context info */
+ IMG_DEV_PHYADDR sKernelPDDevPAddr;
+
+ IMG_UINT32 ui32HeapCount; /*!< heap count */
+ IMG_VOID *pvDeviceMemoryHeap;
+ PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo; /*!< meminfo for CCB in device accessible memory */
+ PVRSRV_SGX_KERNEL_CCB *psKernelCCB; /*!< kernel mode linear address of CCB in device accessible memory */
+ PPVRSRV_SGX_CCB_INFO psKernelCCBInfo; /*!< CCB information structure */
+ PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo; /*!< meminfo for CCB control in device accessible memory */
+ PVRSRV_SGX_CCB_CTL *psKernelCCBCtl; /*!< kernel mode linear address of CCB control in device accessible memory */
+ PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo; /*!< meminfo for kernel CCB event kicker */
+ IMG_UINT32 *pui32KernelCCBEventKicker; /*!< kernel mode linear address of kernel CCB event kicker */
+#if defined(PDUMP)
+ IMG_UINT32 ui32KernelCCBEventKickerDumpVal; /*!< pdump copy of the kernel CCB event kicker */
+#endif /* PDUMP */
+ PVRSRV_KERNEL_MEM_INFO *psKernelSGXMiscMemInfo; /*!< kernel mode linear address of SGX misc info buffer */
+	IMG_UINT32			aui32HostKickAddr[SGXMKIF_CMD_MAX];	/*!< ukernel host kick offsets */
+#if defined(SGX_SUPPORT_HWPROFILING)
+ PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo;
+#endif
+	PPVRSRV_KERNEL_MEM_INFO	psKernelHWPerfCBMemInfo;		/*!< Meminfo for hardware performance circular buffer */
+ PPVRSRV_KERNEL_MEM_INFO psKernelTASigBufferMemInfo; /*!< Meminfo for TA signature buffer */
+ PPVRSRV_KERNEL_MEM_INFO psKernel3DSigBufferMemInfo; /*!< Meminfo for 3D signature buffer */
+#if defined(FIX_HW_BRN_29702)
+ PPVRSRV_KERNEL_MEM_INFO psKernelCFIMemInfo; /*!< Meminfo for cfi */
+#endif
+#if defined(FIX_HW_BRN_29823)
+ PPVRSRV_KERNEL_MEM_INFO psKernelDummyTermStreamMemInfo; /*!< Meminfo for dummy terminate stream */
+#endif
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && defined(FIX_HW_BRN_31559)
+ PPVRSRV_KERNEL_MEM_INFO psKernelVDMSnapShotBufferMemInfo; /*!< Meminfo for dummy snapshot buffer */
+ PPVRSRV_KERNEL_MEM_INFO psKernelVDMCtrlStreamBufferMemInfo; /*!< Meminfo for dummy control stream */
+#endif
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \
+ defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX)
+ PPVRSRV_KERNEL_MEM_INFO psKernelVDMStateUpdateBufferMemInfo; /*!< Meminfo for state update buffer */
+#endif
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+ PPVRSRV_KERNEL_MEM_INFO psKernelEDMStatusBufferMemInfo; /*!< Meminfo for EDM status buffer */
+#endif
+ /* Client reference count */
+ IMG_UINT32 ui32ClientRefCount;
+
+ /* cache control word for micro kernel cache flush/invalidates */
+ IMG_UINT32 ui32CacheControl;
+
+ /* client-side build options */
+ IMG_UINT32 ui32ClientBuildOptions;
+
+ /* client-side microkernel structure sizes */
+ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
+
+ /*
+ if we don't preallocate the pagetables we must
+ insert newly allocated page tables dynamically
+ */
+ IMG_VOID *pvMMUContextList;
+
+ /* Copy of registry ForcePTOff entry */
+ IMG_BOOL bForcePTOff;
+
+ IMG_UINT32 ui32EDMTaskReg0;
+ IMG_UINT32 ui32EDMTaskReg1;
+
+ IMG_UINT32 ui32ClkGateCtl;
+ IMG_UINT32 ui32ClkGateCtl2;
+ IMG_UINT32 ui32ClkGateStatusReg;
+ IMG_UINT32 ui32ClkGateStatusMask;
+#if defined(SGX_FEATURE_MP)
+ IMG_UINT32 ui32MasterClkGateStatusReg;
+ IMG_UINT32 ui32MasterClkGateStatusMask;
+ IMG_UINT32 ui32MasterClkGateStatus2Reg;
+ IMG_UINT32 ui32MasterClkGateStatus2Mask;
+#endif /* SGX_FEATURE_MP */
+ SGX_INIT_SCRIPTS sScripts;
+
+ /* Members associated with dummy PD needed for BIF reset */
+ IMG_HANDLE hBIFResetPDOSMemHandle;
+ IMG_DEV_PHYADDR sBIFResetPDDevPAddr;
+ IMG_DEV_PHYADDR sBIFResetPTDevPAddr;
+ IMG_DEV_PHYADDR sBIFResetPageDevPAddr;
+ IMG_UINT32 *pui32BIFResetPD;
+ IMG_UINT32 *pui32BIFResetPT;
+
+
+#if defined(SUPPORT_HW_RECOVERY)
+ /* Timeout callback handle */
+ IMG_HANDLE hTimer;
+ /* HW recovery Time stamp */
+ IMG_UINT32 ui32TimeStamp;
+#endif
+
+ /* Number of SGX resets */
+ IMG_UINT32 ui32NumResets;
+
+ /* host control */
+ PVRSRV_KERNEL_MEM_INFO *psKernelSGXHostCtlMemInfo;
+ SGXMKIF_HOST_CTL *psSGXHostCtl;
+
+ /* TA/3D control */
+ PVRSRV_KERNEL_MEM_INFO *psKernelSGXTA3DCtlMemInfo;
+
+#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920)
+ PVRSRV_KERNEL_MEM_INFO *psKernelSGXPTLAWriteBackMemInfo;
+#endif
+
+ IMG_UINT32 ui32Flags;
+
+ /* memory tiling range usage */
+ IMG_UINT32 ui32MemTilingUsage;
+
+ #if defined(PDUMP)
+ PVRSRV_SGX_PDUMP_CONTEXT sPDContext;
+ #endif
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ /* SGX MMU dummy page details */
+ IMG_VOID *pvDummyPTPageCpuVAddr;
+ IMG_DEV_PHYADDR sDummyPTDevPAddr;
+ IMG_HANDLE hDummyPTPageOSMemHandle;
+ IMG_VOID *pvDummyDataPageCpuVAddr;
+ IMG_DEV_PHYADDR sDummyDataDevPAddr;
+ IMG_HANDLE hDummyDataPageOSMemHandle;
+#endif
+#if defined(PDUMP)
+ PDUMP_MMU_ATTRIB sMMUAttrib;
+#endif
+ IMG_UINT32 asSGXDevData[SGX_MAX_DEV_DATA];
+
+#if defined(FIX_HW_BRN_31620)
+ /* Dummy page refs */
+ IMG_VOID *pvBRN31620DummyPageCpuVAddr;
+ IMG_HANDLE hBRN31620DummyPageOSMemHandle;
+ IMG_DEV_PHYADDR sBRN31620DummyPageDevPAddr;
+
+ /* Dummy PT refs */
+ IMG_VOID *pvBRN31620DummyPTCpuVAddr;
+ IMG_HANDLE hBRN31620DummyPTOSMemHandle;
+ IMG_DEV_PHYADDR sBRN31620DummyPTDevPAddr;
+
+ IMG_HANDLE hKernelMMUContext;
+#endif
+
+} PVRSRV_SGXDEV_INFO;
+
+
+typedef struct _SGX_TIMING_INFORMATION_
+{
+ IMG_UINT32 ui32CoreClockSpeed;
+ IMG_UINT32 ui32HWRecoveryFreq;
+ IMG_BOOL bEnableActivePM;
+ IMG_UINT32 ui32ActivePowManLatencyms;
+ IMG_UINT32 ui32uKernelFreq;
+} SGX_TIMING_INFORMATION;
+
+/* FIXME: Rename this structure to be more generalised as it's been extended */
+/* SGX device map */
+typedef struct _SGX_DEVICE_MAP_
+{
+ IMG_UINT32 ui32Flags;
+
+ /* Registers */
+ IMG_SYS_PHYADDR sRegsSysPBase;
+ IMG_CPU_PHYADDR sRegsCpuPBase;
+ IMG_CPU_VIRTADDR pvRegsCpuVBase;
+ IMG_UINT32 ui32RegsSize;
+
+#if defined(SGX_FEATURE_HOST_PORT)
+ IMG_SYS_PHYADDR sHPSysPBase;
+ IMG_CPU_PHYADDR sHPCpuPBase;
+ IMG_UINT32 ui32HPSize;
+#endif
+
+ /* Local Device Memory Region: (if present) */
+ IMG_SYS_PHYADDR sLocalMemSysPBase;
+ IMG_DEV_PHYADDR sLocalMemDevPBase;
+ IMG_CPU_PHYADDR sLocalMemCpuPBase;
+ IMG_UINT32 ui32LocalMemSize;
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+ IMG_UINT32 ui32ExtSysCacheRegsSize;
+ IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase;
+#endif
+
+ /* device interrupt IRQ */
+ IMG_UINT32 ui32IRQ;
+
+#if !defined(SGX_DYNAMIC_TIMING_INFO)
+ /* timing information*/
+ SGX_TIMING_INFORMATION sTimingInfo;
+#endif
+#if defined(PDUMP)
+ /* pdump memory region name */
+ IMG_CHAR *pszPDumpDevName;
+#endif
+} SGX_DEVICE_MAP;
+
+
+struct _PVRSRV_STUB_PBDESC_
+{
+ IMG_UINT32 ui32RefCount;
+ IMG_UINT32 ui32TotalPBSize;
+ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO **ppsSubKernelMemInfos;
+ IMG_UINT32 ui32SubKernelMemInfosCount;
+ IMG_HANDLE hDevCookie;
+ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
+ IMG_DEV_VIRTADDR sHWPBDescDevVAddr;
+ PVRSRV_STUB_PBDESC *psNext;
+ PVRSRV_STUB_PBDESC **ppsThis;
+};
+
+/*!
+ ******************************************************************************
+ * CCB control structure for SGX
+ *****************************************************************************/
+typedef struct _PVRSRV_SGX_CCB_INFO_
+{
+ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo; /*!< meminfo for CCB in device accessible memory */
+ PVRSRV_KERNEL_MEM_INFO *psCCBCtlMemInfo; /*!< meminfo for CCB control in device accessible memory */
+ SGXMKIF_COMMAND *psCommands; /*!< linear address of the array of commands */
+ IMG_UINT32 *pui32WriteOffset; /*!< linear address of the write offset into array of commands */
+ volatile IMG_UINT32 *pui32ReadOffset; /*!< linear address of the read offset into array of commands */
+#if defined(PDUMP)
+ IMG_UINT32 ui32CCBDumpWOff; /*!< for pdumping */
+#endif
+} PVRSRV_SGX_CCB_INFO;
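+
+/*
+ * Illustrative sketch (an assumption about the producer/consumer protocol,
+ * not code taken from this driver): the host writes a command at the write
+ * offset and then advances it, while the microkernel consumes at the read
+ * offset and advances that, which is why pui32ReadOffset is volatile. With a
+ * hypothetical ring size of ui32NumCommands entries:
+ *
+ *     SGXMKIF_COMMAND *psCmd =
+ *         &psCCBInfo->psCommands[*psCCBInfo->pui32WriteOffset];
+ *     *psCmd = sNewCommand;    // fill in the command slot
+ *     *psCCBInfo->pui32WriteOffset =
+ *         (*psCCBInfo->pui32WriteOffset + 1) % ui32NumCommands;
+ */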
+
+
+typedef struct _SGX_BRIDGE_INIT_INFO_KM_
+{
+ IMG_HANDLE hKernelCCBMemInfo;
+ IMG_HANDLE hKernelCCBCtlMemInfo;
+ IMG_HANDLE hKernelCCBEventKickerMemInfo;
+ IMG_HANDLE hKernelSGXHostCtlMemInfo;
+ IMG_HANDLE hKernelSGXTA3DCtlMemInfo;
+#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920)
+ IMG_HANDLE hKernelSGXPTLAWriteBackMemInfo;
+#endif
+ IMG_HANDLE hKernelSGXMiscMemInfo;
+
+ IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX];
+
+ SGX_INIT_SCRIPTS sScripts;
+
+ IMG_UINT32 ui32ClientBuildOptions;
+ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
+
+#if defined(SGX_SUPPORT_HWPROFILING)
+ IMG_HANDLE hKernelHWProfilingMemInfo;
+#endif
+#if defined(SUPPORT_SGX_HWPERF)
+ IMG_HANDLE hKernelHWPerfCBMemInfo;
+#endif
+ IMG_HANDLE hKernelTASigBufferMemInfo;
+ IMG_HANDLE hKernel3DSigBufferMemInfo;
+
+#if defined(FIX_HW_BRN_29702)
+ IMG_HANDLE hKernelCFIMemInfo;
+#endif
+#if defined(FIX_HW_BRN_29823)
+ IMG_HANDLE hKernelDummyTermStreamMemInfo;
+#endif
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+ IMG_HANDLE hKernelEDMStatusBufferMemInfo;
+#endif
+
+ IMG_UINT32 ui32EDMTaskReg0;
+ IMG_UINT32 ui32EDMTaskReg1;
+
+ IMG_UINT32 ui32ClkGateStatusReg;
+ IMG_UINT32 ui32ClkGateStatusMask;
+#if defined(SGX_FEATURE_MP)
+// IMG_UINT32 ui32MasterClkGateStatusReg;
+// IMG_UINT32 ui32MasterClkGateStatusMask;
+// IMG_UINT32 ui32MasterClkGateStatus2Reg;
+// IMG_UINT32 ui32MasterClkGateStatus2Mask;
+#endif /* SGX_FEATURE_MP */
+
+ IMG_UINT32 ui32CacheControl;
+
+ IMG_UINT32 asInitDevData[SGX_MAX_DEV_DATA];
+ IMG_HANDLE asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES];
+
+} SGX_BRIDGE_INIT_INFO_KM;
+
+
+typedef struct _SGX_INTERNEL_STATUS_UPDATE_KM_
+{
+ CTL_STATUS sCtlStatus;
+ IMG_HANDLE hKernelMemInfo;
+} SGX_INTERNEL_STATUS_UPDATE_KM;
+
+
+typedef struct _SGX_CCB_KICK_KM_
+{
+ SGXMKIF_COMMAND sCommand;
+ IMG_HANDLE hCCBKernelMemInfo;
+
+ IMG_UINT32 ui32NumDstSyncObjects;
+ IMG_HANDLE hKernelHWSyncListMemInfo;
+
+ /* DST syncs */
+ IMG_HANDLE *pahDstSyncHandles;
+
+ IMG_UINT32 ui32NumTAStatusVals;
+ IMG_UINT32 ui32Num3DStatusVals;
+
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ SGX_INTERNEL_STATUS_UPDATE_KM asTAStatusUpdate[SGX_MAX_TA_STATUS_VALS];
+ SGX_INTERNEL_STATUS_UPDATE_KM as3DStatusUpdate[SGX_MAX_3D_STATUS_VALS];
+#else
+ IMG_HANDLE ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS];
+ IMG_HANDLE ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS];
+#endif
+
+ IMG_BOOL bFirstKickOrResume;
+#if defined(NO_HARDWARE) || defined(PDUMP)
+ IMG_BOOL bTerminateOrAbort;
+#endif
+
+ /* CCB offset of data structure associated with this kick */
+ IMG_UINT32 ui32CCBOffset;
+
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+ /* SRC and DST syncs */
+ IMG_UINT32 ui32NumTASrcSyncs;
+ IMG_HANDLE ahTASrcKernelSyncInfo[SGX_MAX_TA_SRC_SYNCS];
+ IMG_UINT32 ui32NumTADstSyncs;
+ IMG_HANDLE ahTADstKernelSyncInfo[SGX_MAX_TA_DST_SYNCS];
+ IMG_UINT32 ui32Num3DSrcSyncs;
+ IMG_HANDLE ah3DSrcKernelSyncInfo[SGX_MAX_3D_SRC_SYNCS];
+#else
+ /* SRC syncs */
+ IMG_UINT32 ui32NumSrcSyncs;
+ IMG_HANDLE ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS_TA];
+#endif
+
+ /* TA/3D dependency data */
+ IMG_BOOL bTADependency;
+ IMG_HANDLE hTA3DSyncInfo;
+
+ IMG_HANDLE hTASyncInfo;
+ IMG_HANDLE h3DSyncInfo;
+#if defined(PDUMP)
+ IMG_UINT32 ui32CCBDumpWOff;
+#endif
+#if defined(NO_HARDWARE)
+ IMG_UINT32 ui32WriteOpsPendingVal;
+#endif
+} SGX_CCB_KICK_KM;
+
+
+#if defined(TRANSFER_QUEUE)
+typedef struct _PVRSRV_TRANSFER_SGX_KICK_KM_
+{
+ IMG_HANDLE hCCBMemInfo;
+ IMG_UINT32 ui32SharedCmdCCBOffset;
+
+ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
+
+ IMG_HANDLE hTASyncInfo;
+ IMG_HANDLE h3DSyncInfo;
+
+ IMG_UINT32 ui32NumSrcSync;
+ IMG_HANDLE ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
+
+ IMG_UINT32 ui32NumDstSync;
+ IMG_HANDLE ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
+
+ IMG_UINT32 ui32Flags;
+
+ IMG_UINT32 ui32PDumpFlags;
+#if defined(PDUMP)
+ IMG_UINT32 ui32CCBDumpWOff;
+#endif
+} PVRSRV_TRANSFER_SGX_KICK_KM, *PPVRSRV_TRANSFER_SGX_KICK_KM;
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+typedef struct _PVRSRV_2D_SGX_KICK_KM_
+{
+ IMG_HANDLE hCCBMemInfo;
+ IMG_UINT32 ui32SharedCmdCCBOffset;
+
+ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
+
+ IMG_UINT32 ui32NumSrcSync;
+ IMG_HANDLE ahSrcSyncInfo[SGX_MAX_2D_SRC_SYNC_OPS];
+
+ /* need to be able to check reads and writes on dest, and update writes */
+ IMG_HANDLE hDstSyncInfo;
+
+ /* need to be able to check reads and writes on TA ops, and update writes */
+ IMG_HANDLE hTASyncInfo;
+
+	/* need to be able to check reads and writes on 3D ops, and update writes */
+ IMG_HANDLE h3DSyncInfo;
+
+ IMG_UINT32 ui32PDumpFlags;
+#if defined(PDUMP)
+ IMG_UINT32 ui32CCBDumpWOff;
+#endif
+} PVRSRV_2D_SGX_KICK_KM, *PPVRSRV_2D_SGX_KICK_KM;
+#endif /* defined(SGX_FEATURE_2D_HARDWARE) */
+#endif /* #if defined(TRANSFER_QUEUE) */
+
+/****************************************************************************/
+/* kernel only functions prototypes */
+/****************************************************************************/
+PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_VOID SGXOSTimer(IMG_VOID *pvData);
+
+IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bHardwareRecovery,
+ IMG_UINT32 ui32PDUMPFlags);
+
+IMG_VOID SGXInitClocks(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32PDUMPFlags);
+
+PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bHardwareRecovery);
+PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie);
+
+PVRSRV_ERROR SGXPrePowerState(IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+PVRSRV_ERROR SGXPostPowerState(IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+PVRSRV_ERROR SGXPreClockSpeedChange(IMG_HANDLE hDevHandle,
+ IMG_BOOL bIdleDevice,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+PVRSRV_ERROR SGXPostClockSpeedChange(IMG_HANDLE hDevHandle,
+ IMG_BOOL bIdleDevice,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+IMG_VOID SGXPanic(PVRSRV_SGXDEV_INFO *psDevInfo);
+
+IMG_VOID SGXDumpDebugInfo (PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bDumpSGXRegs);
+
+PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#if defined(SGX_DYNAMIC_TIMING_INFO)
+IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psSGXTimingInfo);
+#endif
+
+/****************************************************************************/
+/* kernel only functions: */
+/****************************************************************************/
+#if defined(NO_HARDWARE)
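+/* In NO_HARDWARE builds there is no SGX to raise events, so this helper
+   emulates one by read-modify-writing the masked status bits into the
+   chosen status register. */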
+static INLINE IMG_VOID NoHardwareGenerateEvent(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32StatusRegister,
+ IMG_UINT32 ui32StatusValue,
+ IMG_UINT32 ui32StatusMask)
+{
+ IMG_UINT32 ui32RegVal;
+
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister);
+
+ ui32RegVal &= ~ui32StatusMask;
+ ui32RegVal |= (ui32StatusValue & ui32StatusMask);
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister, ui32RegVal);
+}
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __SGXINFOKM_H__ */
+
+/*****************************************************************************
+ End of file (sgxinfokm.h)
+*****************************************************************************/
diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxinit.c b/pvr-source/services4/srvkm/devices/sgx/sgxinit.c
new file mode 100644
index 0000000..199aa9d
--- /dev/null
+++ b/pvr-source/services4/srvkm/devices/sgx/sgxinit.c
@@ -0,0 +1,3428 @@
+/*************************************************************************/ /*!
+@Title Device specific initialisation routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "sgxdefs.h"
+#include "sgxmmu.h"
+#include "services_headers.h"
+#include "buffer_manager.h"
+#include "sgxapi_km.h"
+#include "sgxinfo.h"
+#include "sgx_mkif_km.h"
+#include "sgxconfig.h"
+#include "sysconfig.h"
+#include "pvr_bridge_km.h"
+
+#include "sgx_bridge_km.h"
+
+#include "pdump_km.h"
+#include "ra.h"
+#include "mmu.h"
+#include "handle.h"
+#include "perproc.h"
+
+#include "sgxutils.h"
+#include "pvrversion.h"
+#include "sgx_options.h"
+
+#include "lists.h"
+#include "srvkm.h"
+#include "ttrace.h"
+
+extern int powering_down;
+
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+
+static const IMG_CHAR *SGXUKernelStatusString(IMG_UINT32 code)
+{
+ switch(code)
+ {
+#define MKTC_ST(x) \
+ case x: \
+ return #x;
+#include "sgx_ukernel_status_codes.h"
+ default:
+ return "(Unknown)";
+ }
+}
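+
+/*
+   The switch above is generated with an X-macro: each MKTC_ST(code) entry
+   in sgx_ukernel_status_codes.h expands to "case code: return #code;".
+   A minimal sketch of the pattern (hypothetical status codes, not the
+   real header contents):
+
+       #define MKTC_ST(x) case x: return #x;
+       MKTC_ST(MKTC_EXAMPLE_INIT_COMPLETE)
+       MKTC_ST(MKTC_EXAMPLE_PAGE_FAULT)
+*/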
+
+#endif /* defined(PVRSRV_USSE_EDM_STATUS_DEBUG) */
+
+#define VAR(x) #x
+/* PRQA S 0881 11 */ /* ignore 'order of evaluation' warning */
+#define CHECK_SIZE(NAME) \
+{ \
+ if (psSGXStructSizes->ui32Sizeof_##NAME != psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME) \
+ { \
+ PVR_DPF((PVR_DBG_ERROR, "SGXDevInitCompatCheck: Size check failed for SGXMKIF_%s (client) = %d bytes, (ukernel) = %d bytes\n", \
+ VAR(NAME), \
+ psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME, \
+ psSGXStructSizes->ui32Sizeof_##NAME )); \
+ bStructSizesFailed = IMG_TRUE; \
+ } \
+}
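+
+/*
+   Usage sketch (assumed from the macro definition; the real call sites
+   are in SGXDevInitCompatCheck below): each shared microkernel structure
+   is checked by name, e.g.
+
+       CHECK_SIZE(HOST_CTL);
+       CHECK_SIZE(COMMAND);
+
+   comparing the client and microkernel values of ui32Sizeof_HOST_CTL and
+   ui32Sizeof_COMMAND and latching any mismatch in bStructSizesFailed.
+*/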
+
+#if defined (SYS_USING_INTERRUPTS)
+IMG_BOOL SGX_ISRHandler(IMG_VOID *pvData);
+#endif
+
+
+static
+PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hDevMemContext);
+#if defined(PDUMP)
+static
+PVRSRV_ERROR SGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode);
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function SGXCommandComplete
+
+ @Description
+
+ SGX command complete handler
+
+ @Input psDeviceNode - SGX device node
+
+ @Return none
+
+******************************************************************************/
+static IMG_VOID SGXCommandComplete(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(OS_SUPPORTS_IN_LISR)
+ if (OSInLISR(psDeviceNode->psSysData))
+ {
+ /*
+ * We shouldn't call SGXScheduleProcessQueuesKM in an
+ * LISR, as it may attempt to power up SGX.
+ * We assume that the LISR will schedule the MISR, which
+ * will test the following flag, and call
+ * SGXScheduleProcessQueuesKM if the flag is set.
+ */
+ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
+ }
+ else
+ {
+ SGXScheduleProcessQueuesKM(psDeviceNode);
+ }
+#else
+ SGXScheduleProcessQueuesKM(psDeviceNode);
+#endif
+}
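+
+/*
+   A minimal sketch (assumption, not the actual MISR source) of the
+   MISR-side counterpart that consumes the flag set above:
+
+       if (psDeviceNode->bReProcessDeviceCommandComplete)
+       {
+           psDeviceNode->bReProcessDeviceCommandComplete = IMG_FALSE;
+           SGXScheduleProcessQueuesKM(psDeviceNode);
+       }
+*/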
+
+/*!
+*******************************************************************************
+
+ @Function DeinitDevInfo
+
+ @Description
+
+ Deinits DevInfo
+
+ @Input psDevInfo - SGX device info
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR DeinitDevInfo(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ if (psDevInfo->psKernelCCBInfo != IMG_NULL)
+ {
+ /*
+ Free CCB info.
+ */
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_SGX_CCB_INFO), psDevInfo->psKernelCCBInfo, IMG_NULL);
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function InitDevInfo
+
+ @Description
+
+ Loads DevInfo
+
+ @Input psDeviceNode
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR InitDevInfo(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_BRIDGE_INIT_INFO_KM *psInitInfo)
+#else
+ SGX_BRIDGE_INIT_INFO *psInitInfo)
+#endif
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+
+ PVRSRV_SGX_CCB_INFO *psKernelCCBInfo = IMG_NULL;
+
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+ psDevInfo->sScripts = psInitInfo->sScripts;
+
+ psDevInfo->psKernelCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBMemInfo;
+ psDevInfo->psKernelCCB = (PVRSRV_SGX_KERNEL_CCB *) psDevInfo->psKernelCCBMemInfo->pvLinAddrKM;
+
+ psDevInfo->psKernelCCBCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBCtlMemInfo;
+ psDevInfo->psKernelCCBCtl = (PVRSRV_SGX_CCB_CTL *) psDevInfo->psKernelCCBCtlMemInfo->pvLinAddrKM;
+
+ psDevInfo->psKernelCCBEventKickerMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBEventKickerMemInfo;
+ psDevInfo->pui32KernelCCBEventKicker = (IMG_UINT32 *)psDevInfo->psKernelCCBEventKickerMemInfo->pvLinAddrKM;
+
+ psDevInfo->psKernelSGXHostCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXHostCtlMemInfo;
+ psDevInfo->psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
+
+ psDevInfo->psKernelSGXTA3DCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXTA3DCtlMemInfo;
+
+#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920)
+ psDevInfo->psKernelSGXPTLAWriteBackMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXPTLAWriteBackMemInfo;
+#endif
+
+ psDevInfo->psKernelSGXMiscMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXMiscMemInfo;
+
+#if defined(SGX_SUPPORT_HWPROFILING)
+ psDevInfo->psKernelHWProfilingMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWProfilingMemInfo;
+#endif
+#if defined(SUPPORT_SGX_HWPERF)
+ psDevInfo->psKernelHWPerfCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWPerfCBMemInfo;
+#endif
+ psDevInfo->psKernelTASigBufferMemInfo = psInitInfo->hKernelTASigBufferMemInfo;
+ psDevInfo->psKernel3DSigBufferMemInfo = psInitInfo->hKernel3DSigBufferMemInfo;
+#if defined(FIX_HW_BRN_29702)
+ psDevInfo->psKernelCFIMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCFIMemInfo;
+#endif
+#if defined(FIX_HW_BRN_29823)
+ psDevInfo->psKernelDummyTermStreamMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelDummyTermStreamMemInfo;
+#endif
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && defined(FIX_HW_BRN_31559)
+ psDevInfo->psKernelVDMSnapShotBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelVDMSnapShotBufferMemInfo;
+ psDevInfo->psKernelVDMCtrlStreamBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelVDMCtrlStreamBufferMemInfo;
+#endif
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \
+ defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX)
+ psDevInfo->psKernelVDMStateUpdateBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelVDMStateUpdateBufferMemInfo;
+#endif
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+ psDevInfo->psKernelEDMStatusBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelEDMStatusBufferMemInfo;
+#endif
+ /*
+ * Assign client-side build options for later verification
+ */
+ psDevInfo->ui32ClientBuildOptions = psInitInfo->ui32ClientBuildOptions;
+
+ /*
+ * Assign microkernel IF structure sizes for later verification
+ */
+ psDevInfo->sSGXStructSizes = psInitInfo->sSGXStructSizes;
+
+ /*
+ Setup the kernel version of the CCB control
+ */
+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_SGX_CCB_INFO),
+ (IMG_VOID **)&psKernelCCBInfo, 0,
+ "SGX Circular Command Buffer Info");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"InitDevInfo: Failed to alloc memory"));
+ goto failed_allockernelccb;
+ }
+
+
+ OSMemSet(psKernelCCBInfo, 0, sizeof(PVRSRV_SGX_CCB_INFO));
+ psKernelCCBInfo->psCCBMemInfo = psDevInfo->psKernelCCBMemInfo;
+ psKernelCCBInfo->psCCBCtlMemInfo = psDevInfo->psKernelCCBCtlMemInfo;
+ psKernelCCBInfo->psCommands = psDevInfo->psKernelCCB->asCommands;
+ psKernelCCBInfo->pui32WriteOffset = &psDevInfo->psKernelCCBCtl->ui32WriteOffset;
+ psKernelCCBInfo->pui32ReadOffset = &psDevInfo->psKernelCCBCtl->ui32ReadOffset;
+ psDevInfo->psKernelCCBInfo = psKernelCCBInfo;
+
+ /*
+ Copy the USE code addresses for the host kick.
+ */
+ OSMemCopy(psDevInfo->aui32HostKickAddr, psInitInfo->aui32HostKickAddr,
+ SGXMKIF_CMD_MAX * sizeof(psDevInfo->aui32HostKickAddr[0]));
+
+ psDevInfo->bForcePTOff = IMG_FALSE;
+
+ psDevInfo->ui32CacheControl = psInitInfo->ui32CacheControl;
+
+ psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0;
+ psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1;
+ psDevInfo->ui32ClkGateCtl = psInitInfo->ui32ClkGateCtl;
+ psDevInfo->ui32ClkGateCtl2 = psInitInfo->ui32ClkGateCtl2;
+ psDevInfo->ui32ClkGateStatusReg = psInitInfo->ui32ClkGateStatusReg;
+ psDevInfo->ui32ClkGateStatusMask = psInitInfo->ui32ClkGateStatusMask;
+#if defined(SGX_FEATURE_MP)
+ psDevInfo->ui32MasterClkGateStatusReg = psInitInfo->ui32MasterClkGateStatusReg;
+ psDevInfo->ui32MasterClkGateStatusMask = psInitInfo->ui32MasterClkGateStatusMask;
+ psDevInfo->ui32MasterClkGateStatus2Reg = psInitInfo->ui32MasterClkGateStatus2Reg;
+ psDevInfo->ui32MasterClkGateStatus2Mask = psInitInfo->ui32MasterClkGateStatus2Mask;
+#endif /* SGX_FEATURE_MP */
+
+
+ /* Initialise Dev Data */
+ OSMemCopy(&psDevInfo->asSGXDevData, &psInitInfo->asInitDevData, sizeof(psDevInfo->asSGXDevData));
+
+ return PVRSRV_OK;
+
+failed_allockernelccb:
+ DeinitDevInfo(psDevInfo);
+
+ return eError;
+}
+
+
+
+
+static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_INIT_COMMAND *psScript, IMG_UINT32 ui32NumInitCommands)
+{
+ IMG_UINT32 ui32PC;
+ SGX_INIT_COMMAND *psComm;
+
+ for (ui32PC = 0, psComm = psScript;
+ ui32PC < ui32NumInitCommands;
+ ui32PC++, psComm++)
+ {
+ switch (psComm->eOp)
+ {
+ case SGX_INIT_OP_WRITE_HW_REG:
+ {
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
+ PDUMPCOMMENT("SGXRunScript: Write HW reg operation");
+ PDUMPREG(SGX_PDUMPREG_NAME, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
+ break;
+ }
+ case SGX_INIT_OP_READ_HW_REG:
+ {
+ OSReadHWReg(psDevInfo->pvRegsBaseKM, psComm->sReadHWReg.ui32Offset);
+#if defined(PDUMP)
+ PDUMPCOMMENT("SGXRunScript: Read HW reg operation");
+ PDumpRegRead(SGX_PDUMPREG_NAME, psComm->sReadHWReg.ui32Offset, PDUMP_FLAGS_CONTINUOUS);
+#endif
+ break;
+ }
+#if defined(PDUMP)
+ case SGX_INIT_OP_PDUMP_HW_REG:
+ {
+ PDUMPCOMMENT("SGXRunScript: Dump HW reg operation");
+ PDUMPREG(SGX_PDUMPREG_NAME, psComm->sPDumpHWReg.ui32Offset, psComm->sPDumpHWReg.ui32Value);
+ break;
+ }
+#endif
+ case SGX_INIT_OP_HALT:
+ {
+ return PVRSRV_OK;
+ }
+ case SGX_INIT_OP_ILLEGAL:
+ /* FALLTHROUGH */
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXRunScript: PC %d: Illegal command: %d", ui32PC, psComm->eOp));
+ return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION;
+ }
+ }
+
+ }
+
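+	/* Only SGX_INIT_OP_HALT returns PVRSRV_OK; falling off the end of the
+	   array means the script was not HALT-terminated, so report an error. */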
+ return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION;
+}
+
+#if defined(SUPPORT_MEMORY_TILING)
+static PVRSRV_ERROR SGX_AllocMemTilingRangeInt(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32TilingStride,
+ IMG_UINT32 *pui32RangeIndex)
+{
+ IMG_UINT32 i;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Val;
+
+ /* HW supports 10 ranges */
+ for(i=0; i < SGX_BIF_NUM_TILING_RANGES; i++)
+ {
+ if((psDevInfo->ui32MemTilingUsage & (1U << i)) == 0)
+ {
+ /* mark in use */
+ psDevInfo->ui32MemTilingUsage |= 1U << i;
+ /* output range index if the caller wants it */
+ if(pui32RangeIndex != IMG_NULL)
+ {
+ *pui32RangeIndex = i;
+ }
+ goto RangeAllocated;
+ }
+ }
+
+ PVR_DPF((PVR_DBG_ERROR,"SGX_AllocMemTilingRange: all tiling ranges in use"));
+ return PVRSRV_ERROR_EXCEEDED_HW_LIMITS;
+
+RangeAllocated:
+
+ /* An improperly aligned range could cause BIF not to tile some memory which is intended to be tiled,
+ * or cause BIF to tile some memory which is not intended to be.
+ */
+ if(ui32Start & ~SGX_BIF_TILING_ADDR_MASK)
+ {
+		PVR_DPF((PVR_DBG_WARNING,"SGX_AllocMemTilingRangeInt: Tiling range start (0x%08X) fails "
+				"alignment test", ui32Start));
+ }
+ if((ui32End + 0x00001000) & ~SGX_BIF_TILING_ADDR_MASK)
+ {
+		PVR_DPF((PVR_DBG_WARNING,"SGX_AllocMemTilingRangeInt: Tiling range end (0x%08X) fails "
+				"alignment test", ui32End));
+ }
+
+ ui32Offset = EUR_CR_BIF_TILE0 + (i<<2);
+
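+	/* Pack the tiling stride config, the shifted min/max range addresses
+	   and the enable bit into a single EUR_CR_BIF_TILEn register value. */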
+ ui32Val = ((ui32TilingStride << EUR_CR_BIF_TILE0_CFG_SHIFT) & EUR_CR_BIF_TILE0_CFG_MASK)
+ | (((ui32End>>SGX_BIF_TILING_ADDR_LSB) << EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT) & EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK)
+ | (((ui32Start>>SGX_BIF_TILING_ADDR_LSB) << EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT) & EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK)
+ | (EUR_CR_BIF_TILE0_ENABLE << EUR_CR_BIF_TILE0_CFG_SHIFT);
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Offset, ui32Val);
+ PDUMPREG(SGX_PDUMPREG_NAME, ui32Offset, ui32Val);
+
+#if defined(SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS)
+ ui32Offset = EUR_CR_BIF_TILE0_ADDR_EXT + (i<<2);
+
+ ui32Val = (((ui32End>>SGX_BIF_TILING_EXT_ADDR_LSB) << EUR_CR_BIF_TILE0_ADDR_EXT_MAX_SHIFT) & EUR_CR_BIF_TILE0_ADDR_EXT_MAX_MASK)
+ | (((ui32Start>>SGX_BIF_TILING_EXT_ADDR_LSB) << EUR_CR_BIF_TILE0_ADDR_EXT_MIN_SHIFT) & EUR_CR_BIF_TILE0_ADDR_EXT_MIN_MASK);
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Offset, ui32Val);
+ PDUMPREG(SGX_PDUMPREG_NAME, ui32Offset, ui32Val);
+#endif /* SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS */
+
+ return PVRSRV_OK;
+}
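+
+/*
+   Freeing a range is just a matter of clearing its usage bit (sketch only,
+   assuming a corresponding free routine exists elsewhere in the driver):
+
+       psDevInfo->ui32MemTilingUsage &= ~(1U << ui32RangeIndex);
+*/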
+
+#endif /* SUPPORT_MEMORY_TILING */
+
+/*!
+*******************************************************************************
+
+ @Function SGXInitialise
+
+ @Description
+
+ (client invoked) chip-reset and initialisation
+
+ @Input psDevInfo - SGX device info structure
+ @Input bHardwareRecovery - true if recovering powered hardware,
+ false if powering up
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bHardwareRecovery)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo;
+ SGXMKIF_HOST_CTL *psSGXHostCtl = psSGXHostCtlMemInfo->pvLinAddrKM;
+ static IMG_BOOL bFirstTime = IMG_TRUE;
+#if defined(PDUMP)
+ IMG_BOOL bPDumpIsSuspended = PDumpIsSuspended();
+#endif /* PDUMP */
+
+#if defined(SGX_FEATURE_MP)
+ /* Slave core clocks must be enabled during reset */
+#else
+ SGXInitClocks(psDevInfo, PDUMP_FLAGS_CONTINUOUS);
+#endif /* SGX_FEATURE_MP */
+
+ /*
+ Part 1 of the initialisation script runs before resetting SGX.
+ */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 1\n");
+ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart1, SGX_MAX_INIT_COMMANDS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 1) failed (%d)", eError));
+ return eError;
+ }
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 1\n");
+
+ /* Reset the chip */
+ psDevInfo->ui32NumResets++;
+
+#if !defined(SGX_FEATURE_MP)
+ bHardwareRecovery |= bFirstTime;
+#endif /* SGX_FEATURE_MP */
+
+ SGXReset(psDevInfo, bHardwareRecovery, PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(EUR_CR_POWER)
+#if defined(SGX531)
+ /*
+ Disable half the pipes.
+ 531 has 2 pipes within a 4 pipe framework, so
+ the 2 redundant pipes must be disabled even
+ though they do not exist.
+ */
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 1);
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_POWER, 1);
+#else
+ /* set the default pipe count (all fully enabled) */
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 0);
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_POWER, 0);
+#endif
+#endif
+
+ /* Initialise the kernel CCB event kicker value */
+ *psDevInfo->pui32KernelCCBEventKicker = 0;
+#if defined(PDUMP)
+ if (!bPDumpIsSuspended)
+ {
+ psDevInfo->ui32KernelCCBEventKickerDumpVal = 0;
+ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
+ psDevInfo->psKernelCCBEventKickerMemInfo, 0,
+ sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
+ }
+#endif /* PDUMP */
+
+#if defined(SUPPORT_MEMORY_TILING)
+ {
+ /* Initialise EUR_CR_BIF_TILE registers for any tiling heaps */
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDevInfo->pvDeviceMemoryHeap;
+ IMG_UINT32 i;
+
+ psDevInfo->ui32MemTilingUsage = 0;
+
+ for(i=0; i<psDevInfo->ui32HeapCount; i++)
+ {
+ if(psDeviceMemoryHeap[i].ui32XTileStride > 0)
+ {
+ /* Set up the HW control registers */
+ eError = SGX_AllocMemTilingRangeInt(
+ psDevInfo,
+ psDeviceMemoryHeap[i].sDevVAddrBase.uiAddr,
+ psDeviceMemoryHeap[i].sDevVAddrBase.uiAddr
+ + psDeviceMemoryHeap[i].ui32HeapSize,
+ psDeviceMemoryHeap[i].ui32XTileStride,
+ NULL);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Unable to allocate SGX BIF tiling range for heap: %s",
+ psDeviceMemoryHeap[i].pszName));
+ break;
+ }
+ }
+ }
+ }
+#endif
+
+ /*
+ Part 2 of the initialisation script runs after resetting SGX.
+ */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 2\n");
+ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart2, SGX_MAX_INIT_COMMANDS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 2) failed (%d)", eError));
+ return eError;
+ }
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 2\n");
+
+ /* Record the system timestamp for the microkernel */
+ psSGXHostCtl->ui32HostClock = OSClockus();
+
+ psSGXHostCtl->ui32InitStatus = 0;
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Reset the SGX microkernel initialisation status\n");
+ PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32InitStatus),
+ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Initialise the microkernel\n");
+#endif /* PDUMP */
+
+#if defined(SGX_FEATURE_MULTI_EVENT_KICK)
+ OSWriteMemoryBarrier();
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
+ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0),
+ EUR_CR_EVENT_KICK2_NOW_MASK);
+#else
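+	/* The event kicker is an 8-bit counter shared with the microkernel;
+	   advance it modulo 256 before ringing the kick register below. */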
+ *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
+ OSWriteMemoryBarrier();
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
+ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
+ EUR_CR_EVENT_KICK_NOW_MASK);
+#endif /* SGX_FEATURE_MULTI_EVENT_KICK */
+
+ OSMemoryBarrier();
+
+#if defined(PDUMP)
+ /*
+ Dump the host kick.
+ */
+ if (!bPDumpIsSuspended)
+ {
+#if defined(SGX_FEATURE_MULTI_EVENT_KICK)
+ PDUMPREG(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), EUR_CR_EVENT_KICK2_NOW_MASK);
+#else
+ psDevInfo->ui32KernelCCBEventKickerDumpVal = 1;
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "First increment of the SGX event kicker value\n");
+ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
+ psDevInfo->psKernelCCBEventKickerMemInfo,
+ 0,
+ sizeof(IMG_UINT32),
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
+ PDUMPREG(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK);
+#endif /* SGX_FEATURE_MULTI_EVENT_KICK */
+ }
+#endif /* PDUMP */
+
+#if !defined(NO_HARDWARE)
+ /*
+ Wait for the microkernel to finish initialising.
+ */
+ if (PollForValueKM(&psSGXHostCtl->ui32InitStatus,
+ PVRSRV_USSE_EDM_INIT_COMPLETE,
+ PVRSRV_USSE_EDM_INIT_COMPLETE,
+ MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXInitialise: Wait for uKernel initialisation failed"));
+
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+ PVR_DBG_BREAK;
+
+ return PVRSRV_ERROR_RETRY;
+ }
+#endif /* NO_HARDWARE */
+
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Wait for the SGX microkernel initialisation to complete");
+ PDUMPMEMPOL(psSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32InitStatus),
+ PVRSRV_USSE_EDM_INIT_COMPLETE,
+ PVRSRV_USSE_EDM_INIT_COMPLETE,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
+#endif /* PDUMP */
+
+ PVR_ASSERT(psDevInfo->psKernelCCBCtl->ui32ReadOffset == psDevInfo->psKernelCCBCtl->ui32WriteOffset);
+
+ bFirstTime = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function SGXDeinitialise
+
+ @Description
+
+ (client invoked) chip-reset and deinitialisation
+
+ @Input hDevCookie - device info. handle
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie)
+
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *) hDevCookie;
+ PVRSRV_ERROR eError;
+
+ /* Did SGXInitialise map the SGX registers in? */
+ if (psDevInfo->pvRegsBaseKM == IMG_NULL)
+ {
+ return PVRSRV_OK;
+ }
+
+ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asDeinitCommands, SGX_MAX_DEINIT_COMMANDS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXDeinitialise: SGXRunScript failed (%d)", eError));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function DevInitSGXPart1
+
+ @Description
+
+ First stage of device initialisation: allocates the device info block
+ and creates the kernel memory context and heaps
+
+ @Input pvDeviceNode - device info. structure
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR DevInitSGXPart1 (IMG_VOID *pvDeviceNode)
+{
+ IMG_HANDLE hDevMemHeap = IMG_NULL;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ IMG_HANDLE hKernelDevMemContext;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ IMG_UINT32 i;
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
+ PVRSRV_ERROR eError;
+
+ /* pdump info about the core */
+ PDUMPCOMMENT("SGX Core Version Information: %s", SGX_CORE_FRIENDLY_NAME);
+
+ #if defined(SGX_FEATURE_MP)
+ #if !defined(SGX_FEATURE_MP_PLUS)
+ PDUMPCOMMENT("SGX Multi-processor: %d cores", SGX_FEATURE_MP_CORE_COUNT);
+ #else
+ PDUMPCOMMENT("SGX Multi-processor: %d TA cores, %d 3D cores", SGX_FEATURE_MP_CORE_COUNT_TA, SGX_FEATURE_MP_CORE_COUNT_3D);
+ #endif
+ #endif /* SGX_FEATURE_MP */
+
+#if (SGX_CORE_REV == 0)
+ PDUMPCOMMENT("SGX Core Revision Information: head RTL");
+#else
+ PDUMPCOMMENT("SGX Core Revision Information: %d", SGX_CORE_REV);
+#endif
+
+ #if defined(SGX_FEATURE_SYSTEM_CACHE)
+ PDUMPCOMMENT("SGX System Level Cache is present\r\n");
+ #if defined(SGX_BYPASS_SYSTEM_CACHE)
+ PDUMPCOMMENT("SGX System Level Cache is bypassed\r\n");
+ #endif /* SGX_BYPASS_SYSTEM_CACHE */
+ #endif /* SGX_FEATURE_SYSTEM_CACHE */
+
+ PDUMPCOMMENT("SGX Initialisation Part 1");
+
+ /* Allocate device control block */
+ if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_SGXDEV_INFO),
+ (IMG_VOID **)&psDevInfo, IMG_NULL,
+ "SGX Device Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to alloc memory for DevInfo"));
+ return (PVRSRV_ERROR_OUT_OF_MEMORY);
+ }
+ OSMemSet (psDevInfo, 0, sizeof(PVRSRV_SGXDEV_INFO));
+
+	/* set up info from jdisplayconfig.h (variations controlled by build) */
+ psDevInfo->eDeviceType = DEV_DEVICE_TYPE;
+ psDevInfo->eDeviceClass = DEV_DEVICE_CLASS;
+
+	/* Initialise SGX idle status */
+ psDevInfo->bSGXIdle = IMG_TRUE;
+
+	/* Store the devinfo as it's needed by dynamically enumerated systems called from BM */
+ psDeviceNode->pvDevice = (IMG_PVOID)psDevInfo;
+
+ /* get heap info from the devnode */
+ psDevInfo->ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
+ psDevInfo->pvDeviceMemoryHeap = (IMG_VOID*)psDeviceMemoryHeap;
+
+ /* create the kernel memory context */
+ hKernelDevMemContext = BM_CreateContext(psDeviceNode,
+ &sPDDevPAddr,
+ IMG_NULL,
+ IMG_NULL);
+ if (hKernelDevMemContext == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1: Failed BM_CreateContext"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psDevInfo->sKernelPDDevPAddr = sPDDevPAddr;
+
+ /* create the kernel, shared and shared_exported heaps */
+ for(i=0; i<psDeviceNode->sDevMemoryInfo.ui32HeapCount; i++)
+ {
+ switch(psDeviceMemoryHeap[i].DevMemHeapType)
+ {
+ case DEVICE_MEMORY_HEAP_KERNEL:
+ case DEVICE_MEMORY_HEAP_SHARED:
+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
+ {
+ /* Shared PB heap could be zero size */
+ if (psDeviceMemoryHeap[i].ui32HeapSize > 0)
+ {
+ hDevMemHeap = BM_CreateHeap (hKernelDevMemContext,
+ &psDeviceMemoryHeap[i]);
+ /*
+ in the case of kernel context heaps just store
+ the heap handle in the heap info structure
+ */
+ psDeviceMemoryHeap[i].hDevMemHeap = hDevMemHeap;
+ }
+ break;
+ }
+ }
+ }
+#if defined(PDUMP)
+ if(hDevMemHeap)
+ {
+ /* set up the MMU pdump info */
+ psDevInfo->sMMUAttrib = *((BM_HEAP*)hDevMemHeap)->psMMUAttrib;
+ }
+#endif
+ eError = MMU_BIFResetPDAlloc(psDevInfo);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1: Failed to alloc memory for BIF reset"));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function SGXGetInfoForSrvinitKM
+
+ @Description
+
+ Get SGX-related information needed by the initialisation server
+
+ @Input hDevHandle - device handle
+ psInitInfo - pointer to structure for returned information
+
+ @Output psInitInfo - pointer to structure containing returned information
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, PVRSRV_HEAP_INFO_KM *pasHeapInfo, IMG_DEV_PHYADDR *psPDDevPAddr)
+#else
+PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo)
+#endif
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ PVRSRV_ERROR eError;
+
+ PDUMPCOMMENT("SGXGetInfoForSrvinit");
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
+ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ *psPDDevPAddr = psDevInfo->sKernelPDDevPAddr;
+
+ eError = PVRSRVGetDeviceMemHeapsKM(hDevHandle, pasHeapInfo);
+#else
+ psInitInfo->sPDDevPAddr = psDevInfo->sKernelPDDevPAddr;
+
+ eError = PVRSRVGetDeviceMemHeapsKM(hDevHandle, &psInitInfo->asHeapInfo[0]);
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXGetInfoForSrvinit: PVRSRVGetDeviceMemHeapsKM failed (%d)", eError));
+ return eError;
+ }
+
+ return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function DevInitSGXPart2KM
+
+ @Description
+
+ Second stage of device initialisation, using info supplied by the client
+
+ @Input psPerProc - per-process data
+ @Input hDevHandle - device handle
+ @Input psInitInfo - initialisation info from the client
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR DevInitSGXPart2KM (PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevHandle,
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_BRIDGE_INIT_INFO_KM *psInitInfo)
+#else
+ SGX_BRIDGE_INIT_INFO *psInitInfo)
+#endif
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ PVRSRV_ERROR eError;
+ SGX_DEVICE_MAP *psSGXDeviceMap;
+ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
+
+ PDUMPCOMMENT("SGX Initialisation Part 2");
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
+ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ /*
+ Init devinfo
+ */
+ eError = InitDevInfo(psPerProc, psDeviceNode, psInitInfo);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to initialise DevInfo"));
+ goto failed_init_dev_info;
+ }
+
+
+ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
+ (IMG_VOID**)&psSGXDeviceMap);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to get device memory map!"));
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+ /* Registers already mapped? */
+ if (psSGXDeviceMap->pvRegsCpuVBase)
+ {
+ psDevInfo->pvRegsBaseKM = psSGXDeviceMap->pvRegsCpuVBase;
+ }
+ else
+ {
+ /* Map Regs */
+ psDevInfo->pvRegsBaseKM = OSMapPhysToLin(psSGXDeviceMap->sRegsCpuPBase,
+ psSGXDeviceMap->ui32RegsSize,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ IMG_NULL);
+ if (!psDevInfo->pvRegsBaseKM)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in regs\n"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ }
+ psDevInfo->ui32RegSize = psSGXDeviceMap->ui32RegsSize;
+ psDevInfo->sRegsPhysBase = psSGXDeviceMap->sRegsSysPBase;
+
+
+#if defined(SGX_FEATURE_HOST_PORT)
+ if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT)
+ {
+ /* Map Host Port */
+ psDevInfo->pvHostPortBaseKM = OSMapPhysToLin(psSGXDeviceMap->sHPCpuPBase,
+ psSGXDeviceMap->ui32HPSize,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ IMG_NULL);
+ if (!psDevInfo->pvHostPortBaseKM)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in host port\n"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ psDevInfo->ui32HPSize = psSGXDeviceMap->ui32HPSize;
+ psDevInfo->sHPSysPAddr = psSGXDeviceMap->sHPSysPBase;
+ }
+#endif/* #ifdef SGX_FEATURE_HOST_PORT */
+
+#if defined (SYS_USING_INTERRUPTS)
+
+ /* Set up ISR callback information. */
+ psDeviceNode->pvISRData = psDeviceNode;
+ /* ISR handler address was set up earlier */
+ PVR_ASSERT(psDeviceNode->pfnDeviceISR == SGX_ISRHandler);
+
+#endif /* SYS_USING_INTERRUPTS */
+
+ /* Prevent the microkernel being woken up before there is something to do. */
+ psDevInfo->psSGXHostCtl->ui32PowerStatus |= PVRSRV_USSE_EDM_POWMAN_NO_WORK;
+ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
+ /* Register the device with the power manager. */
+ eError = PVRSRVRegisterPowerDevice (psDeviceNode->sDevId.ui32DeviceIndex,
+ &SGXPrePowerState, &SGXPostPowerState,
+ &SGXPreClockSpeedChange, &SGXPostClockSpeedChange,
+ (IMG_HANDLE)psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_OFF,
+ eDefaultPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: failed to register device with power manager"));
+ return eError;
+ }
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+ /* map the external system cache control registers into the SGX MMU */
+ psDevInfo->ui32ExtSysCacheRegsSize = psSGXDeviceMap->ui32ExtSysCacheRegsSize;
+ psDevInfo->sExtSysCacheRegsDevPBase = psSGXDeviceMap->sExtSysCacheRegsDevPBase;
+ eError = MMU_MapExtSystemCacheRegs(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map external system cache registers"));
+ return eError;
+ }
+#endif /* SUPPORT_EXTERNAL_SYSTEM_CACHE */
+
+ /*
+ Initialise the Kernel CCB
+ */
+ OSMemSet(psDevInfo->psKernelCCB, 0, sizeof(PVRSRV_SGX_KERNEL_CCB));
+ OSMemSet(psDevInfo->psKernelCCBCtl, 0, sizeof(PVRSRV_SGX_CCB_CTL));
+ OSMemSet(psDevInfo->pui32KernelCCBEventKicker, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker));
+ PDUMPCOMMENT("Initialise Kernel CCB");
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBMemInfo, 0, sizeof(PVRSRV_SGX_KERNEL_CCB), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBMemInfo));
+ PDUMPCOMMENT("Initialise Kernel CCB Control");
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBCtlMemInfo, 0, sizeof(PVRSRV_SGX_CCB_CTL), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBCtlMemInfo));
+ PDUMPCOMMENT("Initialise Kernel CCB Event Kicker");
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
+
+ return PVRSRV_OK;
+
+failed_init_dev_info:
+ return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function DevDeInitSGX
+
+ @Description
+
+ Reset and deinitialise Chip
+
+ @Input pvDeviceNode - device info. structure
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR DevDeInitSGX (IMG_VOID *pvDeviceNode)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Heap;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+ SGX_DEVICE_MAP *psSGXDeviceMap;
+
+ if (!psDevInfo)
+ {
+ /* Can happen if DevInitSGX failed */
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Null DevInfo"));
+ return PVRSRV_OK;
+ }
+
+#if defined(SUPPORT_HW_RECOVERY)
+ if (psDevInfo->hTimer)
+ {
+ eError = OSRemoveTimer(psDevInfo->hTimer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to remove timer"));
+ return eError;
+ }
+ psDevInfo->hTimer = IMG_NULL;
+ }
+#endif /* SUPPORT_HW_RECOVERY */
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+ /* unmap the external system cache control registers */
+ eError = MMU_UnmapExtSystemCacheRegs(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to unmap ext system cache registers"));
+ return eError;
+ }
+#endif /* SUPPORT_EXTERNAL_SYSTEM_CACHE */
+
+ MMU_BIFResetPDFree(psDevInfo);
+
+ /*
+		Deinitialise the DevInfo
+ */
+ DeinitDevInfo(psDevInfo);
+
+ /* Destroy heaps. */
+ psDeviceMemoryHeap = (DEVICE_MEMORY_HEAP_INFO *)psDevInfo->pvDeviceMemoryHeap;
+ for(ui32Heap=0; ui32Heap<psDeviceNode->sDevMemoryInfo.ui32HeapCount; ui32Heap++)
+ {
+ switch(psDeviceMemoryHeap[ui32Heap].DevMemHeapType)
+ {
+ case DEVICE_MEMORY_HEAP_KERNEL:
+ case DEVICE_MEMORY_HEAP_SHARED:
+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
+ {
+ if (psDeviceMemoryHeap[ui32Heap].hDevMemHeap != IMG_NULL)
+ {
+ BM_DestroyHeap(psDeviceMemoryHeap[ui32Heap].hDevMemHeap);
+ }
+ break;
+ }
+ }
+ }
+
+ /* Destroy the kernel context. */
+ eError = BM_DestroyContext(psDeviceNode->sDevMemoryInfo.pBMKernelContext, IMG_NULL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX : Failed to destroy kernel context"));
+ return eError;
+ }
+
+ /* remove the device from the power manager */
+ eError = PVRSRVRemovePowerDevice (((PVRSRV_DEVICE_NODE*)pvDeviceNode)->sDevId.ui32DeviceIndex);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
+ (IMG_VOID**)&psSGXDeviceMap);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to get device memory map!"));
+ return eError;
+ }
+
+ /* Only unmap the registers if they were mapped here */
+ if (!psSGXDeviceMap->pvRegsCpuVBase)
+ {
+ /* UnMap Regs */
+ if (psDevInfo->pvRegsBaseKM != IMG_NULL)
+ {
+ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
+ psDevInfo->ui32RegSize,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ IMG_NULL);
+ }
+ }
+
+#if defined(SGX_FEATURE_HOST_PORT)
+ if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT)
+ {
+ /* unMap Host Port */
+ if (psDevInfo->pvHostPortBaseKM != IMG_NULL)
+ {
+ OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM,
+ psDevInfo->ui32HPSize,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ IMG_NULL);
+ }
+ }
+#endif /* #ifdef SGX_FEATURE_HOST_PORT */
+
+
+ /* DeAllocate devinfo */
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_SGXDEV_INFO),
+ psDevInfo,
+ 0);
+
+ psDeviceNode->pvDevice = IMG_NULL;
+
+ if (psDeviceMemoryHeap != IMG_NULL)
+ {
+ /* Free the device memory heap info. */
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID,
+ psDeviceMemoryHeap,
+ 0);
+ }
+
+ return PVRSRV_OK;
+}
+
+
+#if defined(RESTRICTED_REGISTERS) && defined(SGX_FEATURE_MP)
+
+/*!
+*******************************************************************************
+
+ @Function SGXDumpMasterDebugReg
+
+ @Description
+
+ Dump a single SGX debug register value
+
+ @Input psDevInfo - SGX device info
+ @Input pszName - string used for logging
+ @Input ui32RegAddr - SGX register offset
+
+ @Return IMG_VOID
+
+******************************************************************************/
+static IMG_VOID SGXDumpMasterDebugReg (PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_CHAR *pszName,
+ IMG_UINT32 ui32RegAddr)
+{
+ IMG_UINT32 ui32RegVal;
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+ PVR_LOG(("(HYD) %s%08X", pszName, ui32RegVal));
+}
+
+#endif /* defined(RESTRICTED_REGISTERS) */
+
+/*!
+*******************************************************************************
+
+ @Function SGXDumpDebugReg
+
+ @Description
+
+ Dump a single SGX debug register value
+
+ @Input psDevInfo - SGX device info
+ @Input ui32CoreNum - processor number
+ @Input pszName - string used for logging
+ @Input ui32RegAddr - SGX register offset
+
+ @Return IMG_VOID
+
+******************************************************************************/
+static IMG_VOID SGXDumpDebugReg (PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32CoreNum,
+ IMG_CHAR *pszName,
+ IMG_UINT32 ui32RegAddr)
+{
+ IMG_UINT32 ui32RegVal;
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(ui32RegAddr, ui32CoreNum));
+ PVR_LOG(("(P%u) %s%08X", ui32CoreNum, pszName, ui32RegVal));
+}
+
+#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) || defined(FIX_HW_BRN_31620)
+static INLINE IMG_UINT32 GetDirListBaseReg(IMG_UINT32 ui32Index)
+{
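+	/* EUR_CR_BIF_DIR_LIST_BASE0 is not adjacent to BASE1..BASEn in the
+	   register map, so index 0 is special-cased; the remaining bases are
+	   contiguous 32-bit registers starting at EUR_CR_BIF_DIR_LIST_BASE1. */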
+ if (ui32Index == 0)
+ {
+ return EUR_CR_BIF_DIR_LIST_BASE0;
+ }
+ else
+ {
+ return (EUR_CR_BIF_DIR_LIST_BASE1 + ((ui32Index - 1) * 0x4));
+ }
+}
+#endif
+
+void dsscomp_kdump(void);
+/*!
+*******************************************************************************
+
+ @Function SGXDumpDebugInfo
+
+ @Description
+
+ Dump useful debugging info
+
+ @Input psDevInfo - SGX device info
+ @Input bDumpSGXRegs - Whether to dump SGX debug registers. Must not be done
+ when SGX is not powered.
+
+ @Return IMG_VOID
+
+******************************************************************************/
+IMG_VOID SGXDumpDebugInfo (PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bDumpSGXRegs)
+{
+ IMG_UINT32 ui32CoreNum;
+
+ dsscomp_kdump();
+
+ PVR_LOG(("SGX debug (%s)", PVRVERSION_STRING));
+
+ if (bDumpSGXRegs)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Linear): 0x%08X", (IMG_UINTPTR_T)psDevInfo->pvRegsBaseKM));
+ PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Physical): 0x%08X", psDevInfo->sRegsPhysBase.uiAddr));
+
+ SGXDumpDebugReg(psDevInfo, 0, "EUR_CR_CORE_ID: ", EUR_CR_CORE_ID);
+ SGXDumpDebugReg(psDevInfo, 0, "EUR_CR_CORE_REVISION: ", EUR_CR_CORE_REVISION);
+#if defined(RESTRICTED_REGISTERS) && defined(SGX_FEATURE_MP)
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_BIF_INT_STAT: ", EUR_CR_MASTER_BIF_INT_STAT);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_BIF_FAULT: ",EUR_CR_MASTER_BIF_FAULT);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_CLKGATESTATUS2: ",EUR_CR_MASTER_CLKGATESTATUS2 );
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_PIM_STATUS: ",EUR_CR_MASTER_VDM_PIM_STATUS);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_BIF_BANK_SET: ",EUR_CR_MASTER_BIF_BANK_SET);
+
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_EVENT_STATUS: ",EUR_CR_MASTER_EVENT_STATUS);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_EVENT_STATUS2: ",EUR_CR_MASTER_EVENT_STATUS2);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_MP_PRIMITIVE: ",EUR_CR_MASTER_MP_PRIMITIVE);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_DPM_DPLIST_STATUS: ",EUR_CR_MASTER_DPM_DPLIST_STATUS);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_DPM_PROACTIVE_PIM_SPEC: ",EUR_CR_MASTER_DPM_PROACTIVE_PIM_SPEC);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_DPM_PAGE_MANAGEOP:          ",EUR_CR_MASTER_DPM_PAGE_MANAGEOP);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_SNAPSHOT: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_SNAPSHOT);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_LOAD_STATUS: ",EUR_CR_MASTER_VDM_CONTEXT_LOAD_STATUS);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STREAM: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_STREAM);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STATUS: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_STATUS);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE0: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE0);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE1: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE1);
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_WAIT_FOR_KICK: ",EUR_CR_MASTER_VDM_WAIT_FOR_KICK);
+#endif
+ for (ui32CoreNum = 0; ui32CoreNum < SGX_FEATURE_MP_CORE_COUNT_3D; ui32CoreNum++)
+ {
+ /* Dump HW event status */
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_STATUS: ", EUR_CR_EVENT_STATUS);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_STATUS2: ", EUR_CR_EVENT_STATUS2);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_CTRL: ", EUR_CR_BIF_CTRL);
+ #if defined(EUR_CR_BIF_BANK0)
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_BANK0: ", EUR_CR_BIF_BANK0);
+ #endif
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_INT_STAT: ", EUR_CR_BIF_INT_STAT);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_FAULT: ", EUR_CR_BIF_FAULT);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_MEM_REQ_STAT: ", EUR_CR_BIF_MEM_REQ_STAT);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_CLKGATECTL: ", EUR_CR_CLKGATECTL);
+ #if defined(EUR_CR_PDS_PC_BASE)
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_PDS_PC_BASE: ", EUR_CR_PDS_PC_BASE);
+ #endif
+#if defined(RESTRICTED_REGISTERS)
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_BANK_SET: ", EUR_CR_BIF_BANK_SET);
+
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_CLKGATESTATUS: ", EUR_CR_CLKGATESTATUS);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_MTE_CTRL: ", EUR_CR_MTE_CTRL);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_OTHER_PDS_EXEC: ", EUR_CR_EVENT_OTHER_PDS_EXEC);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_OTHER_PDS_DATA: ", EUR_CR_EVENT_OTHER_PDS_DATA);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_OTHER_PDS_INFO: ", EUR_CR_EVENT_OTHER_PDS_INFO);
+
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_ZLS_PAGE_THRESHOLD: ", EUR_CR_DPM_ZLS_PAGE_THRESHOLD);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_TA_GLOBAL_LIST: ", EUR_CR_DPM_TA_GLOBAL_LIST);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_STATE_CONTEXT_ID: ", EUR_CR_DPM_STATE_CONTEXT_ID);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_CONTEXT_PB_BASE: ", EUR_CR_DPM_CONTEXT_PB_BASE);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_TA_ALLOC_FREE_LIST_STATUS1: ", EUR_CR_DPM_TA_ALLOC_FREE_LIST_STATUS1);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_3D_FREE_LIST_STATUS1: ", EUR_CR_DPM_3D_FREE_LIST_STATUS1);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_TA_ALLOC_FREE_LIST_STATUS2: ", EUR_CR_DPM_TA_ALLOC_FREE_LIST_STATUS2);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_3D_FREE_LIST_STATUS2: ", EUR_CR_DPM_3D_FREE_LIST_STATUS2);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_ABORT_STATUS_MTILE: ", EUR_CR_DPM_ABORT_STATUS_MTILE);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_PAGE_STATUS: ", EUR_CR_DPM_PAGE_STATUS);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_PAGE: ", EUR_CR_DPM_PAGE);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_GLOBAL_PAGE_STATUS: ", EUR_CR_DPM_GLOBAL_PAGE_STATUS);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_VDM_CONTEXT_LOAD_STATUS: ", EUR_CR_VDM_CONTEXT_LOAD_STATUS);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_VDM_CONTEXT_STORE_STATUS: ", EUR_CR_VDM_CONTEXT_STORE_STATUS);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_VDM_TASK_KICK_STATUS: ", EUR_CR_VDM_TASK_KICK_STATUS);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_VDM_CONTEXT_STORE_STATE0: ", EUR_CR_VDM_CONTEXT_STORE_STATE0);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_VDM_CONTEXT_STORE_STATE1: ", EUR_CR_VDM_CONTEXT_STORE_STATE1);
+		SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_REQUESTING:             ", EUR_CR_DPM_REQUESTING);
+
+#endif
+ }
+
+ #if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && !defined(FIX_HW_BRN_31620)
+ {
+ IMG_UINT32 ui32RegVal;
+ IMG_UINT32 ui32PDDevPAddr;
+
+ /*
+			If there was an SGX page fault, check the page table to see
+			whether the host agrees that the faulting address is unmapped
+ */
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
+ if (ui32RegVal & EUR_CR_BIF_INT_STAT_PF_N_RW_MASK)
+ {
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
+ ui32RegVal &= EUR_CR_BIF_FAULT_ADDR_MASK;
+ ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0);
+ ui32PDDevPAddr &= EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK;
+ MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32RegVal);
+ }
+ }
+ #else
+ {
+ IMG_UINT32 ui32FaultAddress;
+ IMG_UINT32 ui32Bank0;
+ IMG_UINT32 ui32DirListIndex;
+ IMG_UINT32 ui32PDDevPAddr;
+
+ ui32FaultAddress = OSReadHWReg(psDevInfo->pvRegsBaseKM,
+ EUR_CR_BIF_FAULT);
+ ui32FaultAddress = ui32FaultAddress & EUR_CR_BIF_FAULT_ADDR_MASK;
+
+ ui32Bank0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0);
+
+		/* Check the EDM's memory context */
+ ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_EDM_MASK) >> EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT;
+ ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM,
+ GetDirListBaseReg(ui32DirListIndex));
+ PVR_LOG(("Checking EDM memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr));
+ MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress);
+
+ /* Check the TA's memory context */
+ ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_TA_MASK) >> EUR_CR_BIF_BANK0_INDEX_TA_SHIFT;
+ ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM,
+ GetDirListBaseReg(ui32DirListIndex));
+ PVR_LOG(("Checking TA memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr));
+ MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress);
+
+ /* Check the 3D's memory context */
+ ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_3D_MASK) >> EUR_CR_BIF_BANK0_INDEX_3D_SHIFT;
+ ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM,
+ GetDirListBaseReg(ui32DirListIndex));
+ PVR_LOG(("Checking 3D memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr));
+ MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress);
+
+ #if defined(EUR_CR_BIF_BANK0_INDEX_2D_MASK)
+ /* Check the 2D's memory context */
+ ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_2D_MASK) >> EUR_CR_BIF_BANK0_INDEX_2D_SHIFT;
+ ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM,
+ GetDirListBaseReg(ui32DirListIndex));
+ PVR_LOG(("Checking 2D memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr));
+ MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress);
+ #endif
+
+ #if defined(EUR_CR_BIF_BANK0_INDEX_PTLA_MASK)
+		/* Check the PTLA's memory context */
+ ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_PTLA_MASK) >> EUR_CR_BIF_BANK0_INDEX_PTLA_SHIFT;
+ ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM,
+ GetDirListBaseReg(ui32DirListIndex));
+ PVR_LOG(("Checking PTLA memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr));
+ MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress);
+ #endif
+
+ #if defined(EUR_CR_BIF_BANK0_INDEX_HOST_MASK)
+ /* Check the Host's memory context */
+ ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_HOST_MASK) >> EUR_CR_BIF_BANK0_INDEX_HOST_SHIFT;
+ ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM,
+ GetDirListBaseReg(ui32DirListIndex));
+ PVR_LOG(("Checking Host memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr));
+ MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress);
+ #endif
+ }
+ #endif
+ }
+ /*
+ Dump out the outstanding queue items.
+ */
+ QueueDumpDebugInfo();
+
+ {
+ /*
+ Dump out the Host control.
+ */
+ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
+ IMG_UINT32 *pui32HostCtlBuffer = (IMG_UINT32 *)psSGXHostCtl;
+ IMG_UINT32 ui32LoopCounter;
+
+ if (psSGXHostCtl->ui32AssertFail != 0)
+ {
+ PVR_LOG(("SGX Microkernel assert fail: 0x%08X", psSGXHostCtl->ui32AssertFail));
+ psSGXHostCtl->ui32AssertFail = 0;
+ }
+
+ PVR_LOG(("SGX Host control:"));
+
+ for (ui32LoopCounter = 0;
+ ui32LoopCounter < sizeof(*psDevInfo->psSGXHostCtl) / sizeof(*pui32HostCtlBuffer);
+ ui32LoopCounter += 4)
+ {
+ PVR_LOG(("\t(HC-%X) 0x%08X 0x%08X 0x%08X 0x%08X", ui32LoopCounter * sizeof(*pui32HostCtlBuffer),
+ pui32HostCtlBuffer[ui32LoopCounter + 0], pui32HostCtlBuffer[ui32LoopCounter + 1],
+ pui32HostCtlBuffer[ui32LoopCounter + 2], pui32HostCtlBuffer[ui32LoopCounter + 3]));
+ }
+ }
+
+ {
+ /*
+ Dump out the TA/3D control.
+ */
+ IMG_UINT32 *pui32TA3DCtlBuffer = psDevInfo->psKernelSGXTA3DCtlMemInfo->pvLinAddrKM;
+ IMG_UINT32 ui32LoopCounter;
+
+ PVR_LOG(("SGX TA/3D control:"));
+
+ for (ui32LoopCounter = 0;
+ ui32LoopCounter < psDevInfo->psKernelSGXTA3DCtlMemInfo->uAllocSize / sizeof(*pui32TA3DCtlBuffer);
+ ui32LoopCounter += 4)
+ {
+ PVR_LOG(("\t(T3C-%X) 0x%08X 0x%08X 0x%08X 0x%08X", ui32LoopCounter * sizeof(*pui32TA3DCtlBuffer),
+ pui32TA3DCtlBuffer[ui32LoopCounter + 0], pui32TA3DCtlBuffer[ui32LoopCounter + 1],
+ pui32TA3DCtlBuffer[ui32LoopCounter + 2], pui32TA3DCtlBuffer[ui32LoopCounter + 3]));
+ }
+ }
+
+ #if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+ {
+ IMG_UINT32 *pui32MKTraceBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
+ IMG_UINT32 ui32LastStatusCode, ui32WriteOffset;
+
+ ui32LastStatusCode = *pui32MKTraceBuffer;
+ pui32MKTraceBuffer++;
+ ui32WriteOffset = *pui32MKTraceBuffer;
+ pui32MKTraceBuffer++;
+
+ PVR_LOG(("Last SGX microkernel status code: %08X %s",
+ ui32LastStatusCode, SGXUKernelStatusString(ui32LastStatusCode)));
+
+ #if defined(PVRSRV_DUMP_MK_TRACE)
+ /*
+ Dump the raw microkernel trace buffer to the log.
+ */
+ {
+ IMG_UINT32 ui32LoopCounter;
+
+ for (ui32LoopCounter = 0;
+ ui32LoopCounter < SGXMK_TRACE_BUFFER_SIZE;
+ ui32LoopCounter++)
+ {
+ IMG_UINT32 *pui32BufPtr;
+ pui32BufPtr = pui32MKTraceBuffer +
+ (((ui32WriteOffset + ui32LoopCounter) % SGXMK_TRACE_BUFFER_SIZE) * 4);
+ PVR_LOG(("\t(MKT-%X) %08X %08X %08X %08X %s", ui32LoopCounter,
+ pui32BufPtr[2], pui32BufPtr[3], pui32BufPtr[1], pui32BufPtr[0],
+ SGXUKernelStatusString(pui32BufPtr[0])));
+ }
+ }
+ #endif /* PVRSRV_DUMP_MK_TRACE */
+ }
+ #endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */
+
+ {
+ /*
+ Dump out the kernel CCB.
+ */
+ PVR_LOG(("SGX Kernel CCB WO:0x%X RO:0x%X",
+ psDevInfo->psKernelCCBCtl->ui32WriteOffset,
+ psDevInfo->psKernelCCBCtl->ui32ReadOffset));
+
+ #if defined(PVRSRV_DUMP_KERNEL_CCB)
+ {
+ IMG_UINT32 ui32LoopCounter;
+
+ for (ui32LoopCounter = 0;
+ ui32LoopCounter < sizeof(psDevInfo->psKernelCCB->asCommands) /
+ sizeof(psDevInfo->psKernelCCB->asCommands[0]);
+ ui32LoopCounter++)
+ {
+ SGXMKIF_COMMAND *psCommand = &psDevInfo->psKernelCCB->asCommands[ui32LoopCounter];
+
+ PVR_LOG(("\t(KCCB-%X) %08X %08X - %08X %08X %08X %08X", ui32LoopCounter,
+ psCommand->ui32ServiceAddress, psCommand->ui32CacheControl,
+ psCommand->ui32Data[0], psCommand->ui32Data[1],
+ psCommand->ui32Data[2], psCommand->ui32Data[3]));
+ }
+ }
+ #endif /* PVRSRV_DUMP_KERNEL_CCB */
+ }
+ #if defined (TTRACE)
+ PVRSRVDumpTimeTraceBuffers();
+ #endif
+
+}
+
+
+#if defined(SYS_USING_INTERRUPTS) || defined(SUPPORT_HW_RECOVERY)
+/*!
+*******************************************************************************
+
+ @Function HWRecoveryResetSGX
+
+ @Description
+
+ Resets and re-initialises SGX after a lockup or fault has been detected.
+
+ Note: may be called from an ISR so should not call pdump.
+
+ @Input psDeviceNode - device node
+
+ @Input ui32Component - core component to reset
+
+ @Input ui32CallerID - ID of the calling context (e.g. ISR_ID)
+
+ @Return IMG_VOID
+
+******************************************************************************/
+static
+IMG_VOID HWRecoveryResetSGX (PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Component,
+ IMG_UINT32 ui32CallerID)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
+
+#if defined(SUPPORT_HWRECOVERY_TRACE_LIMIT)
+ static IMG_UINT32 ui32Clockinus = 0;
+ static IMG_UINT32 ui32HWRecoveryCount=0;
+ IMG_UINT32 ui32TempClockinus=0;
+#endif
+
+ PVR_UNREFERENCED_PARAMETER(ui32Component);
+
+ /* Debug dumps associated with HWR can be long. Delay system suspend */
+ SysLockSystemSuspend();
+
+ /*
+ Ensure that hardware recovery is serialised with any power transitions.
+ */
+ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
+ if(eError != PVRSRV_OK)
+ {
+ /*
+ Unable to obtain lock because there is already a power transition
+ in progress.
+ */
+ PVR_DPF((PVR_DBG_WARNING,"HWRecoveryResetSGX: Power transition in progress"));
+ return;
+ }
+
+ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_HWR;
+
+ PVR_LOG(("HWRecoveryResetSGX: SGX Hardware Recovery triggered"));
+
+#if defined(SUPPORT_HWRECOVERY_TRACE_LIMIT)
+/*
+ * The following defines are system specific and should be defined in
+ * the corresponding sysconfig.h file. The values indicated are examples only.
+ SYS_SGX_HWRECOVERY_TRACE_RESET_TIME_PERIOD 5000000 //(5 Seconds)
+ SYS_SGX_MAX_HWRECOVERY_OCCURANCE_COUNT 5
+ */
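+	/*
+	 * Rate-limit hardware recovery: recoveries occurring within the reset
+	 * time period are counted and OSPanic() is called once the occurrence
+	 * limit is reached; otherwise the window restarts and the usual debug
+	 * dump runs.
+	 */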
+ ui32TempClockinus = OSClockus();
+	if ((ui32TempClockinus - ui32Clockinus) < SYS_SGX_HWRECOVERY_TRACE_RESET_TIME_PERIOD)
+	{
+		ui32HWRecoveryCount++;
+		if (SYS_SGX_MAX_HWRECOVERY_OCCURANCE_COUNT <= ui32HWRecoveryCount)
+		{
+			OSPanic();
+		}
+	}
+	else
+	{
+		ui32Clockinus = ui32TempClockinus;
+		SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_TRUE);
+		ui32HWRecoveryCount = 0;
+	}
+#else
+ SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_TRUE);
+#endif
+
+ /* Suspend pdumping. */
+ PDUMPSUSPEND();
+
+ /* Reset and re-initialise SGX. */
+ eError = SGXInitialise(psDevInfo, IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"HWRecoveryResetSGX: SGXInitialise failed (%d)", eError));
+ }
+
+ /* Resume pdumping. */
+ PDUMPRESUME();
+
+ PVRSRVPowerUnlock(ui32CallerID);
+
+ SysUnlockSystemSuspend();
+
+ /* Send a dummy kick so that we start processing again */
+ SGXScheduleProcessQueuesKM(psDeviceNode);
+
+ /* Flush any old commands from the queues. */
+ PVRSRVProcessQueues(IMG_TRUE);
+}
+#endif /* #if defined(SYS_USING_INTERRUPTS) || defined(SUPPORT_HW_RECOVERY) */
+
+
+#if defined(SUPPORT_HW_RECOVERY)
+/*!
+******************************************************************************
+
+ @Function SGXOSTimer
+
+ @Description
+
+ Periodic timer callback: checks whether the microkernel is still
+ scheduling EDM tasks and triggers hardware recovery if SGX appears
+ to have locked up.
+
+ @Input pvData - private data (the PVRSRV_DEVICE_NODE)
+
+ @Return IMG_VOID
+
+******************************************************************************/
+IMG_VOID SGXOSTimer(IMG_VOID *pvData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ static IMG_UINT32 ui32EDMTasks = 0;
+ static IMG_UINT32 ui32LockupCounter = 0; /* To prevent false positives */
+ static IMG_UINT32 ui32OpenCLDelayCounter = 0;
+ static IMG_UINT32 ui32NumResets = 0;
+#if defined(FIX_HW_BRN_31093)
+ static IMG_BOOL bBRN31093Inval = IMG_FALSE;
+#endif
+ IMG_UINT32 ui32CurrentEDMTasks;
+ IMG_UINT32 ui32CurrentOpenCLDelayCounter=0;
+ IMG_BOOL bLockup = IMG_FALSE;
+ IMG_BOOL bPoweredDown;
+
+ /* increment a timestamp */
+ psDevInfo->ui32TimeStamp++;
+
+#if defined(NO_HARDWARE)
+ bPoweredDown = IMG_TRUE;
+#else
+ bPoweredDown = (SGXIsDevicePowered(psDeviceNode)) ? IMG_FALSE : IMG_TRUE;
+#endif /* NO_HARDWARE */
+
+ /*
+ * Check whether EDM timer tasks are getting scheduled. If not, assume
+ * that SGX has locked up and reset the chip.
+ */
+
+ /* Check whether the timer should be running */
+ if (bPoweredDown)
+ {
+ ui32LockupCounter = 0;
+ #if defined(FIX_HW_BRN_31093)
+ bBRN31093Inval = IMG_FALSE;
+ #endif
+ }
+ else
+ {
+ /* The PDS timer should be running. */
+ ui32CurrentEDMTasks = OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg0);
+ if (psDevInfo->ui32EDMTaskReg1 != 0)
+ {
+ ui32CurrentEDMTasks ^= OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg1);
+ }
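+		/*
+		 * Both task registers are combined (XOR) into a single activity
+		 * signature; if the signature is unchanged across successive
+		 * timer ticks and no reset has occurred, the EDM is assumed to
+		 * have stalled.
+		 */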
+ if ((ui32CurrentEDMTasks == ui32EDMTasks) &&
+ (psDevInfo->ui32NumResets == ui32NumResets))
+ {
+ ui32LockupCounter++;
+ if (ui32LockupCounter == 3)
+ {
+ ui32LockupCounter = 0;
+ ui32CurrentOpenCLDelayCounter = (psDevInfo->psSGXHostCtl)->ui32OpenCLDelayCount;
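+				/*
+				 * Long-running OpenCL work can legitimately hold off the
+				 * EDM; while the delay count is non-zero, count it down
+				 * rather than declaring a lockup.
+				 */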
+				if (ui32CurrentOpenCLDelayCounter != 0)
+				{
+					if (ui32OpenCLDelayCounter != ui32CurrentOpenCLDelayCounter)
+					{
+						ui32OpenCLDelayCounter = ui32CurrentOpenCLDelayCounter;
+					}
+					else
+					{
+						ui32OpenCLDelayCounter -= 1;
+						(psDevInfo->psSGXHostCtl)->ui32OpenCLDelayCount = ui32OpenCLDelayCounter;
+					}
+ goto SGX_NoUKernel_LockUp;
+ }
+
+
+ #if defined(FIX_HW_BRN_31093)
+ if (bBRN31093Inval == IMG_FALSE)
+ {
+ /* It could be a BIF hang so do a INVAL_PTE */
+ #if defined(FIX_HW_BRN_29997)
+ IMG_UINT32 ui32BIFCtrl;
+ /* Pause the BIF before issuing the invalidate */
+ ui32BIFCtrl = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_PAUSE_MASK);
+ /* delay for 200 clocks */
+ SGXWaitClocks(psDevInfo, 200);
+ #endif
+				/* Flag that we have attempted to un-block the BIF */
+ bBRN31093Inval = IMG_TRUE;
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL_INVAL, EUR_CR_BIF_CTRL_INVAL_PTE_MASK);
+ /* delay for 200 clocks */
+ SGXWaitClocks(psDevInfo, 200);
+
+ #if defined(FIX_HW_BRN_29997)
+ /* un-pause the BIF by restoring the BIF_CTRL */
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
+ #endif
+ }
+ else
+ #endif
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXOSTimer() detected SGX lockup (0x%x tasks)", ui32EDMTasks));
+
+ bLockup = IMG_TRUE;
+ (psDevInfo->psSGXHostCtl)->ui32OpenCLDelayCount = 0;
+ }
+ }
+ }
+ else
+ {
+ #if defined(FIX_HW_BRN_31093)
+ bBRN31093Inval = IMG_FALSE;
+ #endif
+ ui32LockupCounter = 0;
+ ui32EDMTasks = ui32CurrentEDMTasks;
+ ui32NumResets = psDevInfo->ui32NumResets;
+ }
+ }
+SGX_NoUKernel_LockUp:
+
+ if (bLockup)
+ {
+ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
+
+ /* increment the counter so we know the host detected the lockup */
+ psSGXHostCtl->ui32HostDetectedLockups ++;
+
+ /* Reset the chip and process the queues. */
+ HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID);
+ }
+}
+#endif /* defined(SUPPORT_HW_RECOVERY) */
+
+
+
+#if defined(SYS_USING_INTERRUPTS)
+
+/*
+ SGX ISR Handler
+*/
+IMG_BOOL SGX_ISRHandler (IMG_VOID *pvData)
+{
+ IMG_BOOL bInterruptProcessed = IMG_FALSE;
+
+
+ /* Real Hardware */
+ {
+ IMG_UINT32 ui32EventStatus = 0, ui32EventEnable = 0;
+ IMG_UINT32 ui32EventClear = 0;
+#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
+	IMG_UINT32 ui32EventStatus2 = 0, ui32EventEnable2 = 0;	/* default to 0 in case we are powering down */
+#endif
+ IMG_UINT32 ui32EventClear2 = 0;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+
+ /* check for null pointers */
+ if(pvData == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGX_ISRHandler: Invalid params\n"));
+ return bInterruptProcessed;
+ }
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
+
+		if (!powering_down)
+		{
+ ui32EventStatus = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
+ ui32EventEnable = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_ENABLE);
+ }
+
+ /* test only the unmasked bits */
+ ui32EventStatus &= ui32EventEnable;
+
+#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
+	if (!powering_down)
+	{
+ ui32EventStatus2 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
+ ui32EventEnable2 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_ENABLE2);
+ }
+
+ /* test only the unmasked bits */
+ ui32EventStatus2 &= ui32EventEnable2;
+#endif /* defined(SGX_FEATURE_DATA_BREAKPOINTS) */
+
+ /* Thought: is it better to insist that the bit assignment in
+ the "clear" register(s) matches that of the "status" register(s)?
+ It would greatly simplify this LISR */
+
+ if (ui32EventStatus & EUR_CR_EVENT_STATUS_SW_EVENT_MASK)
+ {
+ ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK;
+ }
+
+#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
+ if (ui32EventStatus2 & EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_MASK)
+ {
+ ui32EventClear2 |= EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_MASK;
+ }
+
+ if (ui32EventStatus2 & EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_MASK)
+ {
+ ui32EventClear2 |= EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_MASK;
+ }
+#endif /* defined(SGX_FEATURE_DATA_BREAKPOINTS) */
+
+ if (ui32EventClear || ui32EventClear2)
+ {
+ bInterruptProcessed = IMG_TRUE;
+
+ /* Clear master interrupt bit */
+ ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK;
+
+			if (!powering_down)
+			{
+ /* clear the events */
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32EventClear);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32EventClear2);
+ }
+ }
+ }
+
+ return bInterruptProcessed;
+}
+
+
+/*
+ SGX MISR Handler
+*/
+static IMG_VOID SGX_MISRHandler (IMG_VOID *pvData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
+
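+	/*
+	 * The HWR interrupt flag is raised by the microkernel; the clear flag
+	 * is set by the host once recovery has started (see
+	 * HWRecoveryResetSGX), so recovery is only triggered once per event.
+	 */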
+ if (((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) != 0UL) &&
+ ((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) == 0UL))
+ {
+ HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID);
+ }
+
+#if defined(OS_SUPPORTS_IN_LISR)
+ if (psDeviceNode->bReProcessDeviceCommandComplete)
+ {
+ SGXScheduleProcessQueuesKM(psDeviceNode);
+ }
+#endif
+
+ SGXTestActivePowerEvent(psDeviceNode, ISR_ID);
+}
+#endif /* #if defined (SYS_USING_INTERRUPTS) */
+
+#if defined(SUPPORT_MEMORY_TILING)
+
+IMG_INTERNAL
+PVRSRV_ERROR SGX_AllocMemTilingRange(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32XTileStride,
+ IMG_UINT32 *pui32RangeIndex)
+{
+ return SGX_AllocMemTilingRangeInt(psDeviceNode->pvDevice,
+ psMemInfo->sDevVAddr.uiAddr,
+ psMemInfo->sDevVAddr.uiAddr + ((IMG_UINT32) psMemInfo->uAllocSize) + SGX_MMU_PAGE_SIZE - 1,
+ ui32XTileStride,
+ pui32RangeIndex);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SGX_FreeMemTilingRange(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32RangeIndex)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Val;
+
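+	/* The driver assumes ten tiling range registers (EUR_CR_BIF_TILE0
+	   upwards, 4 bytes apart), hence the bound below. */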
+ if(ui32RangeIndex >= 10)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"SGX_FreeMemTilingRange: invalid range index"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* clear the usage bit */
+ psDevInfo->ui32MemTilingUsage &= ~(1<<ui32RangeIndex);
+
+ /* disable the range */
+ ui32Offset = EUR_CR_BIF_TILE0 + (ui32RangeIndex<<2);
+ ui32Val = 0;
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Offset, ui32Val);
+ PDUMPREG(SGX_PDUMPREG_NAME, ui32Offset, ui32Val);
+
+ return PVRSRV_OK;
+}
+
+#endif /* defined(SUPPORT_MEMORY_TILING) */
+
+
+static IMG_VOID SGXCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ #if defined(SGX_FEATURE_MP)
+ psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_SL;
+ #else
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ #endif /* SGX_FEATURE_MP */
+}
+
+/*!
+*******************************************************************************
+
+ @Function SGXRegisterDevice
+
+ @Description
+
+ Registers the device with the system
+
+ @Input: psDeviceNode - device node
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+
+ /* setup details that never change */
+ psDeviceNode->sDevId.eDeviceType = DEV_DEVICE_TYPE;
+ psDeviceNode->sDevId.eDeviceClass = DEV_DEVICE_CLASS;
+#if defined(PDUMP)
+ {
+ /* memory space names are set up in system code */
+ SGX_DEVICE_MAP *psSGXDeviceMemMap;
+ SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
+ (IMG_VOID**)&psSGXDeviceMemMap);
+
+ psDeviceNode->sDevId.pszPDumpDevName = psSGXDeviceMemMap->pszPDumpDevName;
+ PVR_ASSERT(psDeviceNode->sDevId.pszPDumpDevName != IMG_NULL);
+ }
+
+ psDeviceNode->sDevId.pszPDumpRegName = SGX_PDUMPREG_NAME;
+#endif /* PDUMP */
+
+ psDeviceNode->pfnInitDevice = &DevInitSGXPart1;
+ psDeviceNode->pfnDeInitDevice = &DevDeInitSGX;
+
+ psDeviceNode->pfnInitDeviceCompatCheck = &SGXDevInitCompatCheck;
+#if defined(PDUMP)
+ psDeviceNode->pfnPDumpInitDevice = &SGXResetPDump;
+ psDeviceNode->pfnMMUGetContextID = &MMU_GetPDumpContextID;
+#endif
+ /*
+ MMU callbacks
+ */
+ psDeviceNode->pfnMMUInitialise = &MMU_Initialise;
+ psDeviceNode->pfnMMUFinalise = &MMU_Finalise;
+ psDeviceNode->pfnMMUInsertHeap = &MMU_InsertHeap;
+ psDeviceNode->pfnMMUCreate = &MMU_Create;
+ psDeviceNode->pfnMMUDelete = &MMU_Delete;
+ psDeviceNode->pfnMMUAlloc = &MMU_Alloc;
+ psDeviceNode->pfnMMUFree = &MMU_Free;
+ psDeviceNode->pfnMMUMapPages = &MMU_MapPages;
+ psDeviceNode->pfnMMUMapShadow = &MMU_MapShadow;
+ psDeviceNode->pfnMMUUnmapPages = &MMU_UnmapPages;
+ psDeviceNode->pfnMMUMapScatter = &MMU_MapScatter;
+ psDeviceNode->pfnMMUGetPhysPageAddr = &MMU_GetPhysPageAddr;
+ psDeviceNode->pfnMMUGetPDDevPAddr = &MMU_GetPDDevPAddr;
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ psDeviceNode->pfnMMUIsHeapShared = &MMU_IsHeapShared;
+#endif
+#if defined(FIX_HW_BRN_31620)
+ psDeviceNode->pfnMMUGetCacheFlushRange = &MMU_GetCacheFlushRange;
+ psDeviceNode->pfnMMUGetPDPhysAddr = &MMU_GetPDPhysAddr;
+#else
+ psDeviceNode->pfnMMUGetCacheFlushRange = IMG_NULL;
+ psDeviceNode->pfnMMUGetPDPhysAddr = IMG_NULL;
+#endif
+ psDeviceNode->pfnMMUMapPagesSparse = &MMU_MapPagesSparse;
+ psDeviceNode->pfnMMUMapShadowSparse = &MMU_MapShadowSparse;
+
+#if defined (SYS_USING_INTERRUPTS)
+ /*
+ SGX ISR handler
+ */
+ psDeviceNode->pfnDeviceISR = SGX_ISRHandler;
+ psDeviceNode->pfnDeviceMISR = SGX_MISRHandler;
+#endif
+
+#if defined(SUPPORT_MEMORY_TILING)
+ psDeviceNode->pfnAllocMemTilingRange = SGX_AllocMemTilingRange;
+ psDeviceNode->pfnFreeMemTilingRange = SGX_FreeMemTilingRange;
+#endif
+
+ /*
+ SGX command complete handler
+ */
+ psDeviceNode->pfnDeviceCommandComplete = &SGXCommandComplete;
+
+ psDeviceNode->pfnCacheInvalidate = SGXCacheInvalidate;
+
+ /*
+ and setup the device's memory map:
+ */
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ /* size of address space */
+ psDevMemoryInfo->ui32AddressSpaceSizeLog2 = SGX_FEATURE_ADDRESS_SPACE_SIZE;
+
+ /* flags, backing store details to be specified by system */
+ psDevMemoryInfo->ui32Flags = 0;
+
+ /* device memory heap info about each heap in a device address space */
+ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID,
+ (IMG_VOID **)&psDevMemoryInfo->psDeviceMemoryHeap, 0,
+ "Array of Device Memory Heap Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXRegisterDevice : Failed to alloc memory for DEVICE_MEMORY_HEAP_INFO"));
+ return (PVRSRV_ERROR_OUT_OF_MEMORY);
+ }
+ OSMemSet(psDevMemoryInfo->psDeviceMemoryHeap, 0, sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID);
+
+ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+
+ /*
+ setup heaps
+ Note: backing store to be setup by system (defaults to UMA)
+ */
+
+ /************* general ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "General";
+ psDeviceMemoryHeap->pszBSName = "General BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+#if !defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+ /* specify the mapping heap ID for this device */
+ psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
+#endif
+ psDeviceMemoryHeap++;/* advance to the next heap */
+
+#if defined(SUPPORT_MEMORY_TILING)
+ /************* VPB tiling ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_VPB_TILED_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_VPB_TILED_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_VPB_TILED_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "VPB Tiled";
+ psDeviceMemoryHeap->pszBSName = "VPB Tiled BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap->ui32XTileStride = SGX_VPB_TILED_HEAP_STRIDE;
+ PVR_DPF((PVR_DBG_WARNING, "VPB tiling heap tiling stride = 0x%x", psDeviceMemoryHeap->ui32XTileStride));
+ psDeviceMemoryHeap++;/* advance to the next heap */
+#endif
+
+#if defined(SUPPORT_ION)
+ /************* Ion Heap ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_ION_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_ION_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_ION_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "Ion";
+ psDeviceMemoryHeap->pszBSName = "Ion BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+ /* specify the ion heap ID for this device */
+ psDevMemoryInfo->ui32IonHeapID = SGX_ION_HEAP_ID;
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;/* advance to the next heap */
+#endif
+
+ /************* TA data ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_TADATA_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_TADATA_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_TADATA_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_MULTI_PROCESS;
+ psDeviceMemoryHeap->pszName = "TA Data";
+ psDeviceMemoryHeap->pszBSName = "TA Data BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;/* advance to the next heap */
+
+
+ /************* kernel code ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_CODE_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_CODE_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_CODE_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_MULTI_PROCESS;
+ psDeviceMemoryHeap->pszName = "Kernel Code";
+ psDeviceMemoryHeap->pszBSName = "Kernel Code BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;/* advance to the next heap */
+
+
+ /************* Kernel Video Data ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_DATA_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_DATA_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_DATA_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_MULTI_PROCESS;
+ psDeviceMemoryHeap->pszName = "KernelData";
+ psDeviceMemoryHeap->pszBSName = "KernelData BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;/* advance to the next heap */
+
+
+ /************* PixelShaderUSSE ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PIXELSHADER_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PIXELSHADER_HEAP_BASE;
+ /*
+ The actual size of the pixel and vertex shader heap must be such that all
+	addresses are within range of one of the USSE code base registers, but
+ the addressable range is hardware-dependent.
+ SGX_PIXELSHADER_HEAP_SIZE is defined to be the maximum possible size
+ to ensure that the heap layout is consistent across all SGXs.
+ */
+ psDeviceMemoryHeap->ui32HeapSize = ((10 << SGX_USE_CODE_SEGMENT_RANGE_BITS) - 0x00001000);
+ PVR_ASSERT(psDeviceMemoryHeap->ui32HeapSize <= SGX_PIXELSHADER_HEAP_SIZE);
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "PixelShaderUSSE";
+ psDeviceMemoryHeap->pszBSName = "PixelShaderUSSE BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;/* advance to the next heap */
+
+
+ /************* VertexShaderUSSE ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_VERTEXSHADER_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_VERTEXSHADER_HEAP_BASE;
+ /* See comment above with PixelShaderUSSE ui32HeapSize */
+ psDeviceMemoryHeap->ui32HeapSize = ((4 << SGX_USE_CODE_SEGMENT_RANGE_BITS) - 0x00001000);
+ PVR_ASSERT(psDeviceMemoryHeap->ui32HeapSize <= SGX_VERTEXSHADER_HEAP_SIZE);
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "VertexShaderUSSE";
+ psDeviceMemoryHeap->pszBSName = "VertexShaderUSSE BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;/* advance to the next heap */
+
+
+ /************* PDS Pixel Code/Data ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSPIXEL_CODEDATA_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSPIXEL_CODEDATA_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_PDSPIXEL_CODEDATA_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "PDSPixelCodeData";
+ psDeviceMemoryHeap->pszBSName = "PDSPixelCodeData BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;/* advance to the next heap */
+
+
+ /************* PDS Vertex Code/Data ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSVERTEX_CODEDATA_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSVERTEX_CODEDATA_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_PDSVERTEX_CODEDATA_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "PDSVertexCodeData";
+ psDeviceMemoryHeap->pszBSName = "PDSVertexCodeData BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;/* advance to the next heap */
+
+
+ /************* CacheCoherent ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_SYNCINFO_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_SYNCINFO_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_SYNCINFO_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_MULTI_PROCESS;
+ psDeviceMemoryHeap->pszName = "CacheCoherent";
+ psDeviceMemoryHeap->pszBSName = "CacheCoherent BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ /* set the sync heap id */
+ psDevMemoryInfo->ui32SyncHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
+ psDeviceMemoryHeap++;/* advance to the next heap */
+
+
+ /************* Shared 3D Parameters ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_SHARED_3DPARAMETERS_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_SHARED_3DPARAMETERS_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_SHARED_3DPARAMETERS_HEAP_SIZE;
+ psDeviceMemoryHeap->pszName = "Shared 3DParameters";
+ psDeviceMemoryHeap->pszBSName = "Shared 3DParameters BS";
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_MULTI_PROCESS;
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;/* advance to the next heap */
+
+ /************* Percontext 3D Parameters ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PERCONTEXT_3DPARAMETERS_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE;
+ psDeviceMemoryHeap->pszName = "Percontext 3DParameters";
+ psDeviceMemoryHeap->pszBSName = "Percontext 3DParameters BS";
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;/* advance to the next heap */
+
+
+#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+ /************* General Mapping ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_MAPPING_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_MAPPING_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_MAPPING_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_MULTI_PROCESS;
+ psDeviceMemoryHeap->pszName = "GeneralMapping";
+ psDeviceMemoryHeap->pszBSName = "GeneralMapping BS";
+ #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410)
+ /*
+ if((2D hardware is enabled)
+ && (multi-mem contexts enabled)
+ && (BRN23410 is present))
+ - then don't make the heap per-context otherwise
+ the TA and 2D requestors must always be aligned to
+ the same address space which could affect performance
+ */
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+ #else /* defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410) */
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+ #endif /* defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410) */
+
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ /* specify the mapping heap ID for this device */
+ psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
+ psDeviceMemoryHeap++;/* advance to the next heap */
+#endif /* #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) */
+
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+ /************* 2D HW Heap ***************/
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_2D_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_2D_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_2D_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "2D";
+ psDeviceMemoryHeap->pszBSName = "2D BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+ /* set the default (4k). System can override these as required */
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;/* advance to the next heap */
+#endif /* #if defined(SGX_FEATURE_2D_HARDWARE) */
+
+
+ /* set the heap count */
+ psDevMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
+
+ return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+static
+PVRSRV_ERROR SGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)(psDeviceNode->pvDevice);
+ psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0;
+ PVR_DPF((PVR_DBG_MESSAGE, "Reset pdump CCB write offset."));
+
+ return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+
+/*!
+*******************************************************************************
+
+ @Function SGXGetClientInfoKM
+
+ @Description Gets the client information
+
+ @Input hDevCookie
+
+ @Output psClientInfo
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie,
+ SGX_CLIENT_INFO* psClientInfo)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
+
+	/*
+	   Track the number of clients connected to SGX
+	*/
+ psDevInfo->ui32ClientRefCount++;
+
+ /*
+ Copy information to the client info.
+ */
+ psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
+
+ /*
+ Copy requested information.
+ */
+ OSMemCopy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData, sizeof(psClientInfo->asDevData));
+
+ /* just return OK */
+ return PVRSRV_OK;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function SGXPanic
+
+ @Description
+
+ Called when an unrecoverable situation is detected. Dumps SGX debug
+ information and tells the OS to panic.
+
+ @Input psDevInfo - SGX device info
+
+ @Return IMG_VOID
+
+******************************************************************************/
+IMG_VOID SGXPanic(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ PVR_LOG(("SGX panic"));
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+ OSPanic();
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function SGXDevInitCompatCheck
+
+ @Description
+
+ Check compatibility of host driver and microkernel (DDK and build options)
+ for SGX devices at services/device initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32BuildOptions, ui32BuildOptionsMismatch;
+#if !defined(NO_HARDWARE)
+ PPVRSRV_KERNEL_MEM_INFO psMemInfo;
+ PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt; /*!< internal misc info for ukernel */
+ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
+ SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes; /*!< microkernel structure sizes */
+ IMG_BOOL bStructSizesFailed;
+
+ /* Exceptions list for core rev check, format is pairs of (hw rev, sw rev) */
+ IMG_BOOL bCheckCoreRev;
+ const IMG_UINT32 aui32CoreRevExceptions[] =
+ {
+ 0x10100, 0x10101
+ };
+ const IMG_UINT32 ui32NumCoreExceptions = sizeof(aui32CoreRevExceptions) / (2*sizeof(IMG_UINT32));
+ IMG_UINT i;
+#endif
+
+ /* Ensure it's a SGX device */
+ if(psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_SGX)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Device not of type SGX"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto chk_exit;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ /*
+ * 1. Check kernel-side and client-side build options
+ * 2. Ensure ukernel DDK version and driver DDK version are compatible
+ * 3. Check ukernel build options against kernel-side build options
+ */
+
+ /*
+ * Check KM build options against client-side host driver
+ */
+
+ ui32BuildOptions = (SGX_BUILD_OPTIONS);
+ if (ui32BuildOptions != psDevInfo->ui32ClientBuildOptions)
+ {
+ ui32BuildOptionsMismatch = ui32BuildOptions ^ psDevInfo->ui32ClientBuildOptions;
+ if ( (psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; "
+ "extra options present in client-side driver: (0x%x). Please check sgx_options.h",
+ psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+ }
+
+ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; "
+ "extra options present in KM: (0x%x). Please check sgx_options.h",
+ ui32BuildOptions & ui32BuildOptionsMismatch ));
+ }
+ eError = PVRSRV_ERROR_BUILD_MISMATCH;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Client-side and KM driver build options match. [ OK ]"));
+ }
+
+#if !defined (NO_HARDWARE)
+ psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
+
+ /* Clear state (not strictly necessary since this is the first call) */
+ psSGXMiscInfoInt = psMemInfo->pvLinAddrKM;
+ psSGXMiscInfoInt->ui32MiscInfoFlags = 0;
+ psSGXMiscInfoInt->ui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES;
+ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode, IMG_NULL);
+
+ /*
+ * Validate DDK version
+ */
+ if(eError != PVRSRV_OK)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Unable to validate device DDK version"));
+ goto chk_exit;
+ }
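+	/*
+	 * The DDK version word packs the release as
+	 * (PVRVERSION_MAJ << 16) | (PVRVERSION_MIN << 8) | PVRVERSION_BRANCH;
+	 * the build number is compared separately.
+	 */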
+ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
+ if( (psSGXFeatures->ui32DDKVersion !=
+ ((PVRVERSION_MAJ << 16) |
+ (PVRVERSION_MIN << 8) |
+ PVRVERSION_BRANCH) ) ||
+ (psSGXFeatures->ui32DDKBuild != PVRVERSION_BUILD) )
+ {
+ PVR_LOG(("(FAIL) SGXInit: Incompatible driver DDK revision (%d)/device DDK revision (%d).",
+ PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild));
+ eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: driver DDK (%d) and device DDK (%d) match. [ OK ]",
+ PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild));
+ }
+
+ /*
+ * Check hardware core revision is compatible with the one in software
+ */
+ if (psSGXFeatures->ui32CoreRevSW == 0)
+ {
+ /*
+ Head core revision cannot be checked.
+ */
+ PVR_LOG(("SGXInit: HW core rev (%x) check skipped.",
+ psSGXFeatures->ui32CoreRev));
+ }
+ else
+ {
+ /* For some cores the hw/sw core revisions are expected not to match. For these
+ * exceptional cases the core rev compatibility check should be skipped.
+ */
+ bCheckCoreRev = IMG_TRUE;
+		for(i=0; i < ui32NumCoreExceptions * 2; i+=2)	/* two array entries per exception pair */
+ {
+ if( (psSGXFeatures->ui32CoreRev==aui32CoreRevExceptions[i]) &&
+ (psSGXFeatures->ui32CoreRevSW==aui32CoreRevExceptions[i+1]) )
+ {
+ PVR_LOG(("SGXInit: HW core rev (%x), SW core rev (%x) check skipped.",
+ psSGXFeatures->ui32CoreRev,
+ psSGXFeatures->ui32CoreRevSW));
+ bCheckCoreRev = IMG_FALSE;
+ }
+ }
+
+ if (bCheckCoreRev)
+ {
+ if (psSGXFeatures->ui32CoreRev != psSGXFeatures->ui32CoreRevSW)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Incompatible HW core rev (%x) and SW core rev (%x).",
+ psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW));
+ eError = PVRSRV_ERROR_BUILD_MISMATCH;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: HW core rev (%x) and SW core rev (%x) match. [ OK ]",
+ psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW));
+ }
+ }
+ }
+
+ /*
+ * Check ukernel structure sizes are the same as those in the driver
+ */
+ psSGXStructSizes = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXStructSizes;
+
+ bStructSizesFailed = IMG_FALSE;
+
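+	/*
+	 * CHECK_SIZE(NAME) is assumed to compare the driver's
+	 * sizeof(SGXMKIF_##NAME) against the corresponding size reported by
+	 * the microkernel in psSGXStructSizes, setting bStructSizesFailed on
+	 * any mismatch.
+	 */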
+ CHECK_SIZE(HOST_CTL);
+ CHECK_SIZE(COMMAND);
+#if defined(SGX_FEATURE_2D_HARDWARE)
+ CHECK_SIZE(2DCMD);
+ CHECK_SIZE(2DCMD_SHARED);
+#endif
+ CHECK_SIZE(CMDTA);
+ CHECK_SIZE(CMDTA_SHARED);
+ CHECK_SIZE(TRANSFERCMD);
+ CHECK_SIZE(TRANSFERCMD_SHARED);
+
+ CHECK_SIZE(3DREGISTERS);
+ CHECK_SIZE(HWPBDESC);
+ CHECK_SIZE(HWRENDERCONTEXT);
+ CHECK_SIZE(HWRENDERDETAILS);
+ CHECK_SIZE(HWRTDATA);
+ CHECK_SIZE(HWRTDATASET);
+ CHECK_SIZE(HWTRANSFERCONTEXT);
+
+ if (bStructSizesFailed == IMG_TRUE)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Mismatch in SGXMKIF structure sizes."));
+ eError = PVRSRV_ERROR_BUILD_MISMATCH;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: SGXMKIF structure sizes match. [ OK ]"));
+ }
+
+ /*
+ * Check ukernel build options against KM host driver
+ */
+
+ ui32BuildOptions = psSGXFeatures->ui32BuildOptions;
+ if (ui32BuildOptions != (SGX_BUILD_OPTIONS))
+ {
+ ui32BuildOptionsMismatch = ui32BuildOptions ^ (SGX_BUILD_OPTIONS);
+ if ( ((SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; "
+ "extra options present in driver: (0x%x). Please check sgx_options.h",
+ (SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch ));
+ }
+
+ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; "
+ "extra options present in microkernel: (0x%x). Please check sgx_options.h",
+ ui32BuildOptions & ui32BuildOptionsMismatch ));
+ }
+ eError = PVRSRV_ERROR_BUILD_MISMATCH;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Driver and microkernel build options match. [ OK ]"));
+ }
+#endif /* NO_HARDWARE */
+
+ eError = PVRSRV_OK;
+chk_exit:
+#if defined(IGNORE_SGX_INIT_COMPATIBILITY_CHECK)
+ return PVRSRV_OK;
+#else
+ return eError;
+#endif
+}
+
+/*
+ * @Function SGXGetMiscInfoUkernel
+ *
+ * @Description Returns misc info (e.g. SGX build info/flags) from microkernel
+ *
+ * @Input psDevInfo : device info from init phase
+ * @Input psDeviceNode : device node, used for scheduling ukernel to query SGX features
+ *
+ * @Return PVRSRV_ERROR :
+ *
+ */
+static
+PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hDevMemContext)
+{
+ PVRSRV_ERROR eError;
+ SGXMKIF_COMMAND sCommandData; /* CCB command data */
+ PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt; /*!< internal misc info for ukernel */
+ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures; /*!< sgx features for client */
+ SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes; /*!< internal info: microkernel structure sizes */
+
+ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
+
+ if (! psMemInfo->pvLinAddrKM)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Invalid address."));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ psSGXMiscInfoInt = psMemInfo->pvLinAddrKM;
+ psSGXFeatures = &psSGXMiscInfoInt->sSGXFeatures;
+ psSGXStructSizes = &psSGXMiscInfoInt->sSGXStructSizes;
+
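+	/*
+	 * Handshake with the microkernel: clear the READY flag, kick a
+	 * GETMISCINFO command pointing at the shared buffer, then poll until
+	 * the microkernel sets READY again (see the loop further down).
+	 */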
+ psSGXMiscInfoInt->ui32MiscInfoFlags &= ~PVRSRV_USSE_MISCINFO_READY;
+
+ /* Reset SGX features */
+ OSMemSet(psSGXFeatures, 0, sizeof(*psSGXFeatures));
+ OSMemSet(psSGXStructSizes, 0, sizeof(*psSGXStructSizes));
+
+ /* set up buffer address for SGX features in CCB */
+ sCommandData.ui32Data[1] = psMemInfo->sDevVAddr.uiAddr; /* device V addr of output buffer */
+
+ PDUMPCOMMENT("Microkernel kick for SGXGetMiscInfo");
+ eError = SGXScheduleCCBCommandKM(psDeviceNode,
+ SGXMKIF_CMD_GETMISCINFO,
+ &sCommandData,
+ KERNEL_ID,
+ 0,
+ hDevMemContext,
+ IMG_FALSE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: SGXScheduleCCBCommandKM failed."));
+ return eError;
+ }
+
+ /* FIXME: DWORD value to determine code path in ukernel?
+ * E.g. could use getMiscInfo to obtain register values for diagnostics? */
+
+#if !defined(NO_HARDWARE)
+ {
+ IMG_BOOL bExit;
+
+ bExit = IMG_FALSE;
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if ((psSGXMiscInfoInt->ui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_READY) != 0)
+ {
+ bExit = IMG_TRUE;
+ break;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+		/* if the loop exited because of a timeout */
+ if (!bExit)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Timeout occurred waiting for misc info."));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+ }
+#endif /* NO_HARDWARE */
+
+ return PVRSRV_OK;
+}
+
+
+
+/*
+ * @Function SGXGetMiscInfoKM
+ *
+ * @Description Returns miscellaneous SGX info
+ *
+ * @Input psDevInfo : device info from init phase
+ * @Input psDeviceNode : device node, used for scheduling ukernel to query SGX features
+ *
+ * @Output psMiscInfo : query request plus user-mode mem for holding returned data
+ *
+ * @Return PVRSRV_ERROR :
+ *
+ */
+IMG_EXPORT
+PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo,
+ SGX_MISC_INFO *psMiscInfo,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hDevMemContext)
+{
+ PVRSRV_ERROR eError;
+ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
+ IMG_UINT32 *pui32MiscInfoFlags = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->ui32MiscInfoFlags;
+
+ /* Reset the misc info state flags */
+ *pui32MiscInfoFlags = 0;
+
+#if !defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
+ PVR_UNREFERENCED_PARAMETER(hDevMemContext);
+#endif
+
+ switch(psMiscInfo->eRequest)
+ {
+#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
+ case SGX_MISC_INFO_REQUEST_SET_BREAKPOINT:
+ {
+ IMG_UINT32 ui32MaskDM;
+ IMG_UINT32 ui32CtrlWEnable;
+ IMG_UINT32 ui32CtrlREnable;
+ IMG_UINT32 ui32CtrlTrapEnable;
+ IMG_UINT32 ui32RegVal;
+ IMG_UINT32 ui32StartRegVal;
+ IMG_UINT32 ui32EndRegVal;
+ SGXMKIF_COMMAND sCommandData;
+
+ /* Set or Clear BP? */
+ if(psMiscInfo->uData.sSGXBreakpointInfo.bBPEnable)
+ {
+ /* set the break point */
+ IMG_DEV_VIRTADDR sBPDevVAddr = psMiscInfo->uData.sSGXBreakpointInfo.sBPDevVAddr;
+ IMG_DEV_VIRTADDR sBPDevVAddrEnd = psMiscInfo->uData.sSGXBreakpointInfo.sBPDevVAddrEnd;
+
+ /* BP address */
+ ui32StartRegVal = sBPDevVAddr.uiAddr & EUR_CR_BREAKPOINT0_START_ADDRESS_MASK;
+ ui32EndRegVal = sBPDevVAddrEnd.uiAddr & EUR_CR_BREAKPOINT0_END_ADDRESS_MASK;
+
+ ui32MaskDM = psMiscInfo->uData.sSGXBreakpointInfo.ui32DataMasterMask;
+ ui32CtrlWEnable = psMiscInfo->uData.sSGXBreakpointInfo.bWrite;
+ ui32CtrlREnable = psMiscInfo->uData.sSGXBreakpointInfo.bRead;
+ ui32CtrlTrapEnable = psMiscInfo->uData.sSGXBreakpointInfo.bTrapped;
+
+ /* normal data BP */
+ ui32RegVal = ((ui32MaskDM<<EUR_CR_BREAKPOINT0_MASK_DM_SHIFT) & EUR_CR_BREAKPOINT0_MASK_DM_MASK) |
+ ((ui32CtrlWEnable<<EUR_CR_BREAKPOINT0_CTRL_WENABLE_SHIFT) & EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK) |
+ ((ui32CtrlREnable<<EUR_CR_BREAKPOINT0_CTRL_RENABLE_SHIFT) & EUR_CR_BREAKPOINT0_CTRL_RENABLE_MASK) |
+ ((ui32CtrlTrapEnable<<EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SHIFT) & EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_MASK);
+ }
+ else
+ {
+ /* clear the break point */
+ ui32RegVal = ui32StartRegVal = ui32EndRegVal = 0;
+ }
+
+ /* setup the command */
+ sCommandData.ui32Data[0] = psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex;
+ sCommandData.ui32Data[1] = ui32StartRegVal;
+ sCommandData.ui32Data[2] = ui32EndRegVal;
+ sCommandData.ui32Data[3] = ui32RegVal;
+
+ /* clear signal flags */
+ psDevInfo->psSGXHostCtl->ui32BPSetClearSignal = 0;
+
+ PDUMPCOMMENT("Microkernel kick for setting a data breakpoint");
+ eError = SGXScheduleCCBCommandKM(psDeviceNode,
+ SGXMKIF_CMD_DATABREAKPOINT,
+ &sCommandData,
+ KERNEL_ID,
+ 0,
+ hDevMemContext,
+ IMG_FALSE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoKM: SGXScheduleCCBCommandKM failed."));
+ return eError;
+ }
+
+#if defined(NO_HARDWARE)
+ /* clear signal flags */
+ psDevInfo->psSGXHostCtl->ui32BPSetClearSignal = 0;
+#else
+ {
+ IMG_BOOL bExit;
+
+ bExit = IMG_FALSE;
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if (psDevInfo->psSGXHostCtl->ui32BPSetClearSignal != 0)
+ {
+ bExit = IMG_TRUE;
+ /* clear signal flags */
+ psDevInfo->psSGXHostCtl->ui32BPSetClearSignal = 0;
+ break;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+				/* if the loop exited because of a timeout */
+				if (!bExit)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoKM: Timeout occurred waiting for BP set/clear"));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+ }
+#endif /* NO_HARDWARE */
+
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_POLL_BREAKPOINT:
+ {
+			/* This request checks whether a breakpoint has been
+			   trapped. If so, it returns the index of the trapped
+			   breakpoint in ui32BPIndex, sets sTrappedBPDevVAddr to
+			   the address which was trapped, and sets bTrappedBP.
+			   Otherwise, bTrappedBP will be false and the other
+			   fields should be ignored. */
+ /* The uKernel is not used, since if we are stopped on a
+ breakpoint, it is not possible to guarantee that the
+ uKernel would be able to run */
+#if !defined(NO_HARDWARE)
+#if defined(SGX_FEATURE_MP)
+ IMG_BOOL bTrappedBPMaster;
+ IMG_UINT32 ui32CoreNum, ui32TrappedBPCoreNum;
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ IMG_UINT32 ui32PipeNum, ui32TrappedBPPipeNum;
+/* ui32PipeNum is the pipe number plus 1, or 0 to represent "partition" */
+#define NUM_PIPES_PLUS_ONE (SGX_FEATURE_PERPIPE_BKPT_REGS_NUMPIPES+1)
+#endif
+ IMG_BOOL bTrappedBPAny;
+#endif /* defined(SGX_FEATURE_MP) */
+ IMG_BOOL bFoundOne;
+
+#if defined(SGX_FEATURE_MP)
+ ui32TrappedBPCoreNum = 0;
+ bTrappedBPMaster = !!(EUR_CR_MASTER_BREAKPOINT_TRAPPED_MASK & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT));
+ bTrappedBPAny = bTrappedBPMaster;
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ ui32TrappedBPPipeNum = 0; /* just to keep the (incorrect) compiler happy */
+#endif
+ for (ui32CoreNum = 0; ui32CoreNum < SGX_FEATURE_MP_CORE_COUNT_3D; ui32CoreNum++)
+ {
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ /* FIXME: this macro makes the assumption that the PARTITION regs are the same
+ distance before the PIPE0 regs as the PIPE1 regs are after it, _and_
+ assumes that the fields in the partition regs are in the same place
+ in the pipe regs. Need to validate these assumptions, or assert them */
+#define SGX_MP_CORE_PIPE_SELECT(r,c,p) \
+ ((SGX_MP_CORE_SELECT(EUR_CR_PARTITION_##r,c) + p*(EUR_CR_PIPE0_##r-EUR_CR_PARTITION_##r)))
+ for (ui32PipeNum = 0; ui32PipeNum < NUM_PIPES_PLUS_ONE; ui32PipeNum++)
+ {
+ bFoundOne =
+ 0 != (EUR_CR_PARTITION_BREAKPOINT_TRAPPED_MASK &
+ OSReadHWReg(psDevInfo->pvRegsBaseKM,
+ SGX_MP_CORE_PIPE_SELECT(BREAKPOINT,
+ ui32CoreNum,
+ ui32PipeNum)));
+ if (bFoundOne)
+ {
+ bTrappedBPAny = IMG_TRUE;
+ ui32TrappedBPCoreNum = ui32CoreNum;
+ ui32TrappedBPPipeNum = ui32PipeNum;
+ }
+ }
+#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+ bFoundOne = !!(EUR_CR_BREAKPOINT_TRAPPED_MASK & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT, ui32CoreNum)));
+ if (bFoundOne)
+ {
+ bTrappedBPAny = IMG_TRUE;
+ ui32TrappedBPCoreNum = ui32CoreNum;
+ }
+#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+ }
+
+ psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBP = bTrappedBPAny;
+#else /* defined(SGX_FEATURE_MP) */
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ #error Not yet considered the case for per-pipe regs in non-mp case
+#endif
+ psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBP = 0 != (EUR_CR_BREAKPOINT_TRAPPED_MASK & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BREAKPOINT));
+#endif /* defined(SGX_FEATURE_MP) */
+
+ if (psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBP)
+ {
+ IMG_UINT32 ui32Info0, ui32Info1;
+
+#if defined(SGX_FEATURE_MP)
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ ui32Info0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO0:SGX_MP_CORE_PIPE_SELECT(BREAKPOINT_TRAP_INFO0, ui32TrappedBPCoreNum, ui32TrappedBPPipeNum));
+ ui32Info1 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1:SGX_MP_CORE_PIPE_SELECT(BREAKPOINT_TRAP_INFO1, ui32TrappedBPCoreNum, ui32TrappedBPPipeNum));
+#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+ ui32Info0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO0:SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT_TRAP_INFO0, ui32TrappedBPCoreNum));
+ ui32Info1 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1:SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT_TRAP_INFO1, ui32TrappedBPCoreNum));
+#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+#else /* defined(SGX_FEATURE_MP) */
+ ui32Info0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BREAKPOINT_TRAP_INFO0);
+ ui32Info1 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BREAKPOINT_TRAP_INFO1);
+#endif /* defined(SGX_FEATURE_MP) */
+
+#ifdef SGX_FEATURE_PERPIPE_BKPT_REGS
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT;
+ psMiscInfo->uData.sSGXBreakpointInfo.sTrappedBPDevVAddr.uiAddr = ui32Info0 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK;
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPBurstLength = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT;
+ psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBPRead = !!(ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_MASK);
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPDataMaster = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT;
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPTag = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_SHIFT;
+#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT;
+ psMiscInfo->uData.sSGXBreakpointInfo.sTrappedBPDevVAddr.uiAddr = ui32Info0 & EUR_CR_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK;
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPBurstLength = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT;
+ psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBPRead = !!(ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_RNW_MASK);
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPDataMaster = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT;
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPTag = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_SHIFT;
+#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+#if defined(SGX_FEATURE_MP)
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ /* mp, per-pipe regbanks */
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum = bTrappedBPMaster?65535:(ui32TrappedBPCoreNum + (ui32TrappedBPPipeNum<<10));
+#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+ /* mp, regbanks unsplit */
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum = bTrappedBPMaster?65535:ui32TrappedBPCoreNum;
+#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+#else /* defined(SGX_FEATURE_MP) */
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ /* non-mp, per-pipe regbanks */
+#error non-mp perpipe regs not yet supported
+#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+ /* non-mp */
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum = 65534;
+#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+#endif /* defined(SGX_FEATURE_MP) */
+ }
+#endif /* !defined(NO_HARDWARE) */
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_RESUME_BREAKPOINT:
+ {
+ /* This request resumes from the currently trapped breakpoint. */
+ /* Core number must be supplied */
+ /* Polls for notify to be acknowledged by h/w */
+#if !defined(NO_HARDWARE)
+#if defined(SGX_FEATURE_MP)
+ IMG_UINT32 ui32CoreNum;
+ IMG_BOOL bMaster;
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ IMG_UINT32 ui32PipeNum;
+#endif
+#endif /* defined(SGX_FEATURE_MP) */
+ IMG_UINT32 ui32OldSeqNum, ui32NewSeqNum;
+
+#if defined(SGX_FEATURE_MP)
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
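+			/*
+			 * Decode the packed value produced by the POLL_BREAKPOINT
+			 * path above: bits 9:0 hold the core number, the bits above
+			 * them the pipe number (plus one), and values above 32767
+			 * denote the master bank.
+			 */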
+ ui32PipeNum = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum >> 10;
+ ui32CoreNum = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum & 1023;
+ bMaster = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum > 32767;
+#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+ ui32CoreNum = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum;
+ bMaster = ui32CoreNum > SGX_FEATURE_MP_CORE_COUNT_3D;
+#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+ if (bMaster)
+ {
+ /* master */
+ /* EUR_CR_MASTER_BREAKPOINT_TRAPPED_MASK | EUR_CR_MASTER_BREAKPOINT_SEQNUM_MASK */
+ ui32OldSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT_TRAP, EUR_CR_MASTER_BREAKPOINT_TRAP_WRNOTIFY_MASK | EUR_CR_MASTER_BREAKPOINT_TRAP_CONTINUE_MASK);
+ do
+ {
+ ui32NewSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT);
+ }
+ while (ui32OldSeqNum == ui32NewSeqNum);
+ }
+ else
+#endif /* defined(SGX_FEATURE_MP) */
+ {
+ /* core */
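+			/* 0x1c presumably masks the TRAPPED and SEQNUM fields, as
+			   noted for the master register above */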
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ ui32OldSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_PIPE_SELECT(BREAKPOINT, ui32CoreNum, ui32PipeNum));
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_PIPE_SELECT(BREAKPOINT_TRAP, ui32CoreNum, ui32PipeNum), EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_MASK | EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_MASK);
+ do
+ {
+ ui32NewSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_PIPE_SELECT(BREAKPOINT, ui32CoreNum, ui32PipeNum));
+ }
+ while (ui32OldSeqNum == ui32NewSeqNum);
+#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+ ui32OldSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT, ui32CoreNum));
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT_TRAP, ui32CoreNum), EUR_CR_BREAKPOINT_TRAP_WRNOTIFY_MASK | EUR_CR_BREAKPOINT_TRAP_CONTINUE_MASK);
+ do
+ {
+ ui32NewSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT, ui32CoreNum));
+ }
+ while (ui32OldSeqNum == ui32NewSeqNum);
+#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */
+ }
+#endif /* !defined(NO_HARDWARE) */
+ return PVRSRV_OK;
+ }
+#endif /* SGX_FEATURE_DATA_BREAKPOINTS) */
+
+ case SGX_MISC_INFO_REQUEST_CLOCKSPEED:
+ {
+ psMiscInfo->uData.ui32SGXClockSpeed = psDevInfo->ui32CoreClockSpeed;
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_ACTIVEPOWER:
+ {
+ psMiscInfo->uData.sActivePower.ui32NumActivePowerEvents = psDevInfo->psSGXHostCtl->ui32NumActivePowerEvents;
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_LOCKUPS:
+ {
+#if defined(SUPPORT_HW_RECOVERY)
+ psMiscInfo->uData.sLockups.ui32uKernelDetectedLockups = psDevInfo->psSGXHostCtl->ui32uKernelDetectedLockups;
+ psMiscInfo->uData.sLockups.ui32HostDetectedLockups = psDevInfo->psSGXHostCtl->ui32HostDetectedLockups;
+#else
+ psMiscInfo->uData.sLockups.ui32uKernelDetectedLockups = 0;
+ psMiscInfo->uData.sLockups.ui32HostDetectedLockups = 0;
+#endif
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_SPM:
+ {
+ /* this is dealt with in UM */
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_SGXREV:
+ {
+ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
+			/* psMemInfo is declared at function scope and points at psDevInfo->psKernelSGXMiscMemInfo. */
+
+ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode, hDevMemContext);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n",
+ eError));
+ return eError;
+ }
+ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
+
+ /* Copy SGX features into misc info struct, to return to client */
+ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
+
+ /* Debug output */
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: Core 0x%x, sw ID 0x%x, sw Rev 0x%x\n",
+ psSGXFeatures->ui32CoreRev,
+ psSGXFeatures->ui32CoreIdSW,
+ psSGXFeatures->ui32CoreRevSW));
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: DDK version 0x%x, DDK build 0x%x\n",
+ psSGXFeatures->ui32DDKVersion,
+ psSGXFeatures->ui32DDKBuild));
+
+ /* done! */
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_DRIVER_SGXREV:
+ {
+ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
+
+ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
+
+ /* Reset the misc information to prevent
+ * confusion with values returned from the ukernel
+ */
+ OSMemSet(psMemInfo->pvLinAddrKM, 0,
+ sizeof(PVRSRV_SGX_MISCINFO_INFO));
+
+ psSGXFeatures->ui32DDKVersion =
+ (PVRVERSION_MAJ << 16) |
+ (PVRVERSION_MIN << 8) |
+ PVRVERSION_BRANCH;
+ psSGXFeatures->ui32DDKBuild = PVRVERSION_BUILD;
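+			/* Illustrative decode (hypothetical client-side helper, mirroring the
+			 * packing above):
+			 *     maj    = (ui32DDKVersion >> 16);
+			 *     min    = (ui32DDKVersion >>  8) & 0xFF;
+			 *     branch =  ui32DDKVersion        & 0xFF;
+			 */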
+
+ /* Also report the kernel module build options -- used in SGXConnectionCheck() */
+ psSGXFeatures->ui32BuildOptions = (SGX_BUILD_OPTIONS);
+
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+ /* Report the EDM status buffer location in memory */
+ psSGXFeatures->sDevVAEDMStatusBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->sDevVAddr;
+ psSGXFeatures->pvEDMStatusBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
+#endif
+
+ /* Copy SGX features into misc info struct, to return to client */
+ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
+ return PVRSRV_OK;
+ }
+
+#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
+ case SGX_MISC_INFO_REQUEST_MEMREAD:
+ case SGX_MISC_INFO_REQUEST_MEMCOPY:
+ {
+ PVRSRV_ERROR eError;
+ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
+ PVRSRV_SGX_MISCINFO_MEMACCESS *psSGXMemSrc; /* user-defined mem read */
+ PVRSRV_SGX_MISCINFO_MEMACCESS *psSGXMemDest; /* user-defined mem write */
+
+ {
+ /* Set the mem read flag; src is user-defined */
+ *pui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_MEMREAD;
+ psSGXMemSrc = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXMemAccessSrc;
+
+ if(psMiscInfo->sDevVAddrSrc.uiAddr != 0)
+ {
+ psSGXMemSrc->sDevVAddr = psMiscInfo->sDevVAddrSrc; /* src address */
+ }
+ else
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+ if( psMiscInfo->eRequest == SGX_MISC_INFO_REQUEST_MEMCOPY)
+ {
+ /* Set the mem write flag; dest is user-defined */
+ *pui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_MEMWRITE;
+ psSGXMemDest = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXMemAccessDest;
+
+ if(psMiscInfo->sDevVAddrDest.uiAddr != 0)
+ {
+ psSGXMemDest->sDevVAddr = psMiscInfo->sDevVAddrDest; /* dest address */
+ }
+ else
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+ /* Get physical address of PD for memory read (may need to switch context in microkernel) */
+ if(psMiscInfo->hDevMemContext != IMG_NULL)
+ {
+ SGXGetMMUPDAddrKM( (IMG_HANDLE)psDeviceNode, hDevMemContext, &psSGXMemSrc->sPDDevPAddr);
+
+ /* Single app will always use the same src and dest mem context */
+ psSGXMemDest->sPDDevPAddr = psSGXMemSrc->sPDDevPAddr;
+ }
+ else
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Submit the task to the ukernel */
+			eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode, hDevMemContext);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n",
+ eError));
+ return eError;
+ }
+ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
+
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ if(*pui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_MEMREAD_FAIL)
+ {
+ return PVRSRV_ERROR_INVALID_MISCINFO;
+ }
+#endif
+ /* Copy SGX features into misc info struct, to return to client */
+ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
+ return PVRSRV_OK;
+ }
+#endif /* SUPPORT_SGX_EDM_MEMORY_DEBUG */
+
+#if defined(SUPPORT_SGX_HWPERF)
+ case SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS:
+ {
+ PVRSRV_SGX_MISCINFO_SET_HWPERF_STATUS *psSetHWPerfStatus = &psMiscInfo->uData.sSetHWPerfStatus;
+ const IMG_UINT32 ui32ValidFlags = PVRSRV_SGX_HWPERF_STATUS_RESET_COUNTERS |
+ PVRSRV_SGX_HWPERF_STATUS_GRAPHICS_ON |
+ PVRSRV_SGX_HWPERF_STATUS_PERIODIC_ON |
+ PVRSRV_SGX_HWPERF_STATUS_MK_EXECUTION_ON;
+ SGXMKIF_COMMAND sCommandData = {0};
+
+ /* Check for valid flags */
+ if ((psSetHWPerfStatus->ui32NewHWPerfStatus & ~ui32ValidFlags) != 0)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
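+			/* The check above rejects any request bit outside ui32ValidFlags,
+			 * e.g. with valid = 0x0F, a request of 0x13 fails because
+			 * 0x13 & ~0x0F = 0x10 != 0.
+			 */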
+
+ #if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "SGX ukernel HWPerf status %u\n",
+ psSetHWPerfStatus->ui32NewHWPerfStatus);
+ #endif /* PDUMP */
+
+ /* Copy the new group selector(s) to the host ctl for the ukernel */
+ #if defined(SGX_FEATURE_EXTENDED_PERF_COUNTERS)
+ OSMemCopy(&psDevInfo->psSGXHostCtl->aui32PerfGroup[0],
+ &psSetHWPerfStatus->aui32PerfGroup[0],
+ sizeof(psDevInfo->psSGXHostCtl->aui32PerfGroup));
+ OSMemCopy(&psDevInfo->psSGXHostCtl->aui32PerfBit[0],
+ &psSetHWPerfStatus->aui32PerfBit[0],
+ sizeof(psDevInfo->psSGXHostCtl->aui32PerfBit));
+ psDevInfo->psSGXHostCtl->ui32PerfCounterBitSelect = psSetHWPerfStatus->ui32PerfCounterBitSelect;
+ psDevInfo->psSGXHostCtl->ui32PerfSumMux = psSetHWPerfStatus->ui32PerfSumMux;
+ #if defined(PDUMP)
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, aui32PerfGroup),
+ sizeof(psDevInfo->psSGXHostCtl->aui32PerfGroup),
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, aui32PerfBit),
+ sizeof(psDevInfo->psSGXHostCtl->aui32PerfBit),
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32PerfCounterBitSelect),
+ sizeof(psDevInfo->psSGXHostCtl->ui32PerfCounterBitSelect),
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32PerfSumMux),
+ sizeof(psDevInfo->psSGXHostCtl->ui32PerfSumMux),
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+ #endif /* PDUMP */
+ #else
+ psDevInfo->psSGXHostCtl->ui32PerfGroup = psSetHWPerfStatus->ui32PerfGroup;
+ #if defined(PDUMP)
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32PerfGroup),
+ sizeof(psDevInfo->psSGXHostCtl->ui32PerfGroup),
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+ #endif /* PDUMP */
+ #endif /* SGX_FEATURE_EXTENDED_PERF_COUNTERS */
+
+ /* Kick the ukernel to update the hardware state */
+ sCommandData.ui32Data[0] = psSetHWPerfStatus->ui32NewHWPerfStatus;
+ eError = SGXScheduleCCBCommandKM(psDeviceNode,
+ SGXMKIF_CMD_SETHWPERFSTATUS,
+ &sCommandData,
+ KERNEL_ID,
+ 0,
+ hDevMemContext,
+ IMG_FALSE);
+ return eError;
+ }
+#endif /* SUPPORT_SGX_HWPERF */
+
+ case SGX_MISC_INFO_DUMP_DEBUG_INFO:
+ {
+ PVR_LOG(("User requested SGX debug info"));
+
+ /* Dump SGX debug data to the kernel log. */
+ SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_FALSE);
+
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_DUMP_DEBUG_INFO_FORCE_REGS:
+ {
+ PVR_LOG(("User requested SGX debug info"));
+
+ /* Dump SGX debug data to the kernel log. */
+ SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_TRUE);
+
+ return PVRSRV_OK;
+ }
+
+#if defined(DEBUG)
+ /* Don't allow user-mode to reboot the device in production drivers */
+ case SGX_MISC_INFO_PANIC:
+ {
+ PVR_LOG(("User requested SGX panic"));
+
+ SGXPanic(psDeviceNode->pvDevice);
+
+ return PVRSRV_OK;
+ }
+#endif
+
+ default:
+ {
+			/* switch statement fell through, so: */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32ArraySize,
+ PVRSRV_SGX_HWPERF_CB_ENTRY *psClientHWPerfEntry,
+ IMG_UINT32 *pui32DataCount,
+ IMG_UINT32 *pui32ClockSpeed,
+ IMG_UINT32 *pui32HostTimeStamp)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
+ IMG_UINT i;
+
+ for (i = 0;
+ psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < ui32ArraySize;
+ i++)
+ {
+ SGXMKIF_HWPERF_CB_ENTRY *psMKPerfEntry = &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff];
+
+ psClientHWPerfEntry[i].ui32FrameNo = psMKPerfEntry->ui32FrameNo;
+ psClientHWPerfEntry[i].ui32PID = psMKPerfEntry->ui32PID;
+ psClientHWPerfEntry[i].ui32RTData = psMKPerfEntry->ui32RTData;
+ psClientHWPerfEntry[i].ui32Type = psMKPerfEntry->ui32Type;
+ psClientHWPerfEntry[i].ui32Ordinal = psMKPerfEntry->ui32Ordinal;
+ psClientHWPerfEntry[i].ui32Info = psMKPerfEntry->ui32Info;
+ psClientHWPerfEntry[i].ui32Clocksx16 = SGXConvertTimeStamp(psDevInfo,
+ psMKPerfEntry->ui32TimeWraps,
+ psMKPerfEntry->ui32Time);
+ OSMemCopy(&psClientHWPerfEntry[i].ui32Counters[0][0],
+ &psMKPerfEntry->ui32Counters[0][0],
+ sizeof(psMKPerfEntry->ui32Counters));
+
+ OSMemCopy(&psClientHWPerfEntry[i].ui32MiscCounters[0][0],
+ &psMKPerfEntry->ui32MiscCounters[0][0],
+ sizeof(psMKPerfEntry->ui32MiscCounters));
+
+ psHWPerfCB->ui32Roff = (psHWPerfCB->ui32Roff + 1) & (SGXMKIF_HWPERF_CB_SIZE - 1);
+ }
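+	/* The read-offset update above assumes SGXMKIF_HWPERF_CB_SIZE is a power
+	 * of two (the mask idiom relies on it), so "(roff + 1) & (size - 1)" is a
+	 * modulo wrap; e.g. with size 8: (7 + 1) & 7 == 0.  The buffer is drained
+	 * until it is empty (woff == roff) or the caller's array is full.
+	 */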
+
+ *pui32DataCount = i;
+ *pui32ClockSpeed = psDevInfo->ui32CoreClockSpeed;
+ *pui32HostTimeStamp = OSClockus();
+
+ return eError;
+}
+
+
+/******************************************************************************
+ End of file (sgxinit.c)
+******************************************************************************/
diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxkick.c b/pvr-source/services4/srvkm/devices/sgx/sgxkick.c
new file mode 100644
index 0000000..584f538
--- /dev/null
+++ b/pvr-source/services4/srvkm/devices/sgx/sgxkick.c
@@ -0,0 +1,899 @@
+/*************************************************************************/ /*!
+@Title Device specific kickTA routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h> /* For the macro offsetof() */
+#include "services_headers.h"
+#include "sgxinfo.h"
+#include "sgxinfokm.h"
+#if defined (PDUMP)
+#include "sgxapi_km.h"
+#include "pdump_km.h"
+#endif
+#include "sgx_bridge_km.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "sgxutils.h"
+#include "ttrace.h"
+
+/* Kick ordinal used to stamp ui64LastWrite on destination syncs below
+   (assumed file-scope counter; maintained elsewhere in the services code). */
+static IMG_UINT64 ui64KickCount;
+
+/*!
+******************************************************************************
+
+ @Function SGXDoKickKM
+
+ @Description
+
+ Really kicks the TA
+
+ @Input hDevHandle - Device handle
+
+ @Return ui32Error - success or failure
+
+******************************************************************************/
+IMG_EXPORT
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, SGX_CCB_KICK_KM *psCCBKick)
+#else
+PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, SGX_CCB_KICK *psCCBKick)
+#endif
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->hCCBKernelMemInfo;
+ SGXMKIF_CMDTA_SHARED *psTACmd;
+ IMG_UINT32 i;
+ IMG_HANDLE hDevMemContext = IMG_NULL;
+#if defined(FIX_HW_BRN_31620)
+ hDevMemContext = psCCBKick->hDevMemContext;
+#endif
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_ENTER, KICK_TOKEN_DOKICK);
+
+ if (!CCB_OFFSET_IS_VALID(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: Invalid CCB offset"));
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, KICK_TOKEN_DOKICK);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ /* override QAC warning about stricter alignment */
+ /* PRQA S 3305 1 */
+ psTACmd = CCB_DATA_FROM_OFFSET(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset);
+
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_CMD_START, KICK_TOKEN_DOKICK);
+
+#if defined(TTRACE)
+ if (psCCBKick->bFirstKickOrResume)
+ {
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK,
+ PVRSRV_TRACE_CLASS_FLAGS,
+ KICK_TOKEN_FIRST_KICK);
+ }
+
+ if (psCCBKick->bLastInScene)
+ {
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK,
+ PVRSRV_TRACE_CLASS_FLAGS,
+ KICK_TOKEN_LAST_KICK);
+ }
+#endif
+ PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_CCB,
+ KICK_TOKEN_CCB_OFFSET, psCCBKick->ui32CCBOffset);
+
+ /* TA/3D dependency */
+ if (psCCBKick->hTA3DSyncInfo)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_TA3D_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psTACmd->sTA3DDependency.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+
+ psTACmd->sTA3DDependency.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+
+ if (psCCBKick->bTADependency)
+ {
+ psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ }
+ }
+
+ if (psCCBKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_TA_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psTACmd->sTATQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ psTACmd->sTATQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+
+ psTACmd->ui32TATQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
+ psTACmd->ui32TATQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
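+	/* Note the post-increment idiom above: the CCB records the pre-kick
+	 * read-ops count and the kernel count is bumped in the same expression,
+	 * i.e.
+	 *     pending = psSyncData->ui32ReadOpsPending++;   (CCB sees N, kernel now N+1)
+	 * The recorded value is presumably compared against the corresponding
+	 * "complete" counter to decide when the dependency is met.
+	 */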
+
+ if (psCCBKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_3D_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psTACmd->s3DTQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ psTACmd->s3DTQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+
+ psTACmd->ui323DTQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
+ psTACmd->ui323DTQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+
+ psTACmd->ui32NumTAStatusVals = psCCBKick->ui32NumTAStatusVals;
+ if (psCCBKick->ui32NumTAStatusVals != 0)
+ {
+ /* Copy status vals over */
+ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
+ {
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ psTACmd->sCtlTAStatusInfo[i] = psCCBKick->asTAStatusUpdate[i].sCtlStatus;
+#else
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
+ psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ psTACmd->sCtlTAStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
+#endif
+ }
+ }
+
+ psTACmd->ui32Num3DStatusVals = psCCBKick->ui32Num3DStatusVals;
+ if (psCCBKick->ui32Num3DStatusVals != 0)
+ {
+ /* Copy status vals over */
+ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
+ {
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ psTACmd->sCtl3DStatusInfo[i] = psCCBKick->as3DStatusUpdate[i].sCtlStatus;
+#else
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
+ psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ psTACmd->sCtl3DStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
+#endif
+ }
+ }
+
+
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+ /* SRC and DST sync dependencies */
+ psTACmd->ui32NumTASrcSyncs = psCCBKick->ui32NumTASrcSyncs;
+ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
+
+ psTACmd->asTASrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psTACmd->asTASrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+
+ /* Get ui32ReadOpsPending snapshot and copy into the CCB - before incrementing. */
+ psTACmd->asTASrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
+ /* Copy ui32WriteOpsPending snapshot into the CCB. */
+ psTACmd->asTASrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+
+ psTACmd->ui32NumTADstSyncs = psCCBKick->ui32NumTADstSyncs;
+ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
+
+ psTACmd->asTADstSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psTACmd->asTADstSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+
+ /* Get ui32ReadOpsPending snapshot and copy into the CCB */
+ psTACmd->asTADstSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ /* Copy ui32WriteOpsPending snapshot into the CCB - before incrementing */
+ psTACmd->asTADstSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ }
+
+ psTACmd->ui32Num3DSrcSyncs = psCCBKick->ui32Num3DSrcSyncs;
+ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
+
+ psTACmd->as3DSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psTACmd->as3DSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+
+ /* Get ui32ReadOpsPending snapshot and copy into the CCB - before incrementing. */
+ psTACmd->as3DSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
+ /* Copy ui32WriteOpsPending snapshot into the CCB. */
+ psTACmd->as3DSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+#else /* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */
+ /* texture dependencies */
+ psTACmd->ui32NumSrcSyncs = psCCBKick->ui32NumSrcSyncs;
+ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_SRC_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psTACmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psTACmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+
+ /* Get ui32ReadOpsPending snapshot and copy into the CCB - before incrementing. */
+ psTACmd->asSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
+ /* Copy ui32WriteOpsPending snapshot into the CCB. */
+ psTACmd->asSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+#endif/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */
+
+ if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0)
+ {
+ PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
+ (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo;
+ SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM;
+ IMG_UINT32 ui32NumDstSyncs = psCCBKick->ui32NumDstSyncObjects;
+
+ PVR_ASSERT(((PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo)->uAllocSize >= (sizeof(SGXMKIF_HWDEVICE_SYNC_LIST) +
+ (sizeof(PVRSRV_DEVICE_SYNC_OBJECT) * ui32NumDstSyncs)));
+
+ psHWDeviceSyncList->ui32NumSyncObjects = ui32NumDstSyncs;
+#if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM())
+ {
+ PDUMPCOMMENT("HWDeviceSyncList for TACmd\r\n");
+ PDUMPMEM(IMG_NULL,
+ psHWDstSyncListMemInfo,
+ 0,
+ sizeof(SGXMKIF_HWDEVICE_SYNC_LIST),
+ 0,
+ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
+ }
+#endif
+
+ for (i=0; i<ui32NumDstSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
+
+ if (psSyncInfo)
+ {
+ psSyncInfo->psSyncData->ui64LastWrite = ui64KickCount;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_DST_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ psHWDeviceSyncList->asSyncData[i].sReadOps2CompleteDevVAddr = psSyncInfo->sReadOps2CompleteDevVAddr;
+
+ psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ psHWDeviceSyncList->asSyncData[i].ui32ReadOps2PendingVal = psSyncInfo->psSyncData->ui32ReadOps2Pending;
+
+ #if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM())
+ {
+ IMG_UINT32 ui32ModifiedValue;
+ IMG_UINT32 ui32SyncOffset = offsetof(SGXMKIF_HWDEVICE_SYNC_LIST, asSyncData)
+ + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT));
+ IMG_UINT32 ui32WOpsOffset = ui32SyncOffset
+ + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal);
+ IMG_UINT32 ui32ROpsOffset = ui32SyncOffset
+ + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal);
+ IMG_UINT32 ui32ROps2Offset = ui32SyncOffset
+ + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOps2PendingVal);
+
+ PDUMPCOMMENT("HWDeviceSyncObject for RT: %i\r\n", i);
+
+ PDUMPMEM(IMG_NULL,
+ psHWDstSyncListMemInfo,
+ ui32SyncOffset,
+ sizeof(PVRSRV_DEVICE_SYNC_OBJECT),
+ 0,
+ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
+
+ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
+ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
+ {
+ /*
+ * Init the ROpsComplete value to 0.
+ */
+ PDUMPCOMMENT("Init RT ROpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ /*
+ * Init the WOpsComplete value to 0.
+ */
+ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ }
+
+ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+
+ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1;
+
+ PDUMPCOMMENT("Modify RT %d WOpPendingVal in HWDevSyncList\r\n", i);
+
+ PDUMPMEM(&ui32ModifiedValue,
+ psHWDstSyncListMemInfo,
+ ui32WOpsOffset,
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
+
+ ui32ModifiedValue = 0;
+ PDUMPCOMMENT("Modify RT %d ROpsPendingVal in HWDevSyncList\r\n", i);
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psHWDstSyncListMemInfo,
+ ui32ROpsOffset,
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
+
+ /*
+ * Force the ROps2Complete value to 0.
+ */
+ PDUMPCOMMENT("Modify RT %d ROps2PendingVal in HWDevSyncList\r\n", i);
+ PDUMPMEM(&ui32ModifiedValue,
+ psHWDstSyncListMemInfo,
+ ui32ROps2Offset,
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
+ }
+ #endif /* defined(PDUMP) */
+ }
+ else
+ {
+ psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr.uiAddr = 0;
+ psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr.uiAddr = 0;
+ psHWDeviceSyncList->asSyncData[i].sReadOps2CompleteDevVAddr.uiAddr = 0;
+
+ psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = 0;
+ psHWDeviceSyncList->asSyncData[i].ui32ReadOps2PendingVal = 0;
+ psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = 0;
+ }
+ }
+ }
+
+ /*
+ NOTE: THIS MUST BE THE LAST THING WRITTEN TO THE TA COMMAND!
+	  Set the ready flag so the uKernel will process the command.
+ */
+ psTACmd->ui32CtrlFlags |= SGXMKIF_CMDTA_CTRLFLAGS_READY;
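+	/* Per the NOTE above, ordering matters: every field of the shared TA
+	 * command must already be in memory when SGXMKIF_CMDTA_CTRLFLAGS_READY is
+	 * raised, since the flag is what tells the uKernel the command is complete
+	 * and safe to parse.  Conceptually:
+	 *
+	 *     fill all psTACmd fields ...; then, last of all:
+	 *     psTACmd->ui32CtrlFlags |= SGXMKIF_CMDTA_CTRLFLAGS_READY;
+	 */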
+
+#if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM())
+ {
+ PDUMPCOMMENT("Shared part of TA command\r\n");
+
+ PDUMPMEM(psTACmd,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff,
+ sizeof(SGXMKIF_CMDTA_SHARED),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
+ {
+ IMG_UINT32 ui32ModifiedValue;
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
+
+ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
+ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
+ {
+ /*
+ * Init the ROpsComplete value to 0.
+ */
+ PDUMPCOMMENT("Init RT TA-SRC ROpsComplete\r\n", i);
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ /*
+ * Init the WOpsComplete value to 0.
+ */
+ PDUMPCOMMENT("Init RT TA-SRC WOpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ }
+
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+
+ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
+
+ PDUMPCOMMENT("Modify TA SrcSync %d ROpsPendingVal\r\n", i);
+
+ PDUMPMEM(&ui32ModifiedValue,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTASrcSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Modify TA SrcSync %d WOpPendingVal\r\n", i);
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTASrcSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ }
+
+ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
+ {
+ IMG_UINT32 ui32ModifiedValue;
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
+
+ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
+ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
+ {
+ /*
+ * Init the ROpsComplete value to 0.
+ */
+ PDUMPCOMMENT("Init RT TA-DST ROpsComplete\r\n", i);
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ /*
+ * Init the WOpsComplete value to 0.
+ */
+ PDUMPCOMMENT("Init RT TA-DST WOpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ }
+
+ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+
+ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1;
+
+ PDUMPCOMMENT("Modify TA DstSync %d WOpPendingVal\r\n", i);
+
+ PDUMPMEM(&ui32ModifiedValue,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTADstSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Modify TA DstSync %d ROpsPendingVal\r\n", i);
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTADstSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ }
+
+ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
+ {
+ IMG_UINT32 ui32ModifiedValue;
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
+
+ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
+ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
+ {
+ /*
+ * Init the ROpsComplete value to 0.
+ */
+ PDUMPCOMMENT("Init RT 3D-SRC ROpsComplete\r\n", i);
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ /*
+ * Init the WOpsComplete value to 0.
+ */
+ PDUMPCOMMENT("Init RT 3D-SRC WOpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ }
+
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+
+ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
+
+ PDUMPCOMMENT("Modify 3D SrcSync %d ROpsPendingVal\r\n", i);
+
+ PDUMPMEM(&ui32ModifiedValue,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, as3DSrcSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Modify 3D SrcSync %d WOpPendingVal\r\n", i);
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, as3DSrcSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ }
+#else/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */
+ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
+ {
+ IMG_UINT32 ui32ModifiedValue;
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
+
+ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
+ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
+ {
+ /*
+ * Init the ROpsComplete value to 0.
+ */
+ PDUMPCOMMENT("Init RT ROpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ /*
+ * Init the WOpsComplete value to 0.
+ */
+ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ /*
+ * Init the ROps2Complete value to 0.
+ */
+ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOps2Complete),
+ sizeof(psSyncInfo->psSyncData->ui32ReadOps2Complete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ }
+
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+
+ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
+
+ PDUMPCOMMENT("Modify SrcSync %d ROpsPendingVal\r\n", i);
+
+ PDUMPMEM(&ui32ModifiedValue,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Modify SrcSync %d WOpPendingVal\r\n", i);
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ }
+
+ if (psCCBKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
+
+ PDUMPCOMMENT("Modify TA/TQ ROpPendingVal\r\n");
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, ui32TATQSyncReadOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+ }
+
+ if (psCCBKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
+
+ PDUMPCOMMENT("Modify 3D/TQ ROpPendingVal\r\n");
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, ui323DTQSyncReadOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+ }
+
+#endif/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */
+
+ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
+ {
+#if !defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
+ PDUMPCOMMENT("Modify TA status value in TA cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_CMDTA_SHARED, sCtlTAStatusInfo[i].ui32StatusValue),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+#endif
+ }
+
+ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
+ {
+#if !defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
+ PDUMPCOMMENT("Modify 3D status value in TA cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_CMDTA_SHARED, sCtl3DStatusInfo[i].ui32StatusValue),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+#endif
+ }
+ }
+#endif /* defined(PDUMP) */
+
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_CMD_END,
+ KICK_TOKEN_DOKICK);
+
+ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TA, &psCCBKick->sCommand, KERNEL_ID, 0, hDevMemContext, psCCBKick->bLastInScene);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0)
+ {
+ for (i=0; i < psCCBKick->ui32NumDstSyncObjects; i++)
+ {
+ /* Client will retry, so undo the write ops pending increment done above. */
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
+
+ if (psSyncInfo)
+ {
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+#if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM())
+ {
+ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
+ }
+#endif
+ }
+ }
+ }
+
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsPending--;
+ }
+ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+ }
+ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsPending--;
+ }
+#else/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */
+ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsPending--;
+ }
+#endif/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */
+
+ if (psCCBKick->hTA3DSyncInfo)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
+ psSyncInfo->psSyncData->ui32ReadOpsPending--;
+ }
+
+ if (psCCBKick->hTASyncInfo)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
+ psSyncInfo->psSyncData->ui32ReadOpsPending--;
+ }
+
+ if (psCCBKick->h3DSyncInfo)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
+ psSyncInfo->psSyncData->ui32ReadOpsPending--;
+ }
+
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT,
+ KICK_TOKEN_DOKICK);
+ return eError;
+ }
+ else if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: SGXScheduleCCBCommandKM failed."));
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT,
+ KICK_TOKEN_DOKICK);
+ return eError;
+ }
+
+
+#if defined(NO_HARDWARE)
+
+
+ /* TA/3D dependency */
+ if (psCCBKick->hTA3DSyncInfo)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
+
+ if (psCCBKick->bTADependency)
+ {
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+ }
+
+ if (psCCBKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
+
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+
+ if (psCCBKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
+
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+
+ /* Copy status vals over */
+ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
+ {
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->asTAStatusUpdate[i].hKernelMemInfo;
+ /* derive offset into meminfo and write the status value */
+ *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM
+ + (psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr.uiAddr
+ - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
+#else
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
+#endif
+ }
+
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+ /* SRC and DST dependencies */
+ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+#else/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */
+ /* texture dependencies */
+ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+#endif/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */
+
+ if (psCCBKick->bTerminateOrAbort)
+ {
+ if (psCCBKick->ui32NumDstSyncObjects > 0)
+ {
+ PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
+ (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo;
+ SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM;
+
+ for (i=0; i<psCCBKick->ui32NumDstSyncObjects; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
+ if (psSyncInfo)
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal+1;
+ }
+ }
+
+ /* Copy status vals over */
+ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
+ {
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->as3DStatusUpdate[i].hKernelMemInfo;
+ /* derive offset into meminfo and write the status value */
+ *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM
+ + (psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr.uiAddr
+ - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
+#else
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
+#endif
+ }
+ }
+#endif
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT,
+ KICK_TOKEN_DOKICK);
+ return eError;
+}
+
+/******************************************************************************
+ End of file (sgxkick.c)
+******************************************************************************/
diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxpower.c b/pvr-source/services4/srvkm/devices/sgx/sgxpower.c
new file mode 100644
index 0000000..2acd28d
--- /dev/null
+++ b/pvr-source/services4/srvkm/devices/sgx/sgxpower.c
@@ -0,0 +1,630 @@
+/*************************************************************************/ /*!
+@Title Device specific power routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "sgxdefs.h"
+#include "services_headers.h"
+#include "sgxapi_km.h"
+#include "sgx_mkif_km.h"
+#include "sgxutils.h"
+#include "pdump_km.h"
+
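+/* Set while SGX is being idled or powered off (see SGXPrePowerState below)
+ * and cleared once the core is re-initialised; a plain int, presumably
+ * consumed by OS-specific code outside this file. */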
+int powering_down = 0;
+
+
+#if defined(SUPPORT_HW_RECOVERY)
+static PVRSRV_ERROR SGXAddTimer(PVRSRV_DEVICE_NODE *psDeviceNode,
+ SGX_TIMING_INFORMATION *psSGXTimingInfo,
+ IMG_HANDLE *phTimer)
+{
+ /*
+ Install timer callback for HW recovery at 50 times lower
+ frequency than the microkernel timer.
+ */
+ *phTimer = OSAddTimer(SGXOSTimer, psDeviceNode,
+ 1000 * 50 / psSGXTimingInfo->ui32uKernelFreq);
+ if(*phTimer == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXAddTimer : Failed to register timer callback function"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ return PVRSRV_OK;
+}
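+/* Worked example (assumed numbers, taking OSAddTimer's third argument as
+ * milliseconds): with ui32uKernelFreq = 100Hz the period is
+ * 1000 * 50 / 100 = 500ms, i.e. the HW-recovery check runs at 1/50th of the
+ * microkernel timer frequency (2Hz vs 100Hz), as the comment above states. */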
+#endif /* SUPPORT_HW_RECOVERY*/
+
+
+/*!
+******************************************************************************
+
+ @Function SGXUpdateTimingInfo
+
+ @Description
+
+ Derives the microkernel timing info from the system-supplied values
+
+ @Input psDeviceNode : SGX Device node
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+static PVRSRV_ERROR SGXUpdateTimingInfo(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#if defined(SGX_DYNAMIC_TIMING_INFO)
+ SGX_TIMING_INFORMATION sSGXTimingInfo = {0};
+#else
+ SGX_DEVICE_MAP *psSGXDeviceMap;
+#endif
+ IMG_UINT32 ui32ActivePowManSampleRate;
+ SGX_TIMING_INFORMATION *psSGXTimingInfo;
+
+
+#if defined(SGX_DYNAMIC_TIMING_INFO)
+ psSGXTimingInfo = &sSGXTimingInfo;
+ SysGetSGXTimingInformation(psSGXTimingInfo);
+#else
+ SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
+ (IMG_VOID**)&psSGXDeviceMap);
+ psSGXTimingInfo = &psSGXDeviceMap->sTimingInfo;
+#endif
+
+#if defined(SUPPORT_HW_RECOVERY)
+ {
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32OlduKernelFreq;
+
+ if (psDevInfo->hTimer != IMG_NULL)
+ {
+ ui32OlduKernelFreq = psDevInfo->ui32CoreClockSpeed / psDevInfo->ui32uKernelTimerClock;
+ if (ui32OlduKernelFreq != psSGXTimingInfo->ui32uKernelFreq)
+ {
+ /*
+ The ukernel timer frequency has changed.
+ */
+ IMG_HANDLE hNewTimer;
+
+ eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &hNewTimer);
+ if (eError == PVRSRV_OK)
+ {
+ eError = OSRemoveTimer(psDevInfo->hTimer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXUpdateTimingInfo: Failed to remove timer"));
+ }
+ psDevInfo->hTimer = hNewTimer;
+ }
+ else
+ {
+ /* Failed to allocate the new timer, leave the old one. */
+ }
+ }
+ }
+ else
+ {
+ eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &psDevInfo->hTimer);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ psDevInfo->psSGXHostCtl->ui32HWRecoverySampleRate =
+ psSGXTimingInfo->ui32uKernelFreq / psSGXTimingInfo->ui32HWRecoveryFreq;
+ }
+#endif /* SUPPORT_HW_RECOVERY*/
+
+ /* Copy the SGX clock speed for use in the kernel */
+ psDevInfo->ui32CoreClockSpeed = psSGXTimingInfo->ui32CoreClockSpeed;
+ psDevInfo->ui32uKernelTimerClock = psSGXTimingInfo->ui32CoreClockSpeed / psSGXTimingInfo->ui32uKernelFreq;
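+	/* Example (assumed numbers): a 200MHz core clock with a 100Hz ukernel
+	 * timer gives ui32uKernelTimerClock = 200000000 / 100 = 2000000 core
+	 * clocks per timer tick.
+	 */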
+
+ /* FIXME: no need to duplicate - remove it from psDevInfo */
+ psDevInfo->psSGXHostCtl->ui32uKernelTimerClock = psDevInfo->ui32uKernelTimerClock;
+#if defined(PDUMP)
+ PDUMPCOMMENT("Host Control - Microkernel clock");
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32uKernelTimerClock),
+ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+#endif /* PDUMP */
+
+ if (psSGXTimingInfo->bEnableActivePM)
+ {
+ ui32ActivePowManSampleRate =
+ psSGXTimingInfo->ui32uKernelFreq * psSGXTimingInfo->ui32ActivePowManLatencyms / 1000;
+ /*
+ ui32ActivePowerCounter has the value 0 when SGX is not idle.
+ When SGX becomes idle, the value of ui32ActivePowerCounter is changed from 0 to ui32ActivePowManSampleRate.
+ The ukernel timer routine decrements the value of ui32ActivePowerCounter if it is not 0.
+ When the ukernel timer decrements ui32ActivePowerCounter from 1 to 0, the ukernel timer will
+ request power down.
+ Therefore the minimum value of ui32ActivePowManSampleRate is 1.
+ */
+ ui32ActivePowManSampleRate += 1;
+ }
+ else
+ {
+ ui32ActivePowManSampleRate = 0;
+ }
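+	/* Worked example (assumed numbers): ui32uKernelFreq = 100Hz and
+	 * ui32ActivePowManLatencyms = 100 give 100 * 100 / 1000 = 10 timer ticks,
+	 * plus 1 for the non-zero floor described above, so the ukernel counts
+	 * down 11 ticks of idleness (~110ms) before requesting power-down.
+	 */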
+
+ psDevInfo->psSGXHostCtl->ui32ActivePowManSampleRate = ui32ActivePowManSampleRate;
+#if defined(PDUMP)
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32ActivePowManSampleRate),
+ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+#endif /* PDUMP */
+
+ return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function SGXStartTimer
+
+ @Description
+
+ Start the microkernel timer
+
+ @Input psDevInfo : SGX Device Info
+
+ @Return IMG_VOID :
+
+******************************************************************************/
+static IMG_VOID SGXStartTimer(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ #if defined(SUPPORT_HW_RECOVERY)
+ PVRSRV_ERROR eError;
+
+ eError = OSEnableTimer(psDevInfo->hTimer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXStartTimer : Failed to enable host timer"));
+ }
+ #else
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ #endif /* SUPPORT_HW_RECOVERY */
+}
+
+
+/*!
+******************************************************************************
+
+ @Function SGXPollForClockGating
+
+ @Description
+
+ Wait until the SGX core clocks have gated.
+
+ @Input psDevInfo : SGX Device Info
+ @Input ui32Register : Offset of register to poll
+ @Input	ui32RegisterValue : Value of register to poll for
+ @Input pszComment : Description of poll
+
+ @Return IMG_VOID :
+
+******************************************************************************/
+static IMG_VOID SGXPollForClockGating (PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Register,
+ IMG_UINT32 ui32RegisterValue,
+ IMG_CHAR *pszComment)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ PVR_UNREFERENCED_PARAMETER(ui32Register);
+ PVR_UNREFERENCED_PARAMETER(ui32RegisterValue);
+ PVR_UNREFERENCED_PARAMETER(pszComment);
+
+ #if !defined(NO_HARDWARE)
+ PVR_ASSERT(psDevInfo != IMG_NULL);
+
+ /* PRQA S 0505 1 */ /* QAC does not like assert() */
+ if (PollForValueKM((IMG_UINT32 *)psDevInfo->pvRegsBaseKM + (ui32Register >> 2),
+ 0,
+ ui32RegisterValue,
+ MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPollForClockGating: %s failed.", pszComment));
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+ PVR_DBG_BREAK;
+ }
+ #endif /* NO_HARDWARE */
+
+ PDUMPCOMMENT("%s", pszComment);
+ PDUMPREGPOL(SGX_PDUMPREG_NAME, ui32Register, 0, ui32RegisterValue, PDUMP_POLL_OPERATOR_EQUAL);
+}
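+/* A note on the poll above: the register byte offset is converted to a word
+ * pointer with "(IMG_UINT32 *)base + (offset >> 2)" (e.g. byte offset 0x40 ->
+ * word index 0x10), and reading the arguments, the literal 0 is the expected
+ * value while ui32RegisterValue acts as the bit mask -- i.e. the wait ends
+ * when every masked status bit reads back 0 (clocks gated). */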
+
+
+/*!
+******************************************************************************
+
+ @Function SGXPrePowerState
+
+ @Description
+
+ Does the necessary preparation before a power state transition.
+
+ @Input hDevHandle : SGX Device Node
+ @Input eNewPowerState : New power state
+ @Input eCurrentPowerState : Current power state
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR SGXPrePowerState (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ if ((eNewPowerState != eCurrentPowerState) &&
+ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON))
+ {
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 ui32PowerCmd, ui32CompleteStatus;
+ SGXMKIF_COMMAND sCommand = {0};
+ IMG_UINT32 ui32Core;
+ IMG_UINT32 ui32CoresEnabled;
+
+ #if defined(SUPPORT_HW_RECOVERY)
+ /* Disable timer callback for HW recovery */
+ eError = OSDisableTimer(psDevInfo->hTimer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to disable timer"));
+ return eError;
+ }
+ #endif /* SUPPORT_HW_RECOVERY */
+
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+ /* Request the ukernel to idle SGX and save its state. */
+ ui32PowerCmd = PVRSRV_POWERCMD_POWEROFF;
+ ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE;
+ PDUMPCOMMENT("SGX power off request");
+ }
+ else
+ {
+ /* Request the ukernel to idle SGX. */
+ ui32PowerCmd = PVRSRV_POWERCMD_IDLE;
+ ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE;
+ PDUMPCOMMENT("SGX idle request");
+ }
+
+ powering_down = 1;
+
+ sCommand.ui32Data[1] = ui32PowerCmd;
+
+ eError = SGXScheduleCCBCommand(psDeviceNode, SGXMKIF_CMD_POWER, &sCommand, KERNEL_ID, 0, IMG_NULL, IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to submit power down command"));
+ return eError;
+ }
+
+ /* Wait for the ukernel to complete processing. */
+ #if !defined(NO_HARDWARE)
+ if (PollForValueKM(&psDevInfo->psSGXHostCtl->ui32PowerStatus,
+ ui32CompleteStatus,
+ ui32CompleteStatus,
+ MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Wait for SGX ukernel power transition failed."));
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+ PVR_DBG_BREAK;
+ }
+ #endif /* NO_HARDWARE */
+
+ psDevInfo->bSGXIdle = IMG_TRUE;
+
+ #if defined(PDUMP)
+ PDUMPCOMMENT("TA/3D CCB Control - Wait for power event on uKernel.");
+ PDUMPMEMPOL(psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus),
+ ui32CompleteStatus,
+ ui32CompleteStatus,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+ #endif /* PDUMP */
+
+#if defined(SGX_FEATURE_MP)
+ ui32CoresEnabled = ((OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE) & EUR_CR_MASTER_CORE_ENABLE_MASK) >> EUR_CR_MASTER_CORE_ENABLE_SHIFT) + 1;
+#else
+ ui32CoresEnabled = 1;
+#endif
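+		/* EUR_CR_MASTER_CORE holds "cores enabled minus one", hence the +1
+		 * above; e.g. a field value of 1 means two cores, so the gating poll
+		 * below runs once per enabled core.  A hypothetical decode:
+		 *     ui32CoresEnabled = ((reg & ENABLE_MASK) >> ENABLE_SHIFT) + 1;
+		 */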
+
+ for (ui32Core = 0; ui32Core < ui32CoresEnabled; ui32Core++)
+ {
+ /* Wait for SGX clock gating. */
+ SGXPollForClockGating(psDevInfo,
+ SGX_MP_CORE_SELECT(psDevInfo->ui32ClkGateStatusReg, ui32Core),
+ psDevInfo->ui32ClkGateStatusMask,
+ "Wait for SGX clock gating");
+ }
+
+ #if defined(SGX_FEATURE_MP)
+ /* Wait for SGX master clock gating. */
+ SGXPollForClockGating(psDevInfo,
+ psDevInfo->ui32MasterClkGateStatusReg,
+ psDevInfo->ui32MasterClkGateStatusMask,
+ "Wait for SGX master clock gating");
+
+ SGXPollForClockGating(psDevInfo,
+ psDevInfo->ui32MasterClkGateStatus2Reg,
+ psDevInfo->ui32MasterClkGateStatus2Mask,
+ "Wait for SGX master clock gating (2)");
+ #endif /* SGX_FEATURE_MP */
+
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+ /* Finally, de-initialise some registers. */
+ eError = SGXDeinitialise(psDevInfo);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: SGXDeinitialise failed: %u", eError));
+ return eError;
+ }
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function SGXPostPowerState
+
+ @Description
+
+ Does the necessary processing after a power state transition.
+
+ @Input hDevHandle : SGX Device Node
+ @Input eNewPowerState : New power state
+ @Input eCurrentPowerState : Current power state
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR SGXPostPowerState (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ if ((eNewPowerState != eCurrentPowerState) &&
+ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON))
+ {
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
+
+ /* Reset the power manager flags. */
+ psSGXHostCtl->ui32PowerStatus = 0;
+ #if defined(PDUMP)
+ PDUMPCOMMENT("Host Control - Reset power status");
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus),
+ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+ #endif /* PDUMP */
+
+ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+ /*
+ Coming up from off, re-initialise SGX.
+ */
+
+ /*
+ Re-generate the timing data required by SGX.
+ */
+ eError = SGXUpdateTimingInfo(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXUpdateTimingInfo failed"));
+ return eError;
+ }
+
+ /*
+ Run the SGX init script.
+ */
+ eError = SGXInitialise(psDevInfo, IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXInitialise failed"));
+ return eError;
+ }
+ powering_down = 0;
+ }
+ else
+ {
+ /*
+ Coming up from idle, restart the ukernel.
+ */
+ SGXMKIF_COMMAND sCommand = {0};
+
+ sCommand.ui32Data[1] = PVRSRV_POWERCMD_RESUME;
+ eError = SGXScheduleCCBCommand(psDeviceNode, SGXMKIF_CMD_POWER, &sCommand, ISR_ID, 0, IMG_NULL, IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState failed to schedule CCB command: %u", eError));
+ return eError;
+ }
+ }
+
+ SGXStartTimer(psDevInfo);
+ }
+
+ return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function SGXPreClockSpeedChange
+
+ @Description
+
+ Does processing required before an SGX clock speed change.
+
+ @Input hDevHandle : SGX Device Node
+ @Input bIdleDevice : Whether the microkernel needs to be idled
+ @Input eCurrentPowerState : Power state of the device
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR SGXPreClockSpeedChange (IMG_HANDLE hDevHandle,
+ IMG_BOOL bIdleDevice,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+
+ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)
+ {
+ if (bIdleDevice)
+ {
+ /*
+ * Idle SGX.
+ */
+ PDUMPSUSPEND();
+
+ eError = SGXPrePowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_IDLE,
+ PVRSRV_DEV_POWER_STATE_ON);
+
+ if (eError != PVRSRV_OK)
+ {
+ PDUMPRESUME();
+ return eError;
+ }
+ }
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"SGXPreClockSpeedChange: SGX clock speed was %uHz",
+ psDevInfo->ui32CoreClockSpeed));
+
+ return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function SGXPostClockSpeedChange
+
+ @Description
+
+ Does processing required after an SGX clock speed change.
+
+ @Input hDevHandle : SGX Device Node
+ @Input bIdleDevice : Whether the microkernel had been idled previously
+ @Input eCurrentPowerState : Power state of the device
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR SGXPostClockSpeedChange (IMG_HANDLE hDevHandle,
+ IMG_BOOL bIdleDevice,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 ui32OldClockSpeed = psDevInfo->ui32CoreClockSpeed;
+
+ PVR_UNREFERENCED_PARAMETER(ui32OldClockSpeed);
+
+ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)
+ {
+ PVRSRV_ERROR eError;
+
+ /*
+ Re-generate the timing data required by SGX.
+ */
+ eError = SGXUpdateTimingInfo(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_ERROR,"SGXPostClockSpeedChange: SGXUpdateTimingInfo failed"));
+ return eError;
+ }
+
+ if (bIdleDevice)
+ {
+ /*
+ * Resume SGX.
+ */
+ eError = SGXPostPowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_ON,
+ PVRSRV_DEV_POWER_STATE_IDLE);
+
+ PDUMPRESUME();
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+ else
+ {
+ SGXStartTimer(psDevInfo);
+ }
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"SGXPostClockSpeedChange: SGX clock speed changed from %uHz to %uHz",
+ ui32OldClockSpeed, psDevInfo->ui32CoreClockSpeed));
+
+ return PVRSRV_OK;
+}
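+
+/*
+	Editor's note: SGXPreClockSpeedChange and SGXPostClockSpeedChange are
+	designed to bracket the platform's actual clock change. A hedged
+	sketch of the expected call order follows; PlatformSetSGXClock is a
+	hypothetical platform helper, not an API from this driver, and the
+	surrounding declarations (hDevHandle, psDevInfo, ui32NewSpeedHz) are
+	assumed to be in scope:
+*/
+#if 0 /* Illustrative call order only. */
+{
+	PVRSRV_ERROR eError;
+
+	/* 1. Idle the microkernel so no work is in flight. */
+	eError = SGXPreClockSpeedChange(hDevHandle, IMG_TRUE,
+	                                PVRSRV_DEV_POWER_STATE_ON);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* 2. Platform-specific PLL/divider update (hypothetical helper). */
+	PlatformSetSGXClock(ui32NewSpeedHz);
+	psDevInfo->ui32CoreClockSpeed = ui32NewSpeedHz;
+
+	/* 3. Regenerate SGX timing data and resume the microkernel. */
+	eError = SGXPostClockSpeedChange(hDevHandle, IMG_TRUE,
+	                                 PVRSRV_DEV_POWER_STATE_ON);
+}
+#endif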
+
+
+/******************************************************************************
+ End of file (sgxpower.c)
+******************************************************************************/
diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxreset.c b/pvr-source/services4/srvkm/devices/sgx/sgxreset.c
new file mode 100644
index 0000000..dcdefae
--- /dev/null
+++ b/pvr-source/services4/srvkm/devices/sgx/sgxreset.c
@@ -0,0 +1,808 @@
+/*************************************************************************/ /*!
+@Title Device specific reset routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "sgxdefs.h"
+#include "sgxmmu.h"
+#include "services_headers.h"
+#include "sgxinfokm.h"
+#include "sgxconfig.h"
+#include "sgxutils.h"
+
+#include "pdump_km.h"
+
+
+/*!
+*******************************************************************************
+
+ @Function SGXInitClocks
+
+ @Description
+ Initialise the SGX clocks
+
+ @Input psDevInfo - device info. structure
+ @Input ui32PDUMPFlags - flags to control PDUMP output
+
+ @Return IMG_VOID
+
+******************************************************************************/
+IMG_VOID SGXInitClocks(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32PDUMPFlags)
+{
+ IMG_UINT32 ui32RegVal;
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif /* PDUMP */
+
+ ui32RegVal = psDevInfo->ui32ClkGateCtl;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_CLKGATECTL, ui32RegVal, ui32PDUMPFlags);
+
+#if defined(EUR_CR_CLKGATECTL2)
+ ui32RegVal = psDevInfo->ui32ClkGateCtl2;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL2, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_CLKGATECTL2, ui32RegVal, ui32PDUMPFlags);
+#endif
+}
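+
+/*
+	Editor's note: every live register write above is mirrored by a
+	PDUMPREG* call so the PDump capture can replay the initialisation
+	off-line. The pairing, expressed as a helper (the helper itself is
+	an editor illustration, not a driver API):
+*/
+#if 0 /* Illustrative helper only. */
+static IMG_VOID WriteRegAndDump(PVRSRV_SGXDEV_INFO *psDevInfo,
+                                IMG_UINT32 ui32Reg, IMG_UINT32 ui32Val,
+                                IMG_UINT32 ui32PDUMPFlags)
+{
+	/* Apply the value to the hardware... */
+	OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Reg, ui32Val);
+	/* ...and record the identical write in the PDump stream. */
+	PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, ui32Reg, ui32Val, ui32PDUMPFlags);
+}
+#endif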
+
+
+/*!
+*******************************************************************************
+
+ @Function SGXResetInitBIFContexts
+
+ @Description
+ Initialise the BIF memory contexts
+
+ @Input psDevInfo - SGX Device Info
+ @Input ui32PDUMPFlags - flags to control PDUMP output
+
+ @Return IMG_VOID
+
+******************************************************************************/
+static IMG_VOID SGXResetInitBIFContexts(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32PDUMPFlags)
+{
+ IMG_UINT32 ui32RegVal;
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif /* PDUMP */
+
+ ui32RegVal = 0;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+
+#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the BIF bank settings\r\n");
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
+#endif /* SGX_FEATURE_MULTIPLE_MEM_CONTEXTS */
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the BIF directory list\r\n");
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags);
+
+#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ {
+ IMG_UINT32 ui32DirList, ui32DirListReg;
+
+ for (ui32DirList = 1;
+ ui32DirList < SGX_FEATURE_BIF_NUM_DIRLISTS;
+ ui32DirList++)
+ {
+ ui32DirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (ui32DirList - 1);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32DirListReg, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, ui32DirListReg, ui32RegVal, ui32PDUMPFlags);
+ }
+ }
+#endif /* SGX_FEATURE_MULTIPLE_MEM_CONTEXTS */
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function SGXResetSetupBIFContexts
+
+ @Description
+ Configure the BIF for the EDM context
+
+ @Input psDevInfo - SGX Device Info
+ @Input ui32PDUMPFlags - flags to control PDUMP output
+
+ @Return IMG_VOID
+
+******************************************************************************/
+static IMG_VOID SGXResetSetupBIFContexts(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32PDUMPFlags)
+{
+ IMG_UINT32 ui32RegVal;
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif /* PDUMP */
+
+ #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ /* Set up EDM for bank 0 to point at kernel context */
+ ui32RegVal = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
+
+ #if defined(SGX_FEATURE_2D_HARDWARE) && !defined(SGX_FEATURE_PTLA)
+ /* Set up 2D core for bank 0 to point at kernel context */
+ ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
+ #endif /* SGX_FEATURE_2D_HARDWARE */
+
+ #if defined(FIX_HW_BRN_23410)
+ /* Set up TA core for bank 0 to point at kernel context to guarantee it is a valid context */
+ ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
+ #endif /* FIX_HW_BRN_23410 */
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Set up EDM requestor page table in BIF\r\n");
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
+ #endif /* defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) */
+
+ {
+ IMG_UINT32 ui32EDMDirListReg;
+
+ /* Set up EDM context with kernel page directory */
+ #if (SGX_BIF_DIR_LIST_INDEX_EDM == 0)
+ ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE0;
+ #else
+ /* Bases 0 and 1 are not necessarily contiguous */
+ ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (SGX_BIF_DIR_LIST_INDEX_EDM - 1);
+ #endif /* SGX_BIF_DIR_LIST_INDEX_EDM */
+
+ ui32RegVal = psDevInfo->sKernelPDDevPAddr.uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT;
+
+#if defined(FIX_HW_BRN_28011)
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
+ PDUMPPDREGWITHFLAGS(&psDevInfo->sMMUAttrib, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
+#endif
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32EDMDirListReg, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the EDM's directory list base\r\n");
+ PDUMPPDREGWITHFLAGS(&psDevInfo->sMMUAttrib, ui32EDMDirListReg, ui32RegVal, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
+ }
+}
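+
+/*
+	Editor's note: the directory-list register layout is irregular --
+	BASE0 sits at its own offset while BASE1..BASEn are contiguous at a
+	4-byte stride -- and the page directory's device physical address is
+	packed into the register by shifting out its alignment bits. Both
+	calculations in isolation (sketch; same register and shift names as
+	the code above):
+*/
+#if 0 /* Illustrative sketch of the addressing used above. */
+static IMG_UINT32 DirListBaseReg(IMG_UINT32 ui32Idx)
+{
+	/* Index 0 is special-cased: BASE0 is not adjacent to BASE1. */
+	return (ui32Idx == 0) ? EUR_CR_BIF_DIR_LIST_BASE0
+	                      : EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (ui32Idx - 1);
+}
+
+static IMG_UINT32 PackPDBase(IMG_DEV_PHYADDR sPDDevPAddr)
+{
+	/* The low alignment bits are implied, so only the high bits are kept. */
+	return (IMG_UINT32)(sPDDevPAddr.uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT);
+}
+#endif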
+
+
+/*!
+*******************************************************************************
+
+ @Function SGXResetSleep
+
+ @Description
+
+ Sleep for a short time to allow reset register writes to complete.
+ Required because no status registers are available to poll on.
+
+ @Input psDevInfo - SGX Device Info
+ @Input ui32PDUMPFlags - flags to control PDUMP output
+ @Input bPDump - Pdump the sleep
+
+ @Return Nothing
+
+******************************************************************************/
+static IMG_VOID SGXResetSleep(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32PDUMPFlags,
+ IMG_BOOL bPDump)
+{
+#if defined(PDUMP) || defined(EMULATOR)
+ IMG_UINT32 ui32ReadRegister;
+
+ #if defined(SGX_FEATURE_MP)
+ ui32ReadRegister = EUR_CR_MASTER_SOFT_RESET;
+ #else
+ ui32ReadRegister = EUR_CR_SOFT_RESET;
+ #endif /* SGX_FEATURE_MP */
+#endif
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif /* PDUMP */
+
+ /* Sleep for 100 SGX clocks */
+ SGXWaitClocks(psDevInfo, 100);
+ if (bPDump)
+ {
+ PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Read back to flush the register writes\r\n");
+ PDumpRegRead(SGX_PDUMPREG_NAME, ui32ReadRegister, ui32PDUMPFlags);
+#endif
+ }
+
+#if defined(EMULATOR)
+ /*
+ Read a register to make sure we wait long enough on the emulator...
+ */
+ OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32ReadRegister);
+#endif
+}
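+
+/*
+	Editor's note: the reset registers expose no status bit, so the code
+	above relies on a fixed delay, plus a read-back on the emulator to
+	force posted writes to retire. A generic form of that
+	write / flush / wait idiom (sketch only; the reset value is an
+	example):
+*/
+#if 0 /* Illustrative idiom only. */
+IMG_UINT32 ui32ResetVal = EUR_CR_SOFT_RESET_BIF_RESET_MASK; /* example value */
+OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32ResetVal);
+/* Reading back any register on the same bus flushes posted writes. */
+(void)OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET);
+SGXWaitClocks(psDevInfo, 100); /* then allow ~100 SGX clocks to elapse */
+#endif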
+
+
+#if !defined(SGX_FEATURE_MP)
+/*!
+*******************************************************************************
+
+ @Function SGXResetSoftReset
+
+ @Description
+
+ Write to the SGX soft reset register.
+
+ @Input psDevInfo - SGX Device Info
+ @Input bResetBIF - Include the BIF in the soft reset
+ @Input ui32PDUMPFlags - flags to control PDUMP output
+ @Input bPDump - PDump the register write
+
+ @Return Nothing
+
+******************************************************************************/
+static IMG_VOID SGXResetSoftReset(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bResetBIF,
+ IMG_UINT32 ui32PDUMPFlags,
+ IMG_BOOL bPDump)
+{
+ IMG_UINT32 ui32SoftResetRegVal;
+
+ ui32SoftResetRegVal =
+ /* add common reset bits: */
+ EUR_CR_SOFT_RESET_DPM_RESET_MASK |
+ EUR_CR_SOFT_RESET_TA_RESET_MASK |
+ EUR_CR_SOFT_RESET_USE_RESET_MASK |
+ EUR_CR_SOFT_RESET_ISP_RESET_MASK |
+ EUR_CR_SOFT_RESET_TSP_RESET_MASK;
+
+/* add conditional reset bits: */
+#ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TWOD_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_TE_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TE_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_MTE_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MTE_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_ISP2_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ISP2_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_PDS_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PDS_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_PBE_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PBE_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_MADD_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MADD_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_ITR_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ITR_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_TEX_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TEX_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_VDM_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_VDM_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK;
+#endif
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif /* PDUMP */
+
+ if (bResetBIF)
+ {
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_BIF_RESET_MASK;
+ }
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32SoftResetRegVal);
+ if (bPDump)
+ {
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_SOFT_RESET, ui32SoftResetRegVal, ui32PDUMPFlags);
+ }
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function SGXResetInvalDC
+
+ @Description
+
+ Invalidate the BIF Directory Cache and wait for the operation to complete.
+
+ @Input psDevInfo - SGX Device Info
+ @Input ui32PDUMPFlags - flags to control PDUMP output
+ @Input bPDump - PDump the invalidate
+
+ @Return Nothing
+
+******************************************************************************/
+static IMG_VOID SGXResetInvalDC(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32PDUMPFlags,
+ IMG_BOOL bPDump)
+{
+ IMG_UINT32 ui32RegVal;
+
+ /* Invalidate BIF Directory cache. */
+#if defined(EUR_CR_BIF_CTRL_INVAL)
+ ui32RegVal = EUR_CR_BIF_CTRL_INVAL_ALL_MASK;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL_INVAL, ui32RegVal);
+ if (bPDump)
+ {
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL_INVAL, ui32RegVal, ui32PDUMPFlags);
+ }
+#else
+ ui32RegVal = EUR_CR_BIF_CTRL_INVALDC_MASK;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+ if (bPDump)
+ {
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+ }
+
+ ui32RegVal = 0;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+ if (bPDump)
+ {
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+ }
+#endif
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
+
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ {
+ /*
+ Wait for the DC invalidate to complete - indicated by
+ outstanding reads reaching zero.
+ */
+ if (PollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + EUR_CR_BIF_MEM_REQ_STAT),
+ 0,
+ EUR_CR_BIF_MEM_REQ_STAT_READS_MASK,
+ MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Wait for DC invalidate failed."));
+ PVR_DBG_BREAK;
+ }
+
+ if (bPDump)
+ {
+ PDUMPREGPOLWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_MEM_REQ_STAT, 0, EUR_CR_BIF_MEM_REQ_STAT_READS_MASK, ui32PDUMPFlags, PDUMP_POLL_OPERATOR_EQUAL);
+ }
+ }
+#endif /* SGX_FEATURE_MULTIPLE_MEM_CONTEXTS */
+}
+#endif /* SGX_FEATURE_MP */
+
+
+/*!
+*******************************************************************************
+
+ @Function SGXReset
+
+ @Description
+
+ Reset chip
+
+ @Input psDevInfo - device info. structure
+ @Input bHardwareRecovery - true if recovering powered hardware,
+ false if powering up
+ @Input ui32PDUMPFlags - flags to control PDUMP output
+
+ @Return IMG_VOID
+
+******************************************************************************/
+IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bHardwareRecovery,
+ IMG_UINT32 ui32PDUMPFlags)
+#if !defined(SGX_FEATURE_MP)
+{
+ IMG_UINT32 ui32RegVal;
+#if defined(EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK)
+ const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK;
+#else
+ const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK;
+#endif
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif /* PDUMP */
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n");
+
+#if defined(FIX_HW_BRN_23944)
+ /* Pause the BIF. */
+ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
+ if (ui32RegVal & ui32BifFaultMask)
+ {
+ /* Page fault needs to be cleared before resetting the BIF. */
+ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+ }
+#endif /* defined(FIX_HW_BRN_23944) */
+
+ /* Reset all including BIF */
+ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ /*
+ Initialise the BIF state.
+ */
+#if defined(SGX_FEATURE_36BIT_MMU)
+ /* enable 36bit addressing mode if the MMU supports it*/
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK, ui32PDUMPFlags);
+#endif
+
+ SGXResetInitBIFContexts(psDevInfo, ui32PDUMPFlags);
+
+#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
+ /*
+ Initialise the memory arbiter to its default state
+ */
+ ui32RegVal = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) |
+ (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) |
+ (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal, ui32PDUMPFlags);
+#endif /* EUR_CR_BIF_MEM_ARB_CONFIG */
+
+#if defined(SGX_FEATURE_SYSTEM_CACHE)
+ #if defined(SGX_BYPASS_SYSTEM_CACHE)
+ /* set the SLC to bypass all accesses */
+ ui32RegVal = MNE_CR_CTRL_BYPASS_ALL_MASK;
+ #else
+ #if defined(FIX_HW_BRN_26620)
+ ui32RegVal = 0;
+ #else
+ /* set the SLC to bypass cache-coherent accesses */
+ ui32RegVal = MNE_CR_CTRL_BYP_CC_MASK;
+ #endif
+ #if defined(FIX_HW_BRN_34028)
+ /* Bypass the MNE for the USEC requester */
+ ui32RegVal |= (8 << MNE_CR_CTRL_BYPASS_SHIFT);
+ #endif
+ #endif /* SGX_BYPASS_SYSTEM_CACHE */
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, MNE_CR_CTRL, ui32RegVal);
+ PDUMPREG(SGX_PDUMPREG_NAME, MNE_CR_CTRL, ui32RegVal);
+#endif /* SGX_FEATURE_SYSTEM_CACHE */
+
+ if (bHardwareRecovery)
+ {
+ /*
+ Set all requestors to the dummy PD which forces all memory
+ accesses to page fault.
+ This enables us to flush out BIF requests from parts of SGX
+ which do not have their own soft reset.
+		Note: sBIFResetPDDevPAddr.uiAddr is a relative address (2GB max);
+		the MSB is the bus master enable flag (1 == enabled).
+ */
+ ui32RegVal = (IMG_UINT32)psDevInfo->sBIFResetPDDevPAddr.uiAddr;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+
+ /* Bring BIF out of reset. */
+ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE);
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+
+ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+
+ /*
+ Check for a page fault from parts of SGX which do not have a reset.
+ */
+ for (;;)
+ {
+ IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
+ IMG_DEV_VIRTADDR sBifFault;
+ IMG_UINT32 ui32PDIndex, ui32PTIndex;
+
+ if ((ui32BifIntStat & ui32BifFaultMask) == 0)
+ {
+ break;
+ }
+
+ /*
+ There is a page fault, so reset the BIF again, map in the dummy page,
+ bring the BIF up and invalidate the Directory Cache.
+ */
+ sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
+ PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr));
+ ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
+ ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
+
+ /* Put the BIF into reset. */
+ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE);
+
+ /* Map in the dummy page. */
+ psDevInfo->pui32BIFResetPD[ui32PDIndex] = (psDevInfo->sBIFResetPTDevPAddr.uiAddr
+ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_VALID;
+ psDevInfo->pui32BIFResetPT[ui32PTIndex] = (psDevInfo->sBIFResetPageDevPAddr.uiAddr
+ >>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_VALID;
+
+ /* Clear outstanding events. */
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal);
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+
+ /* Bring the BIF out of reset. */
+ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE);
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+
+ /* Invalidate Directory Cache. */
+ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+
+ /* Unmap the dummy page and try again. */
+ psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
+ psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
+ }
+ }
+ else
+ {
+ /* Bring BIF out of reset. */
+ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE);
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+ }
+
+ /*
+ Initialise the BIF memory contexts before bringing the rest of SGX out of reset.
+ */
+ SGXResetSetupBIFContexts(psDevInfo, ui32PDUMPFlags);
+
+#if defined(SGX_FEATURE_2D_HARDWARE) && !defined(SGX_FEATURE_PTLA)
+ /* check that the heap base has the right alignment (1Mb) */
+ #if ((SGX_2D_HEAP_BASE & ~EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK) != 0)
+ #error "SGXReset: SGX_2D_HEAP_BASE doesn't match EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK alignment"
+ #endif
+ /* Set up 2D requestor base */
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags);
+#endif
+
+ /* Invalidate BIF Directory cache. */
+ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"Soft Reset of SGX"));
+
+ /* Take chip out of reset */
+ ui32RegVal = 0;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+
+ /* wait a bit */
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
+}
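+
+/*
+	Editor's note: the hardware-recovery loop above decodes the faulting
+	virtual address into page-directory and page-table indices before
+	mapping in the dummy page. Worked through for the common SGX MMU
+	layout (4KB pages, 1024 PTEs per page table, i.e.
+	SGX_MMU_PAGE_SHIFT == 12 and SGX_MMU_PT_SHIFT == 10 -- values
+	assumed here for illustration):
+
+	  fault at 0x12345678:
+	    PD index = 0x12345678 >> (12 + 10)         = 0x48
+	    PT index = (0x12345678 & 0x003FF000) >> 12 = 0x345
+*/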
+
+#else
+
+{
+ IMG_UINT32 ui32RegVal;
+
+ PVR_UNREFERENCED_PARAMETER(bHardwareRecovery);
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif /* PDUMP */
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX MP reset sequence\r\n");
+
+ /* Put hydra into soft reset */
+ ui32RegVal = EUR_CR_MASTER_SOFT_RESET_BIF_RESET_MASK |
+ EUR_CR_MASTER_SOFT_RESET_IPF_RESET_MASK |
+ EUR_CR_MASTER_SOFT_RESET_DPM_RESET_MASK |
+ EUR_CR_MASTER_SOFT_RESET_VDM_RESET_MASK;
+
+ if (bHardwareRecovery)
+ {
+ ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_MCI_RESET_MASK;
+ }
+
+#if defined(SGX_FEATURE_PTLA)
+ ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_PTLA_RESET_MASK;
+#endif
+#if defined(SGX_FEATURE_SYSTEM_CACHE)
+ ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_SLC_RESET_MASK;
+#endif
+
+ /* Hard reset the slave cores */
+ ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(0) |
+ EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(1) |
+ EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(2) |
+ EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(3);
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Soft reset hydra partition, hard reset the cores\r\n");
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ ui32RegVal = 0;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_CTRL, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the hydra BIF control\r\n");
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+
+#if defined(SGX_FEATURE_SYSTEM_CACHE)
+ #if defined(SGX_BYPASS_SYSTEM_CACHE)
+ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_ALL_MASK;
+ #else
+ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_MASK |
+ #if defined(FIX_HW_BRN_30954)
+ EUR_CR_MASTER_SLC_CTRL_DISABLE_REORDERING_MASK |
+ #endif
+ #if defined(PVR_SLC_8KB_ADDRESS_MODE)
+ (4 << EUR_CR_MASTER_SLC_CTRL_ADDR_DECODE_MODE_SHIFT) |
+ #endif
+ #if defined(FIX_HW_BRN_33809)
+ (2 << EUR_CR_MASTER_SLC_CTRL_ADDR_DECODE_MODE_SHIFT) |
+ #endif
+ (0xC << EUR_CR_MASTER_SLC_CTRL_ARB_PAGE_SIZE_SHIFT);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the hydra SLC control\r\n");
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SLC_CTRL, ui32RegVal);
+
+ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_MASK;
+ #if defined(FIX_HW_BRN_31620)
+ ui32RegVal |= EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_MMU_MASK;
+ #endif
+ #if defined(FIX_HW_BRN_31195)
+ ui32RegVal |= EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE0_MASK |
+ EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE1_MASK |
+ EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE2_MASK |
+ EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE3_MASK |
+ EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_TA_MASK;
+ #endif
+ #endif /* SGX_BYPASS_SYSTEM_CACHE */
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the hydra SLC bypass control\r\n");
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
+#endif /* SGX_FEATURE_SYSTEM_CACHE */
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ /* Remove the resets */
+ ui32RegVal = 0;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Remove the resets from all of SGX\r\n");
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Turn on the slave cores' clock gating\r\n");
+ SGXInitClocks(psDevInfo, ui32PDUMPFlags);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the slave BIFs\r\n");
+
+#if defined(FIX_HW_BRN_31278) || defined(FIX_HW_BRN_31620) || defined(FIX_HW_BRN_31671) || defined(FIX_HW_BRN_32085)
+ #if defined(FIX_HW_BRN_31278) || defined(FIX_HW_BRN_32085)
+ /* disable prefetch */
+ ui32RegVal = (1<<EUR_CR_MASTER_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT);
+ #else
+ ui32RegVal = (1<<EUR_CR_MASTER_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT) | EUR_CR_MASTER_BIF_MMU_CTRL_PREFETCHING_ON_MASK;
+ #endif
+ #if !defined(FIX_HW_BRN_31620) && !defined(FIX_HW_BRN_31671)
+ /* enable the DC TLB */
+ ui32RegVal |= EUR_CR_MASTER_BIF_MMU_CTRL_ENABLE_DC_TLB_MASK;
+ #endif
+
+ /* Master bank */
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_MMU_CTRL, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_BIF_MMU_CTRL, ui32RegVal, ui32PDUMPFlags);
+
+ #if defined(FIX_HW_BRN_31278) || defined(FIX_HW_BRN_32085)
+ /* disable prefetch */
+ ui32RegVal = (1<<EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT);
+ #else
+ ui32RegVal = (1<<EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT) | EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_MASK;
+ #endif
+ #if !defined(FIX_HW_BRN_31620) && !defined(FIX_HW_BRN_31671)
+ /* enable the DC TLB */
+ ui32RegVal |= EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_MASK;
+ #endif
+
+ /* Per-core */
+ {
+ IMG_UINT32 ui32Core;
+
+ for (ui32Core=0;ui32Core<SGX_FEATURE_MP_CORE_COUNT;ui32Core++)
+ {
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BIF_MMU_CTRL, ui32Core), ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_BIF_MMU_CTRL, ui32Core), ui32RegVal, ui32PDUMPFlags);
+ }
+ }
+#endif
+
+ SGXResetInitBIFContexts(psDevInfo, ui32PDUMPFlags);
+ SGXResetSetupBIFContexts(psDevInfo, ui32PDUMPFlags);
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX MP reset sequence\r\n");
+}
+#endif /* SGX_FEATURE_MP */
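+
+/*
+	Editor's note: in the MP build, per-core registers are reached by
+	offsetting the core-0 register with SGX_MP_CORE_SELECT, as in the
+	BIF_MMU_CTRL loop above. The typical shape of that macro is shown
+	below; the 0x4000 stride is an assumption for illustration only --
+	the real stride comes from the core's register map:
+*/
+#if 0 /* Illustrative only -- the stride below is assumed. */
+#define EXAMPLE_MP_CORE_STRIDE		0x4000
+#define EXAMPLE_MP_CORE_SELECT(reg, core) \
+	((reg) + ((core) * EXAMPLE_MP_CORE_STRIDE))
+#endif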
+
+
+/******************************************************************************
+ End of file (sgxreset.c)
+******************************************************************************/
diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxtransfer.c b/pvr-source/services4/srvkm/devices/sgx/sgxtransfer.c
new file mode 100644
index 0000000..81f3b07
--- /dev/null
+++ b/pvr-source/services4/srvkm/devices/sgx/sgxtransfer.c
@@ -0,0 +1,814 @@
+/*************************************************************************/ /*!
+@Title Device specific transfer queue routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(TRANSFER_QUEUE)
+
+#include <stddef.h>
+
+#include "sgxdefs.h"
+#include "services_headers.h"
+#include "buffer_manager.h"
+#include "sgxinfo.h"
+#include "sysconfig.h"
+#include "pdump_km.h"
+#include "mmu.h"
+#include "pvr_bridge.h"
+#include "sgx_bridge_km.h"
+#include "sgxinfokm.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "sgxutils.h"
+#include "ttrace.h"
+
+#if defined (SUPPORT_SID_INTERFACE)
+IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK_KM *psKick)
+#else
+IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick)
+#endif
+{
+ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
+ SGXMKIF_COMMAND sCommand = {0};
+ SGXMKIF_TRANSFERCMD_SHARED *psSharedTransferCmd;
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 loop;
+ IMG_HANDLE hDevMemContext = IMG_NULL;
+ IMG_BOOL abSrcSyncEnable[SGX_MAX_TRANSFER_SYNC_OPS];
+ IMG_UINT32 ui32RealSrcSyncNum = 0;
+ IMG_BOOL abDstSyncEnable[SGX_MAX_TRANSFER_SYNC_OPS];
+ IMG_UINT32 ui32RealDstSyncNum = 0;
+
+
+#if defined(PDUMP)
+ IMG_BOOL bPersistentProcess = IMG_FALSE;
+ /*
+ * For persistent processes, the HW kicks should not go into the
+ * extended init phase; only keep memory transactions from the
+ * window system which are necessary to run the client app.
+ */
+ {
+ PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData();
+ if(psPerProc != IMG_NULL)
+ {
+ bPersistentProcess = psPerProc->bPDumpPersistent;
+ }
+ }
+#endif /* PDUMP */
+#if defined(FIX_HW_BRN_31620)
+ hDevMemContext = psKick->hDevMemContext;
+#endif
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_ENTER, TRANSFER_TOKEN_SUBMIT);
+
+ for (loop = 0; loop < SGX_MAX_TRANSFER_SYNC_OPS; loop++)
+ {
+ abSrcSyncEnable[loop] = IMG_TRUE;
+ abDstSyncEnable[loop] = IMG_TRUE;
+ }
+
+ if (!CCB_OFFSET_IS_VALID(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: Invalid CCB offset"));
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_EXIT,
+ TRANSFER_TOKEN_SUBMIT);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ /* override QAC warning about stricter alignment */
+ /* PRQA S 3305 1 */
+ psSharedTransferCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
+
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_CMD_START, TRANSFER_TOKEN_SUBMIT);
+ PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_CCB,
+ TRANSFER_TOKEN_CCB_OFFSET, psKick->ui32SharedCmdCCBOffset);
+
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_TA_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psSharedTransferCmd->ui32TASyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ psSharedTransferCmd->ui32TASyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+
+ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ }
+ else
+ {
+ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr.uiAddr = 0;
+ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr.uiAddr = 0;
+ }
+
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_3D_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psSharedTransferCmd->ui323DSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ psSharedTransferCmd->ui323DSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+
+ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ }
+ else
+ {
+ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr.uiAddr = 0;
+ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr.uiAddr = 0;
+ }
+
+ /* filter out multiple occurrences of the same sync object from srcs or dests
+ * note : the same sync can still be used to synchronize both src and dst.
+ */
+ for (loop = 0; loop < MIN(SGX_MAX_TRANSFER_SYNC_OPS, psKick->ui32NumSrcSync); loop++)
+ {
+ IMG_UINT32 i;
+
+ PVRSRV_KERNEL_SYNC_INFO * psMySyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop];
+
+ for (i = 0; i < loop; i++)
+ {
+ if (abSrcSyncEnable[i])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
+
+ if (psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr == psMySyncInfo->sWriteOpsCompleteDevVAddr.uiAddr)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "SGXSubmitTransferKM : Same src synchronized multiple times!"));
+ abSrcSyncEnable[loop] = IMG_FALSE;
+ break;
+ }
+ }
+ }
+ if (abSrcSyncEnable[loop])
+ {
+ ui32RealSrcSyncNum++;
+ }
+ }
+ for (loop = 0; loop < MIN(SGX_MAX_TRANSFER_SYNC_OPS, psKick->ui32NumDstSync); loop++)
+ {
+ IMG_UINT32 i;
+
+ PVRSRV_KERNEL_SYNC_INFO * psMySyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop];
+
+ for (i = 0; i < loop; i++)
+ {
+ if (abDstSyncEnable[i])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[i];
+
+ if (psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr == psMySyncInfo->sWriteOpsCompleteDevVAddr.uiAddr)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "SGXSubmitTransferKM : Same dst synchronized multiple times!"));
+ abDstSyncEnable[loop] = IMG_FALSE;
+ break;
+ }
+ }
+ }
+ if (abDstSyncEnable[loop])
+ {
+ ui32RealDstSyncNum++;
+ }
+ }
+
+ psSharedTransferCmd->ui32NumSrcSyncs = ui32RealSrcSyncNum;
+ psSharedTransferCmd->ui32NumDstSyncs = ui32RealDstSyncNum;
+
+ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL)
+ {
+ IMG_UINT32 i = 0;
+
+ for (loop = 0; loop < psKick->ui32NumSrcSync; loop++)
+ {
+ if (abSrcSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop];
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_SRC_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psSharedTransferCmd->asSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ psSharedTransferCmd->asSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+
+ psSharedTransferCmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psSharedTransferCmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ i++;
+ }
+ }
+ PVR_ASSERT(i == ui32RealSrcSyncNum);
+
+ i = 0;
+ for (loop = 0; loop < psKick->ui32NumDstSync; loop++)
+ {
+ if (abDstSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop];
+
+ psSyncInfo->psSyncData->ui64LastWrite = ui64KickCount;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_DST_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psSharedTransferCmd->asDstSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ psSharedTransferCmd->asDstSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ psSharedTransferCmd->asDstSyncs[i].ui32ReadOps2PendingVal = psSyncInfo->psSyncData->ui32ReadOps2Pending;
+
+ psSharedTransferCmd->asDstSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psSharedTransferCmd->asDstSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ psSharedTransferCmd->asDstSyncs[i].sReadOps2CompleteDevVAddr = psSyncInfo->sReadOps2CompleteDevVAddr;
+ i++;
+ }
+ }
+ PVR_ASSERT(i == ui32RealDstSyncNum);
+
+ /*
+ * We allow source and destination sync objects to be the
+ * same, which is why the read/write pending updates are delayed
+ * until the transfer command has been updated with the current
+ * values from the objects.
+ */
+ for (loop = 0; loop < psKick->ui32NumSrcSync; loop++)
+ {
+ if (abSrcSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32ReadOpsPending++;
+ }
+ }
+ for (loop = 0; loop < psKick->ui32NumDstSync; loop++)
+ {
+ if (abDstSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ }
+ }
+ }
+
+#if defined(PDUMP)
+ if ((PDumpIsCaptureFrameKM()
+ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
+ && (bPersistentProcess == IMG_FALSE) )
+ {
+ PDUMPCOMMENT("Shared part of transfer command\r\n");
+ PDUMPMEM(psSharedTransferCmd,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff,
+ sizeof(SGXMKIF_TRANSFERCMD_SHARED),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL)
+ {
+ IMG_UINT32 i = 0;
+
+ for (loop = 0; loop < psKick->ui32NumSrcSync; loop++)
+ {
+ if (abSrcSyncEnable[loop])
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[loop];
+
+ PDUMPCOMMENT("Tweak src surface write op in transfer cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asSrcSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal)),
+ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Tweak src surface read op in transfer cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asSrcSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal)),
+ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ i++;
+ }
+ }
+
+ i = 0;
+ for (loop = 0; loop < psKick->ui32NumDstSync; loop++)
+ {
+				if (abDstSyncEnable[loop])
+ {
+ IMG_UINT32 ui32PDumpReadOp2 = 0;
+ psSyncInfo = psKick->ahDstSyncInfo[loop];
+
+ PDUMPCOMMENT("Tweak dest surface write op in transfer cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asDstSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal)),
+ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Tweak dest surface read op in transfer cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asDstSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal)),
+ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Tweak dest surface read op2 in transfer cmd\r\n");
+ PDUMPMEM(&ui32PDumpReadOp2,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asDstSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOps2PendingVal)),
+ sizeof(ui32PDumpReadOp2),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ i++;
+ }
+ }
+
+ /*
+ * We allow the first source and destination sync objects to be the
+ * same, which is why the read/write pending updates are delayed
+ * until the transfer command has been updated with the current
+ * values from the objects.
+ */
+ for (loop = 0; loop < (psKick->ui32NumSrcSync); loop++)
+ {
+ if (abSrcSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+ }
+ }
+
+ for (loop = 0; loop < (psKick->ui32NumDstSync); loop++)
+ {
+ if (abDstSyncEnable[loop])
+ {
+				psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+ }
+ }
+ }
+
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->hTASyncInfo;
+
+ PDUMPCOMMENT("Tweak TA/TQ surface write op in transfer cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32TASyncWriteOpsPendingVal)),
+ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+ }
+
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->h3DSyncInfo;
+
+ PDUMPCOMMENT("Tweak 3D/TQ surface write op in transfer cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui323DSyncWriteOpsPendingVal)),
+ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+ }
+ }
+#endif
+
+ sCommand.ui32Data[1] = psKick->sHWTransferContextDevVAddr.uiAddr;
+
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_CMD_END,
+ TRANSFER_TOKEN_SUBMIT);
+
+ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TRANSFER, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags, hDevMemContext, IMG_FALSE);
+
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ /* Client will retry, so undo the sync ops pending increment(s) done above. */
+ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL)
+ {
+ for (loop = 0; loop < psKick->ui32NumSrcSync; loop++)
+ {
+ if (abSrcSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32ReadOpsPending--;
+#if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM()
+ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
+ {
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal--;
+ }
+#endif
+ }
+ }
+ for (loop = 0; loop < psKick->ui32NumDstSync; loop++)
+ {
+ if (abDstSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+#if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM()
+ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
+ {
+ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
+ }
+#endif
+ }
+ }
+ }
+
+ /* Command needed to be synchronised with the TA? */
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+ }
+
+ /* Command needed to be synchronised with the 3D? */
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+ }
+ }
+
+ else if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: SGXScheduleCCBCommandKM failed."));
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_EXIT,
+ TRANSFER_TOKEN_SUBMIT);
+ return eError;
+ }
+
+
+#if defined(NO_HARDWARE)
+ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_NOSYNCUPDATE) == 0)
+ {
+ /* Update sync objects pretending that we have done the job*/
+ for (loop = 0; loop < psKick->ui32NumSrcSync; loop++)
+ {
+ if (abSrcSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+ }
+
+ for (loop = 0; loop < psKick->ui32NumDstSync; loop++)
+ {
+ if (abDstSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+ }
+
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+ }
+#endif
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_EXIT,
+ TRANSFER_TOKEN_SUBMIT);
+ return eError;
+}
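+
+/*
+	Editor's note: SGXSubmitTransferKM filters repeated sync objects by
+	comparing each sync's "write ops complete" device virtual address
+	against every earlier enabled entry -- an O(n^2) scan that is cheap
+	at SGX_MAX_TRANSFER_SYNC_OPS entries. The core of that filter as a
+	standalone sketch:
+*/
+#if 0 /* Standalone sketch of the duplicate-sync filter. */
+#include <stdint.h>
+#include <stdbool.h>
+
+/* Disable entry i if an earlier enabled entry shares its completion
+   address; returns the number of unique (still enabled) entries. */
+static unsigned dedupe_syncs(const uint32_t *addrs, bool *enabled, unsigned n)
+{
+	unsigned i, j, unique = 0;
+	for (i = 0; i < n; i++)
+	{
+		enabled[i] = true;
+		for (j = 0; j < i; j++)
+		{
+			if (enabled[j] && addrs[j] == addrs[i])
+			{
+				enabled[i] = false; /* duplicate: sync it only once */
+				break;
+			}
+		}
+		if (enabled[i])
+		{
+			unique++;
+		}
+	}
+	return unique;
+}
+#endif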
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+#if defined (SUPPORT_SID_INTERFACE)
+IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK_KM *psKick)
+#else
+IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick)
+#endif
+
+{
+ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
+ SGXMKIF_COMMAND sCommand = {0};
+ SGXMKIF_2DCMD_SHARED *ps2DCmd;
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+ IMG_HANDLE hDevMemContext = IMG_NULL;
+#if defined(PDUMP)
+ IMG_BOOL bPersistentProcess = IMG_FALSE;
+ /*
+ * For persistent processes, the HW kicks should not go into the
+ * extended init phase; only keep memory transactions from the
+ * window system which are necessary to run the client app.
+ */
+ {
+ PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData();
+ if(psPerProc != IMG_NULL)
+ {
+ bPersistentProcess = psPerProc->bPDumpPersistent;
+ }
+ }
+#endif /* PDUMP */
+#if defined(FIX_HW_BRN_31620)
+ hDevMemContext = psKick->hDevMemContext;
+#endif
+
+ if (!CCB_OFFSET_IS_VALID(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXSubmit2DKM: Invalid CCB offset"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ /* override QAC warning about stricter alignment */
+ /* PRQA S 3305 1 */
+ ps2DCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
+
+ OSMemSet(ps2DCmd, 0, sizeof(*ps2DCmd));
+
+ /* Command needs to be synchronised with the TA? */
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
+
+ ps2DCmd->sTASyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ ps2DCmd->sTASyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+
+ ps2DCmd->sTASyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ ps2DCmd->sTASyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ }
+
+ /* Command needs to be synchronised with the 3D? */
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
+
+ ps2DCmd->s3DSyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ ps2DCmd->s3DSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+
+ ps2DCmd->s3DSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ ps2DCmd->s3DSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ }
+
+ /*
+ * We allow the first source and destination sync objects to be the
+ * same, which is why the read/write pending updates are delayed
+ * until the transfer command has been updated with the current
+ * values from the objects.
+ */
+ ps2DCmd->ui32NumSrcSync = psKick->ui32NumSrcSync;
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[i];
+
+ ps2DCmd->sSrcSyncData[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ ps2DCmd->sSrcSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+
+ ps2DCmd->sSrcSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ ps2DCmd->sSrcSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->hDstSyncInfo;
+
+ ps2DCmd->sDstSyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ ps2DCmd->sDstSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ ps2DCmd->sDstSyncData.ui32ReadOps2PendingVal = psSyncInfo->psSyncData->ui32ReadOps2Pending;
+
+ ps2DCmd->sDstSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ ps2DCmd->sDstSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ ps2DCmd->sDstSyncData.sReadOps2CompleteDevVAddr = psSyncInfo->sReadOps2CompleteDevVAddr;
+ }
+
+ /* Read/Write ops pending updates, delayed from above */
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsPending++;
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->hDstSyncInfo;
+ psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ }
+
+#if defined(PDUMP)
+ if ((PDumpIsCaptureFrameKM()
+ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
+ && (bPersistentProcess == IMG_FALSE) )
+ {
+ /* Pdump the command from the per context CCB */
+ PDUMPCOMMENT("Shared part of 2D command\r\n");
+ PDUMPMEM(ps2DCmd,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff,
+ sizeof(SGXMKIF_2DCMD_SHARED),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[i];
+
+ PDUMPCOMMENT("Tweak src surface write op in 2D cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32WriteOpsPendingVal),
+ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Tweak src surface read op in 2D cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32ReadOpsPendingVal),
+ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ IMG_UINT32 ui32PDumpReadOp2 = 0;
+ psSyncInfo = psKick->hDstSyncInfo;
+
+ PDUMPCOMMENT("Tweak dest surface write op in 2D cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32WriteOpsPendingVal),
+ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Tweak dest surface read op in 2D cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32ReadOpsPendingVal),
+ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ PDUMPCOMMENT("Tweak dest surface read op2 in 2D cmd\r\n");
+ PDUMPMEM(&ui32PDumpReadOp2,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32ReadOps2PendingVal),
+ sizeof(ui32PDumpReadOp2),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ }
+
+ /* Read/Write ops pending updates, delayed from above */
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[i];
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->hDstSyncInfo;
+ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+ }
+ }
+#endif
+
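+	/* Pass the HW 2D context address to the microkernel in the command data */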
+ sCommand.ui32Data[1] = psKick->sHW2DContextDevVAddr.uiAddr;
+
+ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_2D, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags, hDevMemContext, IMG_FALSE);
+
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+		/* Client will retry, so undo the read/write ops pending increments
+		   done above.
+		*/
+#if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM())
+ {
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[i];
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal--;
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->hDstSyncInfo;
+ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
+ }
+ }
+#endif
+
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsPending--;
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->hDstSyncInfo;
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+ }
+
+ /* Command needed to be synchronised with the TA? */
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+ }
+
+ /* Command needed to be synchronised with the 3D? */
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+ }
+ }
+
+#if defined(NO_HARDWARE)
+	/* Update the sync objects as if the hardware had completed the job */
+ for(i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hDstSyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+#endif
+
+ return eError;
+}
+#endif /* SGX_FEATURE_2D_HARDWARE */
+#endif /* TRANSFER_QUEUE */
diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxutils.c b/pvr-source/services4/srvkm/devices/sgx/sgxutils.c
new file mode 100644
index 0000000..227675d
--- /dev/null
+++ b/pvr-source/services4/srvkm/devices/sgx/sgxutils.c
@@ -0,0 +1,1912 @@
+/*************************************************************************/ /*!
+@Title Device specific utility routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "sgxdefs.h"
+#include "services_headers.h"
+#include "buffer_manager.h"
+#include "sgx_bridge_km.h"
+#include "sgxapi_km.h"
+#include "sgxinfo.h"
+#include "sgx_mkif_km.h"
+#include "sysconfig.h"
+#include "pdump_km.h"
+#include "mmu.h"
+#include "pvr_bridge_km.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "sgxutils.h"
+#include "ttrace.h"
+
+#ifdef __linux__
+#include <linux/kernel.h> // sprintf
+#include <linux/string.h> // strncpy, strlen
+#else
+#include <stdio.h>
+#endif
+
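+/* Running count of microkernel kicks; incremented on every kernel CCB submission */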
+IMG_UINT64 ui64KickCount;
+
+
+#if defined(SYS_CUSTOM_POWERDOWN)
+PVRSRV_ERROR SysPowerDownMISR(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32CallerID);
+#endif
+
+
+
+/*!
+******************************************************************************
+
+ @Function SGXPostActivePowerEvent
+
+ @Description
+
+ Post-power-event handling (e.g. restarting SGX if work was queued during the power-off)
+
+ @Input psDeviceNode : SGX Device Node
+ @Input ui32CallerID - KERNEL_ID or ISR_ID
+
+ @Return IMG_VOID :
+
+******************************************************************************/
+static IMG_VOID SGXPostActivePowerEvent(PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32CallerID)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
+
+ /* Update the counter for stats. */
+ psSGXHostCtl->ui32NumActivePowerEvents++;
+
+ if ((psSGXHostCtl->ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXPostActivePowerEvent: SGX requests immediate restart"));
+
+ /*
+ Events were queued during the active power
+ request, so SGX will need to be restarted.
+ */
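+		/* An ISR cannot kick the queues directly, so flag the restart for command-complete handling */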
+ if (ui32CallerID == ISR_ID)
+ {
+ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
+ }
+ else
+ {
+ SGXScheduleProcessQueuesKM(psDeviceNode);
+ }
+ }
+}
+
+
+/*!
+******************************************************************************
+
+ @Function SGXTestActivePowerEvent
+
+ @Description
+
+ Checks whether the microkernel has generated an active power event. If so,
+ perform the power transition.
+
+ @Input psDeviceNode : SGX Device Node
+ @Input ui32CallerID - KERNEL_ID or ISR_ID
+
+ @Return IMG_VOID :
+
+******************************************************************************/
+IMG_VOID SGXTestActivePowerEvent (PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32CallerID)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
+
+ /*
+ * Quickly check (without lock) if there is an IDLE or APM event we should handle.
+ * This check fails most of the time so we don't want to incur lock overhead.
+	 * Check the flags in the reverse order to that in which the microkernel clears
+	 * them, to prevent us from seeing an inconsistent state.
+ */
+ if ((((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) == 0) &&
+ ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) != 0)) ||
+ (((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) == 0) &&
+ ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) != 0)))
+ {
+ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXTestActivePowerEvent failed to acquire lock - "
+ "ui32CallerID:%d eError:%u", ui32CallerID, eError));
+ return;
+ }
+
+ /*
+ * Check again (with lock) if IDLE event has been cleared or handled. A race
+ * condition may allow multiple threads to pass the quick check.
+ */
+ if(((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) == 0) &&
+ ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) != 0))
+ {
+ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_IDLE;
+ psDevInfo->bSGXIdle = IMG_TRUE;
+
+ SysSGXIdleEntered();
+ }
+
+ /*
+ * Check again (with lock) if APM event has been cleared or handled. A race
+ * condition may allow multiple threads to pass the quick check.
+ */
+ if (((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) == 0) &&
+ ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) != 0))
+ {
+ /* Microkernel is idle and is requesting to be powered down. */
+ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
+
+ /* Suspend pdumping. */
+ PDUMPSUSPEND();
+
+#if defined(SYS_CUSTOM_POWERDOWN)
+ /*
+ Some power down code cannot be executed inside an MISR on
+ some platforms that use mutexes inside the power code.
+ */
+ eError = SysPowerDownMISR(psDeviceNode, ui32CallerID);
+#else
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE_OFF);
+#endif
+ if (eError == PVRSRV_OK)
+ {
+ SGXPostActivePowerEvent(psDeviceNode, ui32CallerID);
+ }
+ /* Resume pdumping */
+ PDUMPRESUME();
+ }
+
+ PVRSRVPowerUnlock(ui32CallerID);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXTestActivePowerEvent error:%u", eError));
+ }
+}
+
+
+/******************************************************************************
+ FUNCTION : SGXAcquireKernelCCBSlot
+
+ PURPOSE : Attempts to obtain a slot in the Kernel CCB
+
+ PARAMETERS : psCCB - the CCB
+
+ RETURNS : Address of space if available, IMG_NULL otherwise
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SGXAcquireKernelCCBSlot)
+#endif
+static INLINE SGXMKIF_COMMAND * SGXAcquireKernelCCBSlot(PVRSRV_SGX_CCB_INFO *psCCB)
+{
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
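+		/*
+			The kernel CCB is a 256-entry ring buffer. One slot is always
+			left unused so that a full ring can be distinguished from an
+			empty one: the ring is full when advancing the write offset
+			would make it equal to the read offset.
+		*/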
+ if(((*psCCB->pui32WriteOffset + 1) & 255) != *psCCB->pui32ReadOffset)
+ {
+ return &psCCB->psCommands[*psCCB->pui32WriteOffset];
+ }
+
+ OSSleepms(1);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+	/* Timed out waiting for CCB space */
+ return IMG_NULL;
+}
+
+/*!
+******************************************************************************
+
+ @Function SGXScheduleCCBCommand
+
+ @Description - Submits a CCB command and kicks the ukernel (without
+ power management)
+
+ @Input psDeviceNode - pointer to SGX device node
+ @Input eCmdType - see SGXMKIF_CMD_*
+ @Input psCommandData - kernel CCB command
+ @Input ui32CallerID - KERNEL_ID or ISR_ID
+ @Input ui32PDumpFlags
+ @Input hDevMemContext - device memory context handle
+ @Input bLastInScene - IMG_TRUE if this is the last TA kick of the scene
+
+ @Return PVRSRV_ERROR - success or failure
+
+******************************************************************************/
+PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+ SGXMKIF_CMD_TYPE eCmdType,
+ SGXMKIF_COMMAND *psCommandData,
+ IMG_UINT32 ui32CallerID,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_HANDLE hDevMemContext,
+ IMG_BOOL bLastInScene)
+{
+ PVRSRV_SGX_CCB_INFO *psKernelCCB;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SGXMKIF_COMMAND *psSGXCommand;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
+#if defined(FIX_HW_BRN_31620)
+ IMG_UINT32 ui32CacheMasks[4];
+ IMG_UINT32 i;
+ MMU_CONTEXT *psMMUContext;
+#endif
+#if defined(PDUMP)
+ IMG_VOID *pvDumpCommand;
+ IMG_BOOL bPDumpIsSuspended = PDumpIsSuspended();
+ IMG_BOOL bPersistentProcess = IMG_FALSE;
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32CallerID);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+#endif
+
+#if defined(FIX_HW_BRN_31620)
+ for(i=0;i<4;i++)
+ {
+ ui32CacheMasks[i] = 0;
+ }
+
+ psMMUContext = psDevInfo->hKernelMMUContext;
+ psDeviceNode->pfnMMUGetCacheFlushRange(psMMUContext, &ui32CacheMasks[0]);
+
+ /* Put the apps memory context in the bottom half */
+ if (hDevMemContext)
+ {
+ BM_CONTEXT *psBMContext = (BM_CONTEXT *) hDevMemContext;
+
+ psMMUContext = psBMContext->psMMUContext;
+ psDeviceNode->pfnMMUGetCacheFlushRange(psMMUContext, &ui32CacheMasks[2]);
+ }
+
+ /* If we have an outstanding flush request then set the cachecontrol bit */
+ if (ui32CacheMasks[0] || ui32CacheMasks[1] || ui32CacheMasks[2] || ui32CacheMasks[3])
+ {
+ psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PD;
+ }
+#endif
+
+#if defined(FIX_HW_BRN_28889)
+ /*
+		If both the data cache and the BIF cache need invalidating, there has
+		been a cleanup request. Therefore, we need to send the invalidate
+		separately and wait for it to complete.
+ */
+ if ( (eCmdType != SGXMKIF_CMD_PROCESS_QUEUES) &&
+ ((psDevInfo->ui32CacheControl & SGXMKIF_CC_INVAL_DATA) != 0) &&
+ ((psDevInfo->ui32CacheControl & (SGXMKIF_CC_INVAL_BIF_PT | SGXMKIF_CC_INVAL_BIF_PD)) != 0))
+ {
+ #if defined(PDUMP)
+ PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo;
+ #endif
+ SGXMKIF_COMMAND sCacheCommand = {0};
+
+ eError = SGXScheduleCCBCommand(psDeviceNode,
+ SGXMKIF_CMD_PROCESS_QUEUES,
+ &sCacheCommand,
+ ui32CallerID,
+ ui32PDumpFlags,
+ hDevMemContext,
+ bLastInScene);
+ if (eError != PVRSRV_OK)
+ {
+ goto Exit;
+ }
+
+ /* Wait for the invalidate to happen */
+ #if !defined(NO_HARDWARE)
+ if(PollForValueKM(&psSGXHostCtl->ui32InvalStatus,
+ PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE,
+ PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE,
+ 2 * MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommand: Wait for uKernel to Invalidate BIF cache failed"));
+ PVR_DBG_BREAK;
+ }
+ #endif
+
+ #if defined(PDUMP)
+ /* Pdump the poll as well. */
+ PDUMPCOMMENTWITHFLAGS(0, "Host Control - Poll for BIF cache invalidate request to complete");
+ PDUMPMEMPOL(psSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32InvalStatus),
+ PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE,
+ PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0,
+ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
+ #endif /* PDUMP */
+
+ psSGXHostCtl->ui32InvalStatus &= ~(PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE);
+		PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(SGXMKIF_HOST_CTL, ui32InvalStatus), sizeof(IMG_UINT32), 0, MAKEUNIQUETAG(psSGXHostCtlMemInfo));
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(hDevMemContext);
+#endif
+
+#if defined(FIX_HW_BRN_31620)
+ if ((eCmdType != SGXMKIF_CMD_FLUSHPDCACHE) && (psDevInfo->ui32CacheControl & SGXMKIF_CC_INVAL_BIF_PD))
+ {
+ SGXMKIF_COMMAND sPDECacheCommand = {0};
+ IMG_DEV_PHYADDR sDevPAddr;
+
+ /* Put the kernel info in the top 1/2 of the data */
+ psMMUContext = psDevInfo->hKernelMMUContext;
+
+ psDeviceNode->pfnMMUGetPDPhysAddr(psMMUContext, &sDevPAddr);
+ sPDECacheCommand.ui32Data[0] = sDevPAddr.uiAddr | 1;
+ sPDECacheCommand.ui32Data[1] = ui32CacheMasks[0];
+ sPDECacheCommand.ui32Data[2] = ui32CacheMasks[1];
+
+ /* Put the apps memory context in the bottom half */
+ if (hDevMemContext)
+ {
+ BM_CONTEXT *psBMContext = (BM_CONTEXT *) hDevMemContext;
+
+ psMMUContext = psBMContext->psMMUContext;
+
+ psDeviceNode->pfnMMUGetPDPhysAddr(psMMUContext, &sDevPAddr);
+ /* Or in 1 to the lsb to show we have a valid context */
+ sPDECacheCommand.ui32Data[3] = sDevPAddr.uiAddr | 1;
+ sPDECacheCommand.ui32Data[4] = ui32CacheMasks[2];
+ sPDECacheCommand.ui32Data[5] = ui32CacheMasks[3];
+ }
+
+ /* Only do a kick if there is any update */
+ if (sPDECacheCommand.ui32Data[1] | sPDECacheCommand.ui32Data[2] | sPDECacheCommand.ui32Data[4] |
+ sPDECacheCommand.ui32Data[5])
+ {
+ eError = SGXScheduleCCBCommand(psDeviceNode,
+ SGXMKIF_CMD_FLUSHPDCACHE,
+ &sPDECacheCommand,
+ ui32CallerID,
+ ui32PDumpFlags,
+ hDevMemContext,
+ bLastInScene);
+ if (eError != PVRSRV_OK)
+ {
+ goto Exit;
+ }
+ }
+ }
+#endif
+#if defined(PDUMP)
+ /*
+ * For persistent processes, the HW kicks should not go into the
+ * extended init phase; only keep memory transactions from the
+ * window system which are necessary to run the client app.
+ */
+ {
+ PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData();
+ if(psPerProc != IMG_NULL)
+ {
+ bPersistentProcess = psPerProc->bPDumpPersistent;
+ }
+ }
+#endif /* PDUMP */
+ psKernelCCB = psDevInfo->psKernelCCBInfo;
+
+ psSGXCommand = SGXAcquireKernelCCBSlot(psKernelCCB);
+
+	/* Timed out waiting for CCB space */
+	if(!psSGXCommand)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SGXScheduleCCBCommand: Wait for CCB space timed out"));
+ eError = PVRSRV_ERROR_TIMEOUT;
+ goto Exit;
+ }
+
+ /* embed cache control word */
+ psCommandData->ui32CacheControl = psDevInfo->ui32CacheControl;
+
+#if defined(PDUMP)
+ /* Accumulate any cache invalidates that may have happened */
+ psDevInfo->sPDContext.ui32CacheControl |= psDevInfo->ui32CacheControl;
+#endif
+
+ /* and clear it */
+ psDevInfo->ui32CacheControl = 0;
+
+ /* Copy command data over */
+ *psSGXCommand = *psCommandData;
+
+ if (eCmdType >= SGXMKIF_CMD_MAX)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXScheduleCCBCommand: Unknown command type: %d", eCmdType)) ;
+ eError = PVRSRV_ERROR_INVALID_CCB_COMMAND;
+ goto Exit;
+ }
+
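+	/*
+		For commands that consume CPU-written data (2D, transfer, or the
+		final TA kick of a scene), honour any CPU cache flush/clean that
+		has been deferred in SYS_DATA before the hardware reads the data.
+	*/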
+ if (eCmdType == SGXMKIF_CMD_2D ||
+ eCmdType == SGXMKIF_CMD_TRANSFER ||
+ ((eCmdType == SGXMKIF_CMD_TA) && bLastInScene))
+ {
+ SYS_DATA *psSysData;
+
+ /* CPU cache clean control */
+ SysAcquireData(&psSysData);
+
+ if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH)
+ {
+ OSFlushCPUCacheKM();
+ }
+ else if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN)
+ {
+ OSCleanCPUCacheKM();
+ }
+
+ /* Clear the pending op */
+ psSysData->ePendingCacheOpType = PVRSRV_MISC_INFO_CPUCACHEOP_NONE;
+ }
+
+ PVR_ASSERT(eCmdType < SGXMKIF_CMD_MAX);
+ psSGXCommand->ui32ServiceAddress = psDevInfo->aui32HostKickAddr[eCmdType]; /* PRQA S 3689 */ /* misuse of enums for bounds checking */
+
+#if defined(PDUMP)
+ if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == IMG_FALSE) &&
+ (bPersistentProcess == IMG_FALSE) )
+ {
+ /* Poll for space in the CCB. */
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for space in the Kernel CCB\r\n");
+ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo,
+ offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset),
+ (psKernelCCB->ui32CCBDumpWOff + 1) & 0xff,
+ 0xff,
+ PDUMP_POLL_OPERATOR_NOTEQUAL,
+ ui32PDumpFlags,
+ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB command (type == %d)\r\n", eCmdType);
+ pvDumpCommand = (IMG_VOID *)((IMG_UINT8 *)psKernelCCB->psCCBMemInfo->pvLinAddrKM + (*psKernelCCB->pui32WriteOffset * sizeof(SGXMKIF_COMMAND)));
+
+ PDUMPMEM(pvDumpCommand,
+ psKernelCCB->psCCBMemInfo,
+ psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND),
+ sizeof(SGXMKIF_COMMAND),
+ ui32PDumpFlags,
+ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
+
+ /* Overwrite cache control with pdump shadow */
+ PDUMPMEM(&psDevInfo->sPDContext.ui32CacheControl,
+ psKernelCCB->psCCBMemInfo,
+ psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND) +
+ offsetof(SGXMKIF_COMMAND, ui32CacheControl),
+ sizeof(IMG_UINT32),
+ ui32PDumpFlags,
+ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
+
+ if (PDumpIsCaptureFrameKM()
+ || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
+ {
+ /* Clear cache invalidate shadow */
+ psDevInfo->sPDContext.ui32CacheControl = 0;
+ }
+ }
+#endif
+
+#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
+	/* Make sure the previous command has been read before sending the next one */
+ eError = PollForValueKM (psKernelCCB->pui32ReadOffset,
+ *psKernelCCB->pui32WriteOffset,
+ 0xFF,
+ MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXScheduleCCBCommand: Timeout waiting for previous command to be read")) ;
+ eError = PVRSRV_ERROR_TIMEOUT;
+ goto Exit;
+ }
+#endif
+
+ /*
+ Increment the write offset
+ */
+ *psKernelCCB->pui32WriteOffset = (*psKernelCCB->pui32WriteOffset + 1) & 255;
+
+#if defined(PDUMP)
+ if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == IMG_FALSE) &&
+ (bPersistentProcess == IMG_FALSE) )
+ {
+ #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for previous Kernel CCB CMD to be read\r\n");
+ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo,
+ offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset),
+ (psKernelCCB->ui32CCBDumpWOff),
+ 0xFF,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags,
+ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
+ #endif
+
+ if (PDumpIsCaptureFrameKM()
+ || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
+ {
+ psKernelCCB->ui32CCBDumpWOff = (psKernelCCB->ui32CCBDumpWOff + 1) & 0xFF;
+ psDevInfo->ui32KernelCCBEventKickerDumpVal = (psDevInfo->ui32KernelCCBEventKickerDumpVal + 1) & 0xFF;
+ }
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB write offset\r\n");
+ PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff,
+ psKernelCCB->psCCBCtlMemInfo,
+ offsetof(PVRSRV_SGX_CCB_CTL, ui32WriteOffset),
+ sizeof(IMG_UINT32),
+ ui32PDumpFlags,
+ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB event kicker\r\n");
+ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
+ psDevInfo->psKernelCCBEventKickerMemInfo,
+ 0,
+ sizeof(IMG_UINT32),
+ ui32PDumpFlags,
+ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kick the SGX microkernel\r\n");
+ #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), EUR_CR_EVENT_KICK2_NOW_MASK, ui32PDumpFlags);
+ #else
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK, ui32PDumpFlags);
+ #endif
+ }
+#endif
+
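+	/* Advance the 8-bit event kicker so the ukernel can see that a new command has been queued */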
+ *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
+
+ /*
+ * New command submission is considered a proper handling of any pending
+ * IDLE or APM event, so mark them as handled to prevent other host threads
+ * from taking action.
+ */
+ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_IDLE;
+ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
+
+ OSWriteMemoryBarrier();
+
+	/* Order is important for the post processor! */
+ PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_MKSYNC, PVRSRV_TRACE_CLASS_NONE,
+ MKSYNC_TOKEN_KERNEL_CCB_OFFSET, *psKernelCCB->pui32WriteOffset);
+ PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_MKSYNC, PVRSRV_TRACE_CLASS_NONE,
+ MKSYNC_TOKEN_CORE_CLK, psDevInfo->ui32CoreClockSpeed);
+ PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_MKSYNC, PVRSRV_TRACE_CLASS_NONE,
+ MKSYNC_TOKEN_UKERNEL_CLK, psDevInfo->ui32uKernelTimerClock);
+
+
+#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
+ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0),
+ EUR_CR_EVENT_KICK2_NOW_MASK);
+#else
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
+ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
+ EUR_CR_EVENT_KICK_NOW_MASK);
+#endif
+
+ OSMemoryBarrier();
+
+#if defined(NO_HARDWARE)
+	/* With no hardware to consume the command, advance the read offset ourselves */
+ *psKernelCCB->pui32ReadOffset = (*psKernelCCB->pui32ReadOffset + 1) & 255;
+#endif
+
+ ui64KickCount++;
+Exit:
+ return eError;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function SGXScheduleCCBCommandKM
+
+ @Description - Submits a CCB command and kicks the ukernel
+
+ @Input psDeviceNode - pointer to SGX device node
+ @Input eCmdType - see SGXMKIF_CMD_*
+ @Input psCommandData - kernel CCB command
+ @Input ui32CallerID - KERNEL_ID or ISR_ID
+ @Input ui32PDumpFlags
+ @Input hDevMemContext - device memory context handle
+ @Input bLastInScene - IMG_TRUE if this is the last TA kick of the scene
+
+ @Return PVRSRV_ERROR - success or failure
+
+******************************************************************************/
+PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ SGXMKIF_CMD_TYPE eCmdType,
+ SGXMKIF_COMMAND *psCommandData,
+ IMG_UINT32 ui32CallerID,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_HANDLE hDevMemContext,
+ IMG_BOOL bLastInScene)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ if (ui32CallerID == ISR_ID)
+ {
+ SYS_DATA *psSysData;
+
+ /*
+ ISR failed to acquire lock so it must be held by a kernel thread.
+ Bring up and kick SGX if necessary when the lock is available.
+ */
+ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
+ eError = PVRSRV_OK;
+
+ SysAcquireData(&psSysData);
+ OSScheduleMISR(psSysData);
+ }
+ else
+ {
+ /*
+ Return to srvclient for retry.
+ */
+ }
+
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM failed to acquire lock - "
+ "ui32CallerID:%d eError:%u", ui32CallerID, eError));
+ return eError;
+ }
+
+ /* Note that a power-up has been dumped in the init phase. */
+ PDUMPSUSPEND();
+
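+	/* Notify the system layer that a command is pending, so it can leave any SGX idle state */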
+ SysSGXCommandPending(psDevInfo->bSGXIdle);
+ psDevInfo->bSGXIdle = IMG_FALSE;
+
+ /* Ensure that SGX is powered up before kicking the ukernel. */
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE_ON);
+
+ PDUMPRESUME();
+
+ if (eError == PVRSRV_OK)
+ {
+ psDeviceNode->bReProcessDeviceCommandComplete = IMG_FALSE;
+ }
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM failed to set device power state - "
+				"ui32CallerID:%d eError:%u", ui32CallerID, eError));
+		PVRSRVPowerUnlock(ui32CallerID);
+		return eError;
+	}
+
+ eError = SGXScheduleCCBCommand(psDeviceNode, eCmdType, psCommandData, ui32CallerID, ui32PDumpFlags, hDevMemContext, bLastInScene);
+
+ PVRSRVPowerUnlock(ui32CallerID);
+ return eError;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function SGXScheduleProcessQueuesKM
+
+ @Description - Software command complete handler: kicks the ukernel to process any waiting queues
+
+ @Input psDeviceNode - SGX device node
+
+******************************************************************************/
+PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ SGXMKIF_HOST_CTL *psHostCtl = psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
+ IMG_UINT32 ui32PowerStatus;
+ SGXMKIF_COMMAND sCommand = {0};
+
+ ui32PowerStatus = psHostCtl->ui32PowerStatus;
+ if ((ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
+ {
+ /* The ukernel has no work to do so don't waste power. */
+ return PVRSRV_OK;
+ }
+
+ eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_PROCESS_QUEUES, &sCommand, ISR_ID, 0, IMG_NULL, IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueuesKM failed to schedule CCB command: %u", eError));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function SGXIsDevicePowered
+
+ @Description
+
+ Whether the device is powered, for the purposes of lockup detection.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Return IMG_BOOL : Whether device is powered
+
+******************************************************************************/
+IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ return PVRSRVIsDevicePowered(psDeviceNode->sDevId.ui32DeviceIndex);
+}
+
+/*!
+*******************************************************************************
+
+ @Function SGXGetInternalDevInfoKM
+
+ @Description
+ Gets device information that is not intended to be passed
+ on beyond the srvclient libs.
+
+ @Input hDevCookie
+
+ @Output psSGXInternalDevInfo
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_INTERNAL_DEVINFO_KM *psSGXInternalDevInfo)
+#else
+ SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo)
+#endif
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
+
+ psSGXInternalDevInfo->ui32Flags = psDevInfo->ui32Flags;
+ psSGXInternalDevInfo->bForcePTOff = (IMG_BOOL)psDevInfo->bForcePTOff;
+
+ /* This should be patched up by OS bridge code */
+ psSGXInternalDevInfo->hHostCtlKernelMemInfoHandle =
+ (IMG_HANDLE)psDevInfo->psKernelSGXHostCtlMemInfo;
+
+ return PVRSRV_OK;
+}
+
+
+/*****************************************************************************
+ FUNCTION : SGXCleanupRequest
+
+ PURPOSE : Wait for the microkernel to clean up its references to either a
+ render context or render target.
+
+ PARAMETERS : psDeviceNode - SGX device node
+ psHWDataDevVAddr - Device Address of the resource
+ ui32CleanupType - PVRSRV_CLEANUPCMD_*
+ bForceCleanup - Skips sync polling
+
+ RETURNS : error status
+*****************************************************************************/
+PVRSRV_ERROR SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR *psHWDataDevVAddr,
+ IMG_UINT32 ui32CleanupType,
+ IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_KERNEL_MEM_INFO *psHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo;
+ SGXMKIF_HOST_CTL *psHostCtl = psHostCtlMemInfo->pvLinAddrKM;
+
+ SGXMKIF_COMMAND sCommand = {0};
+
+
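+	/* A forced cleanup skips the microkernel round trip and sync polling; only the host-side state below is updated */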
+ if (bForceCleanup != FORCE_CLEANUP)
+ {
+ sCommand.ui32Data[0] = ui32CleanupType;
+ sCommand.ui32Data[1] = (psHWDataDevVAddr == IMG_NULL) ? 0 : psHWDataDevVAddr->uiAddr;
+ PDUMPCOMMENTWITHFLAGS(0, "Request ukernel resource clean-up, Type %u, Data 0x%X", sCommand.ui32Data[0], sCommand.ui32Data[1]);
+
+ eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_CLEANUP, &sCommand, KERNEL_ID, 0, IMG_NULL, IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Failed to submit clean-up command"));
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+ PVR_DBG_BREAK;
+ return eError;
+ }
+
+		/* Wait for the uKernel to process the cleanup request */
+ #if !defined(NO_HARDWARE)
+ if(PollForValueKM(&psHostCtl->ui32CleanupStatus,
+ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
+ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
+ 10 * MAX_HW_TIME_US,
+ 1000,
+ IMG_TRUE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Wait for uKernel to clean up (%u) failed", ui32CleanupType));
+ eError = PVRSRV_ERROR_TIMEOUT;
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+ PVR_DBG_BREAK;
+ }
+ #endif
+
+ #if defined(PDUMP)
+ /*
+ Pdump the poll as well.
+ Note:
+			We don't expect the cleanup to report busy, as the client should have
+			ensured that the resource has been finished with before requesting
+			its cleanup. This isn't true of the abnormal termination case, but
+ we don't expect to PDump that. Unless/until PDump has flow control
+ there isn't anything else we can do.
+ */
+ PDUMPCOMMENTWITHFLAGS(0, "Host Control - Poll for clean-up request to complete");
+ PDUMPMEMPOL(psHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus),
+ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_DONE,
+ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_DONE,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0,
+ MAKEUNIQUETAG(psHostCtlMemInfo));
+ #endif /* PDUMP */
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ if (psHostCtl->ui32CleanupStatus & PVRSRV_USSE_EDM_CLEANUPCMD_BUSY)
+ {
+ /* Only one flag should be set */
+ PVR_ASSERT((psHostCtl->ui32CleanupStatus & PVRSRV_USSE_EDM_CLEANUPCMD_DONE) == 0);
+ eError = PVRSRV_ERROR_RETRY;
+ psHostCtl->ui32CleanupStatus &= ~(PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_BUSY);
+ }
+ else
+ {
+ eError = PVRSRV_OK;
+ psHostCtl->ui32CleanupStatus &= ~(PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_DONE);
+ }
+
+ PDUMPMEM(IMG_NULL, psHostCtlMemInfo, offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus), sizeof(IMG_UINT32), 0, MAKEUNIQUETAG(psHostCtlMemInfo));
+
+ /* Request the cache invalidate */
+#if defined(SGX_FEATURE_SYSTEM_CACHE)
+ psDevInfo->ui32CacheControl |= (SGXMKIF_CC_INVAL_BIF_SL | SGXMKIF_CC_INVAL_DATA);
+#else
+ psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_DATA;
+#endif
+ return eError;
+}
+
+
+typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_KERNEL_MEM_INFO *psHWRenderContextMemInfo;
+ IMG_HANDLE hBlockAlloc;
+ PRESMAN_ITEM psResItem;
+ IMG_BOOL bCleanupTimerRunning;
+ IMG_PVOID pvTimeData;
+} SGX_HW_RENDER_CONTEXT_CLEANUP;
+
+
+static PVRSRV_ERROR SGXCleanupHWRenderContextCallback(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup = pvParam;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+
+ eError = SGXCleanupRequest(psCleanup->psDeviceNode,
+ &psCleanup->psHWRenderContextMemInfo->sDevVAddr,
+ PVRSRV_CLEANUPCMD_RC,
+ bForceCleanup);
+
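+	/*
+		A busy resource is retried, but bounded by a one-shot timer so
+		that repeated retries eventually give up with a timeout rather
+		than blocking cleanup forever.
+	*/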
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ if (!psCleanup->bCleanupTimerRunning)
+ {
+ OSTimeCreateWithUSOffset(&psCleanup->pvTimeData, MAX_CLEANUP_TIME_US);
+ psCleanup->bCleanupTimerRunning = IMG_TRUE;
+ }
+ else
+ {
+ if (OSTimeHasTimePassed(psCleanup->pvTimeData))
+ {
+ eError = PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE;
+ psCleanup->bCleanupTimerRunning = IMG_FALSE;
+ OSTimeDestroy(psCleanup->pvTimeData);
+ }
+ }
+ }
+ else
+ {
+ if (psCleanup->bCleanupTimerRunning)
+ {
+ OSTimeDestroy(psCleanup->pvTimeData);
+ }
+ }
+
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ /* Free the Device Mem allocated */
+ PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode,
+ psCleanup->psHWRenderContextMemInfo);
+
+ /* Finally, free the cleanup structure itself */
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+ /*not nulling pointer, copy on stack*/
+ }
+
+ return eError;
+}
+
+typedef struct _SGX_HW_TRANSFER_CONTEXT_CLEANUP_
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_KERNEL_MEM_INFO *psHWTransferContextMemInfo;
+ IMG_HANDLE hBlockAlloc;
+ PRESMAN_ITEM psResItem;
+ IMG_BOOL bCleanupTimerRunning;
+ IMG_PVOID pvTimeData;
+} SGX_HW_TRANSFER_CONTEXT_CLEANUP;
+
+
+static PVRSRV_ERROR SGXCleanupHWTransferContextCallback(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)pvParam;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+
+ eError = SGXCleanupRequest(psCleanup->psDeviceNode,
+ &psCleanup->psHWTransferContextMemInfo->sDevVAddr,
+ PVRSRV_CLEANUPCMD_TC,
+ bForceCleanup);
+
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ if (!psCleanup->bCleanupTimerRunning)
+ {
+ OSTimeCreateWithUSOffset(&psCleanup->pvTimeData, MAX_CLEANUP_TIME_US);
+ psCleanup->bCleanupTimerRunning = IMG_TRUE;
+ }
+ else
+ {
+ if (OSTimeHasTimePassed(psCleanup->pvTimeData))
+ {
+ eError = PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE;
+ psCleanup->bCleanupTimerRunning = IMG_FALSE;
+ OSTimeDestroy(psCleanup->pvTimeData);
+ }
+ }
+ }
+ else
+ {
+ if (psCleanup->bCleanupTimerRunning)
+ {
+ OSTimeDestroy(psCleanup->pvTimeData);
+ }
+ }
+
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ /* Free the Device Mem allocated */
+ PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode,
+ psCleanup->psHWTransferContextMemInfo);
+
+ /* Finally, free the cleanup structure itself */
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+ /*not nulling pointer, copy on stack*/
+ }
+
+ return eError;
+}
+
+IMG_EXPORT
+IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE hDeviceNode,
+ IMG_CPU_VIRTADDR *psHWRenderContextCpuVAddr,
+ IMG_UINT32 ui32HWRenderContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hBlockAlloc;
+ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psHeapInfo;
+ IMG_HANDLE hDevMemContextInt;
+ MMU_CONTEXT *psMMUContext;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ int iPtrByte;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
+ PRESMAN_ITEM psResItem;
+
+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
+ (IMG_VOID **)&psCleanup,
+ &hBlockAlloc,
+ "SGX Hardware Render Context Cleanup");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate memory for SGX_HW_RENDER_CONTEXT_CLEANUP structure"));
+ goto exit0;
+ }
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID];
+
+ eError = PVRSRVAllocDeviceMemKM(hDeviceNode,
+ psPerProc,
+ psHeapInfo->hDevMemHeap,
+ PVRSRV_MEM_READ | PVRSRV_MEM_WRITE
+ | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT
+ | PVRSRV_MEM_CACHE_CONSISTENT,
+ ui32HWRenderContextSize,
+ 32,
+ IMG_NULL,
+ 0,
+ 0,0,0,IMG_NULL, /* No sparse mapping data */
+ &psCleanup->psHWRenderContextMemInfo,
+ "HW Render Context");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate device memory for HW Render Context"));
+ goto exit1;
+ }
+
+ eError = OSCopyFromUser(psPerProc,
+ psCleanup->psHWRenderContextMemInfo->pvLinAddrKM,
+ psHWRenderContextCpuVAddr,
+ ui32HWRenderContextSize);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't copy user-mode copy of HWContext into device memory"));
+ goto exit2;
+ }
+
+ /* Pass the DevVAddr of the new context back up through the bridge */
+ psHWRenderContextDevVAddr->uiAddr = psCleanup->psHWRenderContextMemInfo->sDevVAddr.uiAddr;
+
+ /* Retrieve the PDDevPAddr */
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Can't lookup DevMem Context"));
+ goto exit2;
+ }
+
+ psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt);
+ sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext);
+
+ /*
+ patch-in the Page-Directory Device-Physical address. Note that this is
+ copied-in one byte at a time, as we have no guarantee that the usermode-
+ provided ui32OffsetToPDDevPAddr is a validly-aligned address for the
+ current CPU architecture.
+ */
+ pSrc = (IMG_UINT8 *)&sPDDevPAddr;
+ pDst = (IMG_UINT8 *)psCleanup->psHWRenderContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetToPDDevPAddr;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(IMG_DEV_PHYADDR); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+
+#if defined(PDUMP)
+ /* PDUMP the HW context */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW Render context struct");
+
+ PDUMPMEM(
+ IMG_NULL,
+ psCleanup->psHWRenderContextMemInfo,
+ 0,
+ ui32HWRenderContextSize,
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psCleanup->psHWRenderContextMemInfo));
+
+ /* PDUMP the PDDevPAddr */
+ PDUMPCOMMENT("Page directory address in HW render context");
+ PDUMPPDDEVPADDR(
+ psCleanup->psHWRenderContextMemInfo,
+ ui32OffsetToPDDevPAddr,
+ sPDDevPAddr,
+ MAKEUNIQUETAG(psCleanup->psHWRenderContextMemInfo),
+ PDUMP_PD_UNIQUETAG);
+#endif
+
+ psCleanup->hBlockAlloc = hBlockAlloc;
+ psCleanup->psDeviceNode = psDeviceNode;
+ psCleanup->bCleanupTimerRunning = IMG_FALSE;
+
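+	/* Register with the resource manager so the context is cleaned up automatically if the process exits without unregistering it */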
+ psResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_HW_RENDER_CONTEXT,
+ (IMG_VOID *)psCleanup,
+ 0,
+ &SGXCleanupHWRenderContextCallback);
+
+ if (psResItem == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: ResManRegisterRes failed"));
+ goto exit2;
+ }
+
+ psCleanup->psResItem = psResItem;
+
+ return (IMG_HANDLE)psCleanup;
+
+/* Error exit paths */
+exit2:
+ PVRSRVFreeDeviceMemKM(hDeviceNode,
+ psCleanup->psHWRenderContextMemInfo);
+exit1:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+ /*not nulling pointer, out of scope*/
+exit0:
+ return IMG_NULL;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext, IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
+
+ PVR_ASSERT(hHWRenderContext != IMG_NULL);
+
+ psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
+
+ if (psCleanup == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWRenderContextKM: invalid parameter"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = ResManFreeResByPtr(psCleanup->psResItem, bForceCleanup);
+
+ return eError;
+}
+
+
+IMG_EXPORT
+IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE hDeviceNode,
+ IMG_CPU_VIRTADDR *psHWTransferContextCpuVAddr,
+ IMG_UINT32 ui32HWTransferContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hBlockAlloc;
+ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psHeapInfo;
+ IMG_HANDLE hDevMemContextInt;
+ MMU_CONTEXT *psMMUContext;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ int iPtrByte;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
+ PRESMAN_ITEM psResItem;
+
+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
+ (IMG_VOID **)&psCleanup,
+ &hBlockAlloc,
+ "SGX Hardware Transfer Context Cleanup");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate memory for SGX_HW_TRANSFER_CONTEXT_CLEANUP structure"));
+ goto exit0;
+ }
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID];
+
+ eError = PVRSRVAllocDeviceMemKM(hDeviceNode,
+ psPerProc,
+ psHeapInfo->hDevMemHeap,
+ PVRSRV_MEM_READ | PVRSRV_MEM_WRITE
+ | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT
+ | PVRSRV_MEM_CACHE_CONSISTENT,
+ ui32HWTransferContextSize,
+ 32,
+ IMG_NULL,
+ 0,
+ 0,0,0,IMG_NULL, /* No sparse mapping data */
+ &psCleanup->psHWTransferContextMemInfo,
+ "HW Render Context");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate device memory for HW Render Context"));
+ goto exit1;
+ }
+
+ eError = OSCopyFromUser(psPerProc,
+ psCleanup->psHWTransferContextMemInfo->pvLinAddrKM,
+ psHWTransferContextCpuVAddr,
+ ui32HWTransferContextSize);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't copy user-mode copy of HWContext into device memory"));
+ goto exit2;
+ }
+
+ /* Pass the DevVAddr of the new context back up through the bridge */
+ psHWTransferContextDevVAddr->uiAddr = psCleanup->psHWTransferContextMemInfo->sDevVAddr.uiAddr;
+
+ /* Retrieve the PDDevPAddr */
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Can't lookup DevMem Context"));
+ goto exit2;
+ }
+
+ psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt);
+ sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext);
+
+ /*
+ patch-in the Page-Directory Device-Physical address. Note that this is
+ copied-in one byte at a time, as we have no guarantee that the usermode-
+ provided ui32OffsetToPDDevPAddr is a validly-aligned address for the
+ current CPU architecture.
+ */
+ pSrc = (IMG_UINT8 *)&sPDDevPAddr;
+ pDst = (IMG_UINT8 *)psCleanup->psHWTransferContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetToPDDevPAddr;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(IMG_DEV_PHYADDR); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+
+#if defined(PDUMP)
+ /* PDUMP the HW Transfer Context */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW Transfer context struct");
+
+ PDUMPMEM(
+ IMG_NULL,
+ psCleanup->psHWTransferContextMemInfo,
+ 0,
+ ui32HWTransferContextSize,
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psCleanup->psHWTransferContextMemInfo));
+
+ /* PDUMP the PDDevPAddr */
+ PDUMPCOMMENT("Page directory address in HW transfer context");
+
+ PDUMPPDDEVPADDR(
+ psCleanup->psHWTransferContextMemInfo,
+ ui32OffsetToPDDevPAddr,
+ sPDDevPAddr,
+ MAKEUNIQUETAG(psCleanup->psHWTransferContextMemInfo),
+ PDUMP_PD_UNIQUETAG);
+#endif
+
+ psCleanup->hBlockAlloc = hBlockAlloc;
+ psCleanup->psDeviceNode = psDeviceNode;
+ psCleanup->bCleanupTimerRunning = IMG_FALSE;
+
+ psResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_HW_TRANSFER_CONTEXT,
+ psCleanup,
+ 0,
+ &SGXCleanupHWTransferContextCallback);
+
+ if (psResItem == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: ResManRegisterRes failed"));
+ goto exit2;
+ }
+
+ psCleanup->psResItem = psResItem;
+
+ return (IMG_HANDLE)psCleanup;
+
+/* Error exit paths */
+exit2:
+ PVRSRVFreeDeviceMemKM(hDeviceNode,
+ psCleanup->psHWTransferContextMemInfo);
+exit1:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+ /*not nulling pointer, out of scope*/
+
+exit0:
+ return IMG_NULL;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext, IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
+
+ PVR_ASSERT(hHWTransferContext != IMG_NULL);
+
+ psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext;
+
+ if (psCleanup == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWTransferContextKM: invalid parameter"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = ResManFreeResByPtr(psCleanup->psResItem, bForceCleanup);
+
+ return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGXSetTransferContextPriorityKM(
+ IMG_HANDLE hDeviceNode,
+ IMG_HANDLE hHWTransferContext,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32OffsetOfPriorityField)
+{
+ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
+ int iPtrByte;
+ PVR_UNREFERENCED_PARAMETER(hDeviceNode);
+
+ if (hHWTransferContext != IMG_NULL)
+ {
+ psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext;
+
+ if ((ui32OffsetOfPriorityField + sizeof(ui32Priority))
+ >= psCleanup->psHWTransferContextMemInfo->uAllocSize)
+ {
+ PVR_DPF((
+ PVR_DBG_ERROR,
+ "SGXSetTransferContextPriorityKM: invalid context prioirty offset"));
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /*
+ cannot be sure that offset (passed from user-land) is safe to deref
+ as a word-ptr on current CPU arch: copy one byte at a time.
+ */
+ pDst = (IMG_UINT8 *)psCleanup->psHWTransferContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetOfPriorityField;
+ pSrc = (IMG_UINT8 *)&ui32Priority;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(ui32Priority); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+ }
+ return PVRSRV_OK;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGXSetRenderContextPriorityKM(
+ IMG_HANDLE hDeviceNode,
+ IMG_HANDLE hHWRenderContext,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32OffsetOfPriorityField)
+{
+ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
+ int iPtrByte;
+ PVR_UNREFERENCED_PARAMETER(hDeviceNode);
+
+ if (hHWRenderContext != IMG_NULL)
+ {
+ psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
+ if ((ui32OffsetOfPriorityField + sizeof(ui32Priority))
+ >= psCleanup->psHWRenderContextMemInfo->uAllocSize)
+ {
+ PVR_DPF((
+ PVR_DBG_ERROR,
+ "SGXSetContextPriorityKM: invalid HWRenderContext prioirty offset"));
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /*
+ cannot be sure that offset (passed from user-land) is safe to deref
+ as a word-ptr on current CPU arch: copy one byte at a time.
+ */
+ pDst = (IMG_UINT8 *)psCleanup->psHWRenderContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetOfPriorityField;
+
+ pSrc = (IMG_UINT8 *)&ui32Priority;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(ui32Priority); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+ }
+ return PVRSRV_OK;
+}
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+typedef struct _SGX_HW_2D_CONTEXT_CLEANUP_
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_KERNEL_MEM_INFO *psHW2DContextMemInfo;
+ IMG_HANDLE hBlockAlloc;
+ PRESMAN_ITEM psResItem;
+ IMG_BOOL bCleanupTimerRunning;
+ IMG_PVOID pvTimeData;
+} SGX_HW_2D_CONTEXT_CLEANUP;
+
+static PVRSRV_ERROR SGXCleanupHW2DContextCallback(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)pvParam;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+
+ /* First, ensure the context is no longer being utilised */
+ eError = SGXCleanupRequest(psCleanup->psDeviceNode,
+ &psCleanup->psHW2DContextMemInfo->sDevVAddr,
+ PVRSRV_CLEANUPCMD_2DC,
+ bForceCleanup);
+
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ if (!psCleanup->bCleanupTimerRunning)
+ {
+ OSTimeCreateWithUSOffset(&psCleanup->pvTimeData, MAX_CLEANUP_TIME_US);
+ psCleanup->bCleanupTimerRunning = IMG_TRUE;
+ }
+ else
+ {
+ if (OSTimeHasTimePassed(psCleanup->pvTimeData))
+ {
+ eError = PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE;
+ psCleanup->bCleanupTimerRunning = IMG_FALSE;
+ OSTimeDestroy(psCleanup->pvTimeData);
+ }
+ }
+ }
+ else
+ {
+ if (psCleanup->bCleanupTimerRunning)
+ {
+ OSTimeDestroy(psCleanup->pvTimeData);
+ }
+ }
+
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ /* Free the Device Mem allocated */
+ PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode,
+ psCleanup->psHW2DContextMemInfo);
+
+ /* Finally, free the cleanup structure itself */
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+ /*not nulling pointer, copy on stack*/
+ }
+ return eError;
+}
+
+IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE hDeviceNode,
+ IMG_CPU_VIRTADDR *psHW2DContextCpuVAddr,
+ IMG_UINT32 ui32HW2DContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hBlockAlloc;
+ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psHeapInfo;
+ IMG_HANDLE hDevMemContextInt;
+ MMU_CONTEXT *psMMUContext;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ int iPtrByte;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
+ PRESMAN_ITEM psResItem;
+
+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
+ (IMG_VOID **)&psCleanup,
+ &hBlockAlloc,
+ "SGX Hardware 2D Context Cleanup");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate memory for SGX_HW_2D_CONTEXT_CLEANUP structure"));
+ goto exit0;
+ }
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID];
+
+ eError = PVRSRVAllocDeviceMemKM(hDeviceNode,
+ psPerProc,
+ psHeapInfo->hDevMemHeap,
+ PVRSRV_MEM_READ | PVRSRV_MEM_WRITE
+ | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT
+ | PVRSRV_MEM_CACHE_CONSISTENT,
+ ui32HW2DContextSize,
+ 32,
+ IMG_NULL,
+ 0,
+ 0,0,0,IMG_NULL, /* No sparse mapping data */
+ &psCleanup->psHW2DContextMemInfo,
+ "HW 2D Context");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate device memory for HW Render Context"));
+ goto exit1;
+ }
+
+ eError = OSCopyFromUser(psPerProc,
+ psCleanup->psHW2DContextMemInfo->pvLinAddrKM,
+ psHW2DContextCpuVAddr,
+ ui32HW2DContextSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't copy user-mode copy of HWContext into device memory"));
+ goto exit2;
+ }
+
+ /* Pass the DevVAddr of the new context back up through the bridge */
+ psHW2DContextDevVAddr->uiAddr = psCleanup->psHW2DContextMemInfo->sDevVAddr.uiAddr;
+
+ /* Retrieve the PDDevPAddr */
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Can't lookup DevMem Context"));
+ goto exit2;
+ }
+
+ psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt);
+ sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext);
+
+ /*
+ patch-in the Page-Directory Device-Physical address. Note that this is
+ copied-in one byte at a time, as we have no guarantee that the usermode-
+ provided ui32OffsetToPDDevPAddr is a validly-aligned address for the
+ current CPU architecture.
+ */
+ pSrc = (IMG_UINT8 *)&sPDDevPAddr;
+ pDst = (IMG_UINT8 *)psCleanup->psHW2DContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetToPDDevPAddr;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(IMG_DEV_PHYADDR); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+
+#if defined(PDUMP)
+ /* PDUMP the HW 2D Context */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW 2D context struct");
+
+ PDUMPMEM(
+ IMG_NULL,
+ psCleanup->psHW2DContextMemInfo,
+ 0,
+ ui32HW2DContextSize,
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psCleanup->psHW2DContextMemInfo));
+
+ /* PDUMP the PDDevPAddr */
+ PDUMPCOMMENT("Page directory address in HW 2D transfer context");
+ PDUMPPDDEVPADDR(
+ psCleanup->psHW2DContextMemInfo,
+ ui32OffsetToPDDevPAddr,
+ sPDDevPAddr,
+ MAKEUNIQUETAG(psCleanup->psHW2DContextMemInfo),
+ PDUMP_PD_UNIQUETAG);
+#endif
+
+ psCleanup->hBlockAlloc = hBlockAlloc;
+ psCleanup->psDeviceNode = psDeviceNode;
+ psCleanup->bCleanupTimerRunning = IMG_FALSE;
+
+ psResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_HW_2D_CONTEXT,
+ psCleanup,
+ 0,
+ &SGXCleanupHW2DContextCallback);
+
+ if (psResItem == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: ResManRegisterRes failed"));
+ goto exit2;
+ }
+
+ psCleanup->psResItem = psResItem;
+
+ return (IMG_HANDLE)psCleanup;
+
+/* Error exit paths */
+exit2:
+ PVRSRVFreeDeviceMemKM(hDeviceNode,
+ psCleanup->psHW2DContextMemInfo);
+exit1:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+	/* psCleanup is about to go out of scope; no need to NULL the pointer */
+exit0:
+ return IMG_NULL;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext, IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
+
+ PVR_ASSERT(hHW2DContext != IMG_NULL);
+
+ if (hHW2DContext == IMG_NULL)
+ {
+ return (PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)hHW2DContext;
+
+ eError = ResManFreeResByPtr(psCleanup->psResItem, bForceCleanup);
+
+ return eError;
+}
+#endif /* #if defined(SGX_FEATURE_2D_HARDWARE)*/
+
+/*!****************************************************************************
+ @Function	SGX2DQuerySyncOpsComplete
+
+ @Input	psSyncInfo : Sync object to be queried
+ @Input	ui32ReadOpsPending : snapshot of the pending read ops count
+ @Input	ui32WriteOpsPending : snapshot of the pending write ops count
+
+ @Return	IMG_TRUE - ops complete, IMG_FALSE - ops pending
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SGX2DQuerySyncOpsComplete)
+#endif
+static INLINE
+IMG_BOOL SGX2DQuerySyncOpsComplete(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
+ IMG_UINT32 ui32ReadOpsPending,
+ IMG_UINT32 ui32WriteOpsPending)
+{
+ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
+
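+	/*
+		The operations are complete once the "complete" counters have caught
+		up with the "pending" counts snapshotted by the caller; operations
+		queued after the snapshot raise the live pending counts but do not
+		affect this test.
+	*/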
+ return (IMG_BOOL)(
+ (psSyncData->ui32ReadOpsComplete >= ui32ReadOpsPending) &&
+ (psSyncData->ui32WriteOpsComplete >= ui32WriteOpsPending)
+ );
+}
+
+/*!****************************************************************************
+ @Function SGX2DQueryBlitsCompleteKM
+
+ @Input psDevInfo : SGX device info structure
+
+ @Input psSyncInfo : Sync object to be queried
+
+ @Input bWaitForComplete : IMG_TRUE to poll until the ops complete or a
+                           timeout expires, IMG_FALSE to return immediately
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo,
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
+ IMG_BOOL bWaitForComplete)
+{
+ IMG_UINT32 ui32ReadOpsPending, ui32WriteOpsPending;
+
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+
+ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Start"));
+
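+	/*
+		Snapshot the pending counts once, up front: ops queued after this
+		point raise the live counts but should not extend this wait.
+	*/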
+ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
+
+ if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending))
+ {
+ /* Instant success */
+ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Blits complete."));
+ return PVRSRV_OK;
+ }
+
+ /* Not complete yet */
+ if (!bWaitForComplete)
+ {
+ /* Just report not complete */
+ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Ops pending."));
+ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
+ }
+
+ /* Start polling */
+ PVR_DPF((PVR_DBG_MESSAGE, "SGX2DQueryBlitsCompleteKM: Ops pending. Start polling."));
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ OSSleepms(1);
+
+ if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending))
+ {
+ /* Success */
+ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Wait over. Blits complete."));
+ return PVRSRV_OK;
+ }
+
+ OSSleepms(1);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ /* Timed out */
+ PVR_DPF((PVR_DBG_ERROR,"SGX2DQueryBlitsCompleteKM: Timed out. Ops pending."));
+
+#if defined(DEBUG)
+ {
+ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
+
+ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Syncinfo: 0x%x, Syncdata: 0x%x",
+ (IMG_UINTPTR_T)psSyncInfo, (IMG_UINTPTR_T)psSyncData));
+
+ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Read ops complete: %d, Read ops pending: %d", psSyncData->ui32ReadOpsComplete, psSyncData->ui32ReadOpsPending));
+ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Write ops complete: %d, Write ops pending: %d", psSyncData->ui32WriteOpsComplete, psSyncData->ui32WriteOpsPending));
+
+ }
+#endif
+
+ return PVRSRV_ERROR_TIMEOUT;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR SGXFlushHWRenderTargetKM(IMG_HANDLE psDeviceNode,
+ IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr,
+ IMG_BOOL bForceCleanup)
+{
+	PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != 0);
+
+ return SGXCleanupRequest(psDeviceNode,
+ &sHWRTDataSetDevVAddr,
+ PVRSRV_CLEANUPCMD_RT,
+ bForceCleanup);
+}
+
+
+IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32TimeWraps,
+ IMG_UINT32 ui32Time)
+{
+#if defined(EUR_CR_TIMER)
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ PVR_UNREFERENCED_PARAMETER(ui32TimeWraps);
+ return ui32Time;
+#else
+ IMG_UINT64 ui64Clocks;
+ IMG_UINT32 ui32Clocksx16;
+
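+	/*
+		The uKernel timer counts down from ui32uKernelTimerClock, so the
+		clocks elapsed = (wraps * period) + (period - current value). The
+		result is scaled down by 16, presumably to extend the range
+		representable in 32 bits.
+	*/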
+ ui64Clocks = ((IMG_UINT64)ui32TimeWraps * psDevInfo->ui32uKernelTimerClock) +
+ (psDevInfo->ui32uKernelTimerClock - (ui32Time & EUR_CR_EVENT_TIMER_VALUE_MASK));
+ ui32Clocksx16 = (IMG_UINT32)(ui64Clocks / 16);
+
+ return ui32Clocksx16;
+#endif /* EUR_CR_TIMER */
+}
+
+
+IMG_VOID SGXWaitClocks(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32SGXClocks)
+{
+ /*
+ Round up to the next microsecond.
+ */
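+	/*
+		Note that the 32-bit multiply below overflows once ui32SGXClocks
+		exceeds ~4295 (2^32 / 1000000), so this is only suitable for short
+		waits.
+	*/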
+ OSWaitus(1 + (ui32SGXClocks * 1000000 / psDevInfo->ui32CoreClockSpeed));
+}
+
+
+
+/******************************************************************************
+ End of file (sgxutils.c)
+******************************************************************************/
diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxutils.h b/pvr-source/services4/srvkm/devices/sgx/sgxutils.h
new file mode 100644
index 0000000..fc2ef6f
--- /dev/null
+++ b/pvr-source/services4/srvkm/devices/sgx/sgxutils.h
@@ -0,0 +1,195 @@
+/*************************************************************************/ /*!
+@Title Device specific utility routines declarations
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Inline functions/structures specific to SGX
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "perproc.h"
+#include "sgxinfokm.h"
+
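+/*
+	CCB_OFFSET_IS_VALID bounds-checks a (potentially user-supplied) offset
+	into a CCB against the allocation size before it is used; the subtracted
+	form of the upper bound avoids integer overflow. CCB_DATA_FROM_OFFSET then
+	converts a validated offset into a kernel linear address within the CCB.
+*/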
+/* PRQA S 3410 7 */ /* macros require the absence of some brackets */
+#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psCCBKick, offset) \
+ ((sizeof(type) <= (psCCBMemInfo)->uAllocSize) && \
+ ((psCCBKick)->offset <= (psCCBMemInfo)->uAllocSize - sizeof(type)))
+
+#define CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \
+ ((type *)(((IMG_CHAR *)(psCCBMemInfo)->pvLinAddrKM) + \
+ (psCCBKick)->offset))
+
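+/* Running count of commands kicked to the SGX microkernel. */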
+extern IMG_UINT64 ui64KickCount;
+
+
+IMG_IMPORT
+IMG_VOID SGXTestActivePowerEvent(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32CallerID);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+ SGXMKIF_CMD_TYPE eCommandType,
+ SGXMKIF_COMMAND *psCommandData,
+ IMG_UINT32 ui32CallerID,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_HANDLE hDevMemContext,
+ IMG_BOOL bLastInScene);
+IMG_IMPORT
+PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ SGXMKIF_CMD_TYPE eCommandType,
+ SGXMKIF_COMMAND *psCommandData,
+ IMG_UINT32 ui32CallerID,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_HANDLE hDevMemContext,
+ IMG_BOOL bLastInScene);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_IMPORT
+IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_IMPORT
+IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode,
+ IMG_CPU_VIRTADDR *psHWRenderContextCpuVAddr,
+ IMG_UINT32 ui32HWRenderContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc);
+
+IMG_IMPORT
+IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode,
+ IMG_CPU_VIRTADDR *psHWTransferContextCpuVAddr,
+ IMG_UINT32 ui32HWTransferContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXFlushHWRenderTargetKM(IMG_HANDLE psDeviceNode,
+									   IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr,
+									   IMG_BOOL bForceCleanup);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext, IMG_BOOL bForceCleanup);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext, IMG_BOOL bForceCleanup);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXSetRenderContextPriorityKM(IMG_HANDLE hDeviceNode,
+ IMG_HANDLE hHWRenderContext,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32OffsetOfPriorityField);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXSetTransferContextPriorityKM(IMG_HANDLE hDeviceNode,
+ IMG_HANDLE hHWTransferContext,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32OffsetOfPriorityField);
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+IMG_IMPORT
+IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode,
+ IMG_CPU_VIRTADDR *psHW2DContextCpuVAddr,
+ IMG_UINT32 ui32HW2DContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext, IMG_BOOL bForceCleanup);
+#endif
+
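+/*!
+*******************************************************************************
+
+ @Function	SGXConvertTimeStamp
+
+ @Description
+
+ Convert a raw microkernel timer value and its wrap count into a single
+ 32-bit timestamp, in units of 16 SGX clocks (where EUR_CR_TIMER is present
+ the raw value is returned unchanged).
+
+ @Input	psDevInfo - SGX Device Info
+ @Input	ui32TimeWraps - number of times the timer value has wrapped
+ @Input	ui32Time - raw timer value
+
+ @Return	converted timestamp
+
+******************************************************************************/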
+IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32TimeWraps,
+ IMG_UINT32 ui32Time);
+
+/*!
+*******************************************************************************
+
+ @Function SGXWaitClocks
+
+ @Description
+
+ Wait for a specified number of SGX clock cycles to elapse.
+
+ @Input psDevInfo - SGX Device Info
+ @Input ui32SGXClocks - number of clock cycles to wait
+
+ @Return IMG_VOID
+
+******************************************************************************/
+IMG_VOID SGXWaitClocks(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32SGXClocks);
+
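+/*
+	Sends a cleanup command (e.g. PVRSRV_CLEANUPCMD_RT) to the SGX microkernel
+	for the HW data structure at the given device virtual address.
+*/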
+PVRSRV_ERROR SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR *psHWDataDevVAddr,
+ IMG_UINT32 ui32CleanupType,
+ IMG_BOOL bForceCleanup);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVGetSGXRevDataKM(PVRSRV_DEVICE_NODE* psDeviceNode, IMG_UINT32 *pui32SGXCoreRev,
+ IMG_UINT32 *pui32SGXCoreID);
+
+/*!
+******************************************************************************
+
+ @Function SGXContextSuspend
+
+ @Description - Interface to the SGX microkernel to instruct it to suspend
+ 			or resume processing on a given context. This will interrupt
+ 			current processing on the context if a task is already running
+ 			and is interruptible.
+
+ @Input psDeviceNode SGX device node
+ @Input psHWContextDevVAddr SGX virtual address of the context to be suspended
+ or resumed. Can be of type SGXMKIF_HWRENDERCONTEXT,
+ SGXMKIF_HWTRANSFERCONTEXT or SGXMKIF_HW2DCONTEXT
+ @Input bResume				IMG_TRUE to resume a previously suspended context,
+ 							IMG_FALSE to put a running context into the
+ 							suspended state
+
+******************************************************************************/
+PVRSRV_ERROR SGXContextSuspend(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR *psHWContextDevVAddr,
+ IMG_BOOL bResume);
+
+/******************************************************************************
+ End of file (sgxutils.h)
+******************************************************************************/