author     Iliyan Malchev <malchev@google.com>      2011-08-08 11:24:41 -0700
committer  Iliyan Malchev <malchev@google.com>      2011-08-08 11:46:17 -0700
commit     a40968e9b9abcdcc042948ea73346b020279d4b7 (patch)
tree       926da5b4447017617fcc255b490eaee5174a4a9a /tiler
parent     8558b0909d3f42288f488e6fd1341877c639a594 (diff)
initial commit
Change-Id: I8f7a7eeece0e516efa486b77e9d97805c0e65d3e
Signed-off-by: Iliyan Malchev <malchev@google.com>
Diffstat (limited to 'tiler')
-rw-r--r--   tiler/.gitignore          27
-rw-r--r--   tiler/Android.mk          33
-rw-r--r--   tiler/Makefile.am         80
-rw-r--r--   tiler/README             136
-rwxr-xr-x   tiler/bootstrap.sh        18
-rwxr-xr-x   tiler/configure.ac        75
-rw-r--r--   tiler/debug_utils.h      211
-rwxr-xr-x   tiler/fill_utr.py         92
-rw-r--r--   tiler/libtimemmgr.pc.in   13
-rw-r--r--   tiler/list_utils.h       511
-rw-r--r--   tiler/mem_types.h        101
-rw-r--r--   tiler/memmgr.c          1078
-rw-r--r--   tiler/memmgr.h           311
-rw-r--r--   tiler/memmgr_test.c     1693
-rw-r--r--   tiler/testlib.c          163
-rw-r--r--   tiler/testlib.h          101
-rw-r--r--   tiler/tiler.h            105
-rw-r--r--   tiler/tiler_ptest.c      729
-rw-r--r--   tiler/tilermem.h          79
-rw-r--r--   tiler/tilermem_utils.h    52
-rw-r--r--   tiler/tilermgr.c         203
-rw-r--r--   tiler/tilermgr.h          54
-rw-r--r--   tiler/utils.h             63
-rw-r--r--   tiler/utils_test.c       489
24 files changed, 6417 insertions, 0 deletions
diff --git a/tiler/.gitignore b/tiler/.gitignore
new file mode 100644
index 0000000..c23ec1a
--- /dev/null
+++ b/tiler/.gitignore
@@ -0,0 +1,27 @@
+aclocal.m4
+autom4te.cache
+Makefile.in
+Makefile
+ltmain.sh
+stamp-h1
+.deps
+.libs
+*.o
+*.lo
+*.la
+libtool
+*.pc
+config.log
+config.status
+config.guess
+config.h
+config.h.in
+config.sub
+config
+configure
+m4
+build
+*~
+tests/testlib.c
+*_test
+*_ptest
\ No newline at end of file
diff --git a/tiler/Android.mk b/tiler/Android.mk
new file mode 100644
index 0000000..60c8699
--- /dev/null
+++ b/tiler/Android.mk
@@ -0,0 +1,33 @@
+# only include if running on an omap4 platform
+ifeq ($(TARGET_BOARD_PLATFORM),omap4)
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := memmgr.c tilermgr.c
+LOCAL_MODULE := libtimemmgr
+LOCAL_MODULE_TAGS := optional tests
+include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := memmgr_test.c testlib.c
+LOCAL_SHARED_LIBRARIES := libtimemmgr
+LOCAL_MODULE := memmgr_test
+LOCAL_MODULE_TAGS := tests
+include $(BUILD_HEAPTRACKED_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := utils_test.c testlib.c
+LOCAL_SHARED_LIBRARIES := libtimemmgr
+LOCAL_MODULE := utils_test
+LOCAL_MODULE_TAGS := tests
+include $(BUILD_HEAPTRACKED_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := tiler_ptest.c
+LOCAL_SHARED_LIBRARIES := libtimemmgr
+LOCAL_MODULE := tiler_ptest
+LOCAL_MODULE_TAGS:= tests
+include $(BUILD_HEAPTRACKED_EXECUTABLE)
+
+endif
diff --git a/tiler/Makefile.am b/tiler/Makefile.am
new file mode 100644
index 0000000..9f9e70b
--- /dev/null
+++ b/tiler/Makefile.am
@@ -0,0 +1,80 @@
+#
+# Makefile.am
+#
+# TI OMAP Processor Memory Allocator Interface functions makefile.
+#
+# Copyright (C) 2009-2011 Texas Instruments, Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# * Neither the name of Texas Instruments Incorporated nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+AUTOMAKE_OPTIONS = foreign
+
+## sources
+
+h_sources = memmgr.h tilermem.h mem_types.h tiler.h tilermem_utils.h
+if STUB_TILER
+c_sources = memmgr.c
+else
+c_sources = memmgr.c tilermgr.c
+endif
+
+if TILERMGR
+library_include_HEADERS = $(h_sources) tilermgr.h
+else
+library_include_HEADERS = $(h_sources)
+noinst_HEADERS = tilermgr.h
+endif
+
+# Install the headers in a library-specific include directory, e.g. timemmgr:
+library_includedir=$(includedir)/timemmgr
+
+# library sources
+lib_LTLIBRARIES= libtimemmgr.la
+libtimemmgr_la_SOURCES = $(h_sources) $(c_sources)
+libtimemmgr_la_CFLAGS = $(MEMMGR_CFLAGS) -fpic -ansi
+libtimemmgr_la_LIBTOOLFLAGS = --tag=disable-static
+libtimemmgr_la_LDFLAGS = -version-info 2:0:0
+
+if UNIT_TESTS
+bin_PROGRAMS = utils_test memmgr_test tiler_ptest
+
+utils_testdir = .
+utils_test_SOURCES = utils_test.c testlib.c
+
+memmgr_testdir = .
+memmgr_test_SOURCES = memmgr_test.c testlib.c
+memmgr_test_LDADD = libtimemmgr.la
+
+tiler_ptest_SOURCES = tiler_ptest.c
+tiler_ptest_LDADD = libtimemmgr.la
+endif
+
+pkgconfig_DATA = libtimemmgr.pc
+pkgconfigdir = $(libdir)/pkgconfig
+
diff --git a/tiler/README b/tiler/README
new file mode 100644
index 0000000..e531b09
--- /dev/null
+++ b/tiler/README
@@ -0,0 +1,136 @@
+Validating MemMgr
+
+ MemMgr tests are not persistently enumerated, so the test numbers in
+ memmgr_test may vary from release to release.
+
+ You need to run the test with the "list" argument to list all the test
+ descriptions and the corresponding test numbers.
+
+ E.g. "memmgr_test list"
+
+ If you have access to the official test report, the last line of each
+ cell in the Details column contains the test case description. Match the
+ descriptions printed by "list" against these last lines.
+
+ Negative MemMgr tests run multiple test cases, so they map to multiple rows
+ in the official test report. Some of these test cases may not be available,
+ or may pass or fail independently of the overall test result.
+
+ To ease the generation of the test report, a python script is provided that
+ will populate most of the test report:
+
+ run memmgr_test and pipe the output of the tests into a
+ file, e.g.:
+
+ memmgr_test > test.log
+
+ open UTR and select Tiler User Space worksheet
+ run this script by piping the test logs into the script, e.g. (you may
+ have to copy the log file from the target to a location that you can
+ access from the machine that has the UTR open.)
+
+ python fill_utr.py < test.log
+
+Latest List of test cases
+
+memmgr_test
+
+TEST # 1 - alloc_1D_test(4096, 0)
+TEST # 2 - alloc_2D_test(64, 64, PIXEL_FMT_8BIT)
+TEST # 3 - alloc_2D_test(64, 64, PIXEL_FMT_16BIT)
+TEST # 4 - alloc_2D_test(64, 64, PIXEL_FMT_32BIT)
+TEST # 5 - alloc_NV12_test(64, 64)
+TEST # 6 - map_1D_test(4096, 0)
+TEST # 7 - alloc_1D_test(176 * 144 * 2, 512)
+TEST # 8 - alloc_2D_test(176, 144, PIXEL_FMT_8BIT)
+TEST # 9 - alloc_2D_test(176, 144, PIXEL_FMT_16BIT)
+TEST # 10 - alloc_2D_test(176, 144, PIXEL_FMT_32BIT)
+TEST # 11 - alloc_NV12_test(176, 144)
+TEST # 12 - map_1D_test(176 * 144 * 2, 2048)
+TEST # 13 - alloc_1D_test(640 * 480 * 2, 0)
+TEST # 14 - alloc_2D_test(640, 480, PIXEL_FMT_8BIT)
+TEST # 15 - alloc_2D_test(640, 480, PIXEL_FMT_16BIT)
+TEST # 16 - alloc_2D_test(640, 480, PIXEL_FMT_32BIT)
+TEST # 17 - alloc_NV12_test(640, 480)
+TEST # 18 - map_1D_test(640 * 480 * 2, 0)
+TEST # 19 - alloc_1D_test(848 * 480 * 2, 0)
+TEST # 20 - alloc_2D_test(848, 480, PIXEL_FMT_8BIT)
+TEST # 21 - alloc_2D_test(848, 480, PIXEL_FMT_16BIT)
+TEST # 22 - alloc_2D_test(848, 480, PIXEL_FMT_32BIT)
+TEST # 23 - alloc_NV12_test(848, 480)
+TEST # 24 - map_1D_test(848 * 480 * 2, 0)
+TEST # 25 - alloc_1D_test(1280 * 720 * 2, 0)
+TEST # 26 - alloc_2D_test(1280, 720, PIXEL_FMT_8BIT)
+TEST # 27 - alloc_2D_test(1280, 720, PIXEL_FMT_16BIT)
+TEST # 28 - alloc_2D_test(1280, 720, PIXEL_FMT_32BIT)
+TEST # 29 - alloc_NV12_test(1280, 720)
+TEST # 30 - map_1D_test(1280 * 720 * 2, 0)
+TEST # 31 - alloc_1D_test(1920 * 1080 * 2, 0)
+TEST # 32 - alloc_2D_test(1920, 1080, PIXEL_FMT_8BIT)
+TEST # 33 - alloc_2D_test(1920, 1080, PIXEL_FMT_16BIT)
+TEST # 34 - alloc_2D_test(1920, 1080, PIXEL_FMT_32BIT)
+TEST # 35 - alloc_NV12_test(1920, 1080)
+TEST # 36 - map_1D_test(1920 * 1080 * 2, 0)
+TEST # 37 - neg_alloc_tests()
+TEST # 38 - neg_free_tests()
+TEST # 39 - neg_map_tests()
+TEST # 40 - neg_unmap_tests()
+TEST # 41 - neg_check_tests()
+TEST # 42 - page_size_test()
+TEST # 43 - maxalloc_2D_test(2500, 32, PIXEL_FMT_8BIT, MAX_ALLOCS)
+TEST # 44 - maxalloc_2D_test(2500, 16, PIXEL_FMT_16BIT, MAX_ALLOCS)
+TEST # 45 - maxalloc_2D_test(1250, 16, PIXEL_FMT_32BIT, MAX_ALLOCS)
+TEST # 46 - maxalloc_2D_test(5000, 32, PIXEL_FMT_8BIT, MAX_ALLOCS)
+TEST # 47 - maxalloc_2D_test(5000, 16, PIXEL_FMT_16BIT, MAX_ALLOCS)
+TEST # 48 - maxalloc_2D_test(2500, 16, PIXEL_FMT_32BIT, MAX_ALLOCS)
+TEST # 49 - alloc_2D_test(8193, 16, PIXEL_FMT_8BIT)
+TEST # 50 - alloc_2D_test(8193, 16, PIXEL_FMT_16BIT)
+TEST # 51 - alloc_2D_test(4097, 16, PIXEL_FMT_32BIT)
+TEST # 52 - alloc_2D_test(16384, 16, PIXEL_FMT_8BIT)
+TEST # 53 - alloc_2D_test(16384, 16, PIXEL_FMT_16BIT)
+TEST # 54 - alloc_2D_test(8192, 16, PIXEL_FMT_32BIT)
+TEST # 55 - !alloc_2D_test(16385, 16, PIXEL_FMT_8BIT)
+TEST # 56 - !alloc_2D_test(16385, 16, PIXEL_FMT_16BIT)
+TEST # 57 - !alloc_2D_test(8193, 16, PIXEL_FMT_32BIT)
+TEST # 58 - maxalloc_1D_test(4096, MAX_ALLOCS)
+TEST # 59 - maxalloc_2D_test(64, 64, PIXEL_FMT_8BIT, MAX_ALLOCS)
+TEST # 60 - maxalloc_2D_test(64, 64, PIXEL_FMT_16BIT, MAX_ALLOCS)
+TEST # 61 - maxalloc_2D_test(64, 64, PIXEL_FMT_32BIT, MAX_ALLOCS)
+TEST # 62 - maxalloc_NV12_test(64, 64, MAX_ALLOCS)
+TEST # 63 - maxmap_1D_test(4096, MAX_ALLOCS)
+TEST # 64 - maxalloc_1D_test(176 * 144 * 2, MAX_ALLOCS)
+TEST # 65 - maxalloc_2D_test(176, 144, PIXEL_FMT_8BIT, MAX_ALLOCS)
+TEST # 66 - maxalloc_2D_test(176, 144, PIXEL_FMT_16BIT, MAX_ALLOCS)
+TEST # 67 - maxalloc_2D_test(176, 144, PIXEL_FMT_32BIT, MAX_ALLOCS)
+TEST # 68 - maxalloc_NV12_test(176, 144, MAX_ALLOCS)
+TEST # 69 - maxmap_1D_test(176 * 144 * 2, MAX_ALLOCS)
+TEST # 70 - maxalloc_1D_test(640 * 480 * 2, MAX_ALLOCS)
+TEST # 71 - maxalloc_2D_test(640, 480, PIXEL_FMT_8BIT, MAX_ALLOCS)
+TEST # 72 - maxalloc_2D_test(640, 480, PIXEL_FMT_16BIT, MAX_ALLOCS)
+TEST # 73 - maxalloc_2D_test(640, 480, PIXEL_FMT_32BIT, MAX_ALLOCS)
+TEST # 74 - maxalloc_NV12_test(640, 480, MAX_ALLOCS)
+TEST # 75 - maxmap_1D_test(640 * 480 * 2, MAX_ALLOCS)
+TEST # 76 - maxalloc_1D_test(848 * 480 * 2, MAX_ALLOCS)
+TEST # 77 - maxalloc_2D_test(848, 480, PIXEL_FMT_8BIT, MAX_ALLOCS)
+TEST # 78 - maxalloc_2D_test(848, 480, PIXEL_FMT_16BIT, MAX_ALLOCS)
+TEST # 79 - maxalloc_2D_test(848, 480, PIXEL_FMT_32BIT, MAX_ALLOCS)
+TEST # 80 - maxalloc_NV12_test(848, 480, MAX_ALLOCS)
+TEST # 81 - maxmap_1D_test(848 * 480 * 2, MAX_ALLOCS)
+TEST # 82 - maxalloc_1D_test(1280 * 720 * 2, MAX_ALLOCS)
+TEST # 83 - maxalloc_2D_test(1280, 720, PIXEL_FMT_8BIT, MAX_ALLOCS)
+TEST # 84 - maxalloc_2D_test(1280, 720, PIXEL_FMT_16BIT, MAX_ALLOCS)
+TEST # 85 - maxalloc_2D_test(1280, 720, PIXEL_FMT_32BIT, MAX_ALLOCS)
+TEST # 86 - maxalloc_NV12_test(1280, 720, MAX_ALLOCS)
+TEST # 87 - maxmap_1D_test(1280 * 720 * 2, MAX_ALLOCS)
+TEST # 88 - maxalloc_1D_test(1920 * 1080 * 2, MAX_ALLOCS)
+TEST # 89 - maxalloc_2D_test(1920, 1080, PIXEL_FMT_8BIT, MAX_ALLOCS)
+TEST # 90 - maxalloc_2D_test(1920, 1080, PIXEL_FMT_16BIT, MAX_ALLOCS)
+TEST # 91 - maxalloc_2D_test(1920, 1080, PIXEL_FMT_32BIT, MAX_ALLOCS)
+TEST # 92 - maxalloc_NV12_test(1920, 1080, 2)
+TEST # 93 - maxalloc_NV12_test(1920, 1080, MAX_ALLOCS)
+TEST # 94 - maxmap_1D_test(1920 * 1080 * 2, MAX_ALLOCS)
+TEST # 95 - star_tiler_test(1000, 10)
+TEST # 96 - star_tiler_test(1000, 30)
+TEST # 97 - star_test(100, 10)
+TEST # 98 - star_test(1000, 10)
+
diff --git a/tiler/bootstrap.sh b/tiler/bootstrap.sh
new file mode 100755
index 0000000..6170918
--- /dev/null
+++ b/tiler/bootstrap.sh
@@ -0,0 +1,18 @@
+#! /bin/sh
+
+cd `dirname $0`
+
+# on some platforms, you have "g" versions of some of these tools instead,
+# i.e., glibtoolize instead of libtoolize.
+find_tool() {
+ which $1 2> /dev/null || which g$1 2> /dev/null
+}
+
+aclocal=`find_tool aclocal`
+libtoolize=`find_tool libtoolize`
+automake=`find_tool automake`
+autoconf=`find_tool autoconf`
+autoheader=`find_tool autoheader`
+
+mkdir -p config && $autoheader && $aclocal && $libtoolize --copy --force && $automake --copy --add-missing --foreign && $autoconf
+
diff --git a/tiler/configure.ac b/tiler/configure.ac
new file mode 100755
index 0000000..30d131c
--- /dev/null
+++ b/tiler/configure.ac
@@ -0,0 +1,75 @@
+# -*- Autoconf -*-
+# Process this file with autoconf to produce a configure script.
+
+AC_PREREQ(2.61)
+AC_INIT(timemmgr, 2.00, http://www.ti.com)
+AC_CONFIG_SRCDIR([memmgr.c])
+AC_CONFIG_HEADER([config.h])
+AC_CONFIG_MACRO_DIR([m4])
+AC_CONFIG_AUX_DIR([config])
+AC_CANONICAL_SYSTEM
+AM_INIT_AUTOMAKE([timemmgr], [2.00])
+LT_INIT
+
+# Checks for programs.
+AC_PROG_CC
+AM_PROG_LIBTOOL
+
+# Checks for libraries.
+
+# Checks for header files.
+AC_HEADER_STDC
+AC_CHECK_HEADERS([fcntl.h stdint.h stdlib.h string.h sys/ioctl.h unistd.h])
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_HEADER_STDBOOL
+AC_TYPE_UINT16_T
+AC_TYPE_UINT32_T
+
+# Checks for library functions.
+AC_PROG_GCC_TRADITIONAL
+#AC_FUNC_MALLOC
+#AC_FUNC_MMAP
+AC_CHECK_FUNCS([munmap strerror])
+
+AC_ARG_ENABLE(tilermgr,
+[ --enable-tilermgr Include TilerMgr headers],
+[case "${enableval}" in
+ yes) tilermgr=true ;;
+ no) tilermgr=false ;;
+ *) AC_MSG_ERROR(bad value ${enableval} for --enable-tilermgr) ;;
+esac],[tilermgr=true])
+
+AC_ARG_ENABLE(tests,
+[ --enable-tests Build unit tests],
+[case "${enableval}" in
+ yes) tests=true ;;
+ no) tests=false ;;
+ *) AC_MSG_ERROR(bad value ${enableval} for --enable-tests) ;;
+esac],[tests=false])
+
+AM_CONDITIONAL(UNIT_TESTS, test x$tests = xtrue)
+
+AM_CONDITIONAL(TILERMGR, test x$tilermgr != xfalse)
+
+AC_ARG_ENABLE(stub,
+[ --enable-stub Stub tiler and syslink on non-ARM systems],
+[case "${enableval}" in
+ yes) stub_tiler=true ;;
+ no) stub_tiler=false ;;
+ *) AC_MSG_ERROR(bad value ${enableval} for --enable-stub) ;;
+esac],[stub_tiler=false])
+
+AM_CONDITIONAL(STUB_TILER, test x$stub_tiler = xtrue)
+
+if test x$stub_tiler = xtrue; then
+AC_DEFINE([STUB_TILER],[1],[Use tiler stub])
+fi
+
+# Project build flags
+MEMMGR_CFLAGS="-Werror -Wall -pipe -ansi"
+AC_SUBST(MEMMGR_CFLAGS)
+
+AC_CONFIG_FILES([libtimemmgr.pc Makefile])
+AC_OUTPUT
+
diff --git a/tiler/debug_utils.h b/tiler/debug_utils.h
new file mode 100644
index 0000000..316e80e
--- /dev/null
+++ b/tiler/debug_utils.h
@@ -0,0 +1,211 @@
+/*
+ * debug_utils.h
+ *
+ * Debug Utility definitions.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DEBUG_UTILS_H_
+#define _DEBUG_UTILS_H_
+
+#include <stdio.h>
+
+/*#define __DEBUG__*/
+/*#define __DEBUG_ENTRY__*/
+/*#define __DEBUG_ASSERT__*/
+
+/* ---------- Generic Debug Print Macros ---------- */
+
+/**
+ * Use as:
+ * P("val is %d", 5);
+ * ==> val is 5
+ * DP("val is %d", 15);
+ * ==> val is 15 at main(test.c:56)
+ */
+/* debug print (fmt must be a literal); adds new-line. */
+#define __DEBUG_PRINT(fmt, ...) do { fprintf(stdout, fmt "\n", ##__VA_ARGS__); fflush(stdout); } while(0)
+
+/* debug print with context information (fmt must be a literal) */
+#define __DEBUG_DPRINT(fmt, ...) __DEBUG_PRINT(fmt " at %s(" __FILE__ ":%d)", ##__VA_ARGS__, __FUNCTION__, __LINE__)
+
+#ifdef __DEBUG__
+#define P(fmt, ...) __DEBUG_PRINT(fmt, ##__VA_ARGS__)
+#define DP(fmt, ...) __DEBUG_DPRINT(fmt, ##__VA_ARGS__)
+#else
+#define P(fmt, ...)
+#define DP(fmt, ...)
+#endif
+
+/* ---------- Program Flow Debug Macros ---------- */
+
+/**
+ * Use as:
+ * int get5() {
+ * IN;
+ * return R_I(5);
+ * }
+ * void check(int i) {
+ * IN;
+ * if (i == 5) { RET; return; }
+ * OUT;
+ * }
+ * void main() {
+ * IN;
+ * check(get5());
+ * check(2);
+ * OUT;
+ * }
+ * ==>
+ * in main(test.c:11)
+ * in get5(test.c:2)
+ * out(5) at get5(test.c:3)
+ * in check(test.c:6)
+ * out() at check(test.c:7)
+ * in check(test.c:6)
+ * out check(test.c:8)
+ * out main(test.c:14)
+ */
+
+#ifdef __DEBUG_ENTRY__
+/* function entry */
+#define IN __DEBUG_PRINT("in %s(" __FILE__ ":%d)", __FUNCTION__, __LINE__)
+/* function exit */
+#define OUT __DEBUG_PRINT("out %s(" __FILE__ ":%d)", __FUNCTION__, __LINE__)
+/* function abort (return;) Use as { RET; return; } */
+#define RET __DEBUG_DPRINT("out() ")
+/* generic function return */
+#define R(val,type,fmt) ({ type __val__ = (type) val; __DEBUG_DPRINT("out(" fmt ")", __val__); __val__; })
+#else
+#define IN
+#define OUT
+#define RET
+#define R(val,type,fmt) (val)
+#endif
+
+/* integer return */
+#define R_I(val) R(val,int,"%d")
+/* pointer return */
+#define R_P(val) R(val,void *,"%p")
+/* long return */
+#define R_UP(val) R(val,long,"0x%lx")
+
+/* ---------- Assertion Debug Macros ---------- */
+
+/**
+ * Use as:
+ * int i = 5;
+ * // int j = i * 5;
+ * int j = A_I(i,==,3) * 5;
+ * // if (i > 3) P("bad")
+ * if (NOT_I(i,<=,3)) P("bad")
+ * P("j is %d", j);
+ * ==> assert: i (=5) !== 3 at main(test.c:56)
+ * assert: i (=5) !<= 3 at main(test.c:58)
+ * j is 25
+ *
+ */
+/* generic assertion check, A returns the value of exp, CHK return void */
+#ifdef __DEBUG_ASSERT__
+#define A(exp,cmp,val,type,fmt) ({ \
+ type __exp__ = (type) (exp); type __val__ = (type) (val); \
+ if (!(__exp__ cmp __val__)) __DEBUG_DPRINT("assert: %s (=" fmt ") !" #cmp " " fmt, #exp, __exp__, __val__); \
+ __exp__; \
+})
+#define CHK(exp,cmp,val,type,fmt) do { \
+ type __exp__ = (type) (exp); type __val__ = (type) (val); \
+ if (!(__exp__ cmp __val__)) __DEBUG_DPRINT("assert: %s (=" fmt ") !" #cmp " " fmt, #exp, __exp__, __val__); \
+} while(0)
+#else
+#define A(exp,cmp,val,type,fmt) (exp)
+#define CHK(exp,cmp,val,type,fmt)
+#endif
+
+/* typed assertions */
+#define A_I(exp,cmp,val) A(exp,cmp,val,int,"%d")
+#define A_L(exp,cmp,val) A(exp,cmp,val,long,"%ld")
+#define A_P(exp,cmp,val) A(exp,cmp,val,void *,"%p")
+#define CHK_I(exp,cmp,val) CHK(exp,cmp,val,int,"%d")
+#define CHK_L(exp,cmp,val) CHK(exp,cmp,val,long,"%ld")
+#define CHK_P(exp,cmp,val) CHK(exp,cmp,val,void *,"%p")
+
+/* generic assertion check, returns true iff assertion fails */
+#ifdef __DEBUG_ASSERT__
+#define NOT(exp,cmp,val,type,fmt) ({ \
+ type __exp__ = (type) (exp); type __val__ = (type) (val); \
+ if (!(__exp__ cmp __val__)) __DEBUG_DPRINT("assert: %s (=" fmt ") !" #cmp " " fmt, #exp, __exp__, __val__); \
+ !(__exp__ cmp __val__); \
+})
+#else
+#define NOT(exp,cmp,val,type,fmt) (!((exp) cmp (val)))
+#endif
+
+/* typed assertion checks */
+#define NOT_I(exp,cmp,val) NOT(exp,cmp,val,int,"%d")
+#define NOT_L(exp,cmp,val) NOT(exp,cmp,val,long,"%ld")
+#define NOT_P(exp,cmp,val) NOT(exp,cmp,val,void *,"%p")
+
+
+/* system assertions - will use perror to give external error information */
+#ifdef __DEBUG_ASSERT__
+#define ERR_S(fmt, ...) do { fprintf(stderr, fmt " at %s(" __FILE__ ":%d", ##__VA_ARGS__, __FUNCTION__, __LINE__); perror(")"); fflush(stderr); } while(0)
+#define A_S(exp,cmp,val) ({ \
+ int __exp__ = (int) (exp); int __val__ = (int) (val); \
+ if (!(__exp__ cmp __val__)) ERR_S("assert: %s (=%d) !" #cmp " %d", #exp, __exp__, __val__); \
+ __exp__; \
+})
+#define CHK_S(exp,cmp,val) do { \
+ int __exp__ = (int) (exp); int __val__ = (int) (val); \
+ if (!(__exp__ cmp __val__)) ERR_S("assert: %s (=%d) !" #cmp " %d", #exp, __exp__, __val__); \
+} while(0)
+#define NOT_S(exp,cmp,val) ({ \
+ int __exp__ = (int) (exp); int __val__ = (int) (val); \
+ if (!(__exp__ cmp __val__)) ERR_S("assert: %s (=%d) !" #cmp " %d", #exp, __exp__, __val__); \
+ !(__exp__ cmp __val__); \
+})
+#else
+#define A_S(exp,cmp,val) (exp)
+#define CHK_S(exp,cmp,val)
+#define NOT_S(exp,cmp,val) (!((exp) cmp (val)))
+#endif
+
+/* error propagation macros - these macros ensure evaluation of the expression
+ even if there was a prior error */
+
+/* new error is accumulated into error */
+#define ERR_ADD(err, exp) do { int __error__ = A_I(exp,==,0); err = err ? err : __error__; } while(0)
+#define ERR_ADD_S(err, exp) do { int __error__ = A_S(exp,==,0); err = err ? err : __error__; } while(0)
+/* new error overwrites old error */
+#define ERR_OVW(err, exp) do { int __error__ = A_I(exp,==,0); err = __error__ ? __error__ : err; } while(0)
+#define ERR_OVW_S(err, exp) do { int __error__ = A_S(exp,==,0); err = __error__ ? __error__ : err; } while(0)
+
+#endif
+
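The comment blocks above show P/DP, the flow macros, and the typed assertions in use, but not the system assertions (A_S/NOT_S) or the error-propagation helpers (ERR_ADD*/ERR_OVW*). The following is a minimal sketch of how those might be combined; it is illustrative only and not part of the patch, and touch_file and its argument are made up for the example:

    /* enable reporting; without __DEBUG_ASSERT__ these macros reduce to bare checks */
    #define __DEBUG_ASSERT__
    #include <fcntl.h>
    #include <unistd.h>
    #include "debug_utils.h"

    int touch_file(const char *name)
    {
        int err = 0;

        /* NOT_S evaluates to true iff the check fails, and perror()s the cause */
        int fd = open(name, O_WRONLY | O_CREAT, 0644);
        if (NOT_S(fd,>=,0)) return 1;

        /* A_S returns the expression's value and reports failures via perror */
        A_S(write(fd, "x", 1),==,1);

        /* ERR_ADD_S accumulates errors: the first error is kept, but the
           expression (here the close) is still evaluated */
        ERR_ADD_S(err, close(fd));
        return err;
    }
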
diff --git a/tiler/fill_utr.py b/tiler/fill_utr.py
new file mode 100755
index 0000000..d006298
--- /dev/null
+++ b/tiler/fill_utr.py
@@ -0,0 +1,92 @@
+# Use this script to fill out the UTR for MemMgr/D2C tests. You can use
+# either Python 2.6 or 3.1
+#
+# Limitations:
+# - only basic tests run by memmgr_test and d2c_test are filled out
+# - negative memmgr_tests are performed in one step and this script will
+# not separate which part of the test failed. If your negative memmgr
+# tests fail, you want to manually check which test case failed.
+
+# Usage:
+# 1. run memmgr_test and d2c_test, and pipe the output of the tests into a
+# file.
+#
+# memmgr_test > test.log
+# d2c_test >> test.log
+#
+# 2. open UTR and select Tiler/D2C User Space worksheet
+# 3. run this script by piping the test logs into the script, e.g.
+# python fill_utr.py < test.log
+
+import sys, re, win32com.client
+
+TCs, results = [], {}
+
+# gather test results from the log
+for line in sys.stdin:
+    # check for test description
+    desc = re.search("TEST #... - (.*)", line.strip())
+    if desc:
+        TCs.append(desc.group(1))
+
+    # check for any test result
+    result = re.search("==> TEST (.*)", line.strip())
+    if result:
+        result = result.group(1)
+        if result == 'OK':
+            result = 'P', ''
+        elif result == 'NOT AVAILABLE':
+            result = 'D', ''
+        else:
+            result = 'F', 'failed with error #' + result[5:-1]
+
+        results[TCs.pop(0)] = result
+
+assert "test_d2c(1920, 1080, 2)" in results
+
+# populate test results
+excel = win32com.client.Dispatch('Excel.Application')
+if not excel:
+    print('Could not find Excel application. The UTR is probably not open in Excel.')
+    sys.exit(1)
+
+sheet = excel.ActiveSheet
+if sheet.Name != 'Tiler' or sheet.Cells(1,1).Text != 'Test Metrics':
+    print('Active worksheet does not seem to be the Tiler UTR')
+    sys.exit(1)
+
+# find Header
+for r in range(1, 100):
+    if sheet.Cells(r, 1).Text == 'Test Case\nID':
+        break
+else:
+    print('Could not find header row starting with "Test Case\\nID"')
+    sys.exit(1)
+
+# find Status, Details and Comments columns
+heads = t_status, t_details, t_comments, t_desc = 'Status', 'Details', 'Comments', 'Test Case\nDescription'
+heads = set(heads)
+cols = {}
+for c in range(1, 100):
+    if sheet.Cells(r, c).Text in heads:
+        cols[sheet.Cells(r, c).Text] = c
+        heads.remove(sheet.Cells(r, c).Text)
+if heads:
+    print('Could not find columns', heads)
+    sys.exit(1)
+
+# populate sheet, quit after 20 empty Details/Description cells
+empties = 0
+while empties < 20:
+    r += 1
+    details = sheet.Cells(r, cols[t_details]).Text
+    if details:
+        empties = 0
+        # get test case description
+        desc = details.split('\n')[-1]
+        if not desc in results:
+            results[desc] = 'U', ''
+        sheet.Cells(r, cols[t_status]).Value, sheet.Cells(r, cols[t_comments]).Value = results[desc]
+    elif not sheet.Cells(r, cols[t_desc]).Text:
+        empties += 1
+
diff --git a/tiler/libtimemmgr.pc.in b/tiler/libtimemmgr.pc.in
new file mode 100644
index 0000000..7ecd886
--- /dev/null
+++ b/tiler/libtimemmgr.pc.in
@@ -0,0 +1,13 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: timemmgr
+Description: TI memory manager
+Version: @VERSION@
+Requires:
+Libs: -L${libdir} -ltimemmgr
+Cflags: -I${includedir}/timemmgr
+
+
diff --git a/tiler/list_utils.h b/tiler/list_utils.h
new file mode 100644
index 0000000..1643cf0
--- /dev/null
+++ b/tiler/list_utils.h
@@ -0,0 +1,511 @@
+/*
+ * list_utils.h
+ *
+ * Utility definitions for the Memory Interface for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIST_UTILS_H_
+#define _LIST_UTILS_H_
+
+/*
+
+ DLIST macros facilitate double-linked lists in a generic fashion.
+
+ - Double-linked lists simplify list manipulations, so they are preferred to
+ single-linked lists.
+
+ - Having a double-linked list with a separate header allows accessing the
+ last element of the list easily, so this is also preferred to a double-
+ linked list that uses NULL pointers at its ends.
+
+ Therefore, the following is required: a list info structure (separate from
+ the element structure, although the info structure can be a member of the
+ element structure). The info structure must contain (at least) the
+ following 3 members:
+
+ *next, *last: as pointers to the info structure. These are the next and
+ previous elements in the list.
+ *me: as a pointer to the element structure. This is how we get to the
+ element itself, as next and last points to another info structure.
+
+ The list element structure __MAY__ contain the info structure as a member,
+ or a pointer to the info structure if it is desired to be kept separately.
+ In such cases macros are provided to add the elements directly to the list,
+ and automatically set up the info structure fields correctly. You can also
+ iterate through the elements of the list using a pointer to the element
+ itself, as the list info structure can be obtained for each element.
+
+ Otherwise, macros are provided to manipulate list info structures and
+ link them in any shape or form into double linked lists. This allows
+ having NULL values as members of such lists.
+
+ :NOTE: If you use a macro with a field argument, you must not have NULL
+ elements because the field of any element must be/point to the list
+ info structure
+
+ DZLIST macros
+
+ Having the list info structure separate from the element structure also
+ allows linking elements into many separate lists (with separate info
+ structure fields/pointers). However, for cases where only a single list
+ is desired, a set of easy macros is also provided.
+
+ These macros combine the element and list structures. These macros
+ require the following 2 members in each element structure:
+
+ *next, *last: as pointers to the element structure. These are the next and
+ previous elements in the list.
+
+ :NOTE: In case of the DZLIST-s, the head of the list must be an element
+ structure, where only the next and last members are used. */
+
+/*
+ Usage:
+
+ DLIST macros are designed for preallocating the list info structures, e.g. in
+ an array. This is why most macros take a list info structure, and not a
+ pointer to a list info structure.
+
+ Basic linked list consists of element structure and list info structure
+
+ struct elem {
+ int data;
+ } *elA, *elB;
+ struct list {
+ struct elem *me;
+ struct list *last, *next;
+ } head, *inA, *inB, *in;
+
+ DLIST_INIT(head); // initialization -> ()
+ DLIST_IS_EMPTY(head) == TRUE; // emptiness check
+
+ // add element at beginning of list -> (1)
+ elA = NEW(struct elem);
+ elA->data = 1;
+ inA = NEW(struct list);
+ DLIST_ADD_AFTER(head, elA, *inA);
+
+ // add before an element -> (2, 1)
+ elB = NEW(struct elem);
+ elB->data = 2;
+ inB = NEW(struct list);
+ DLIST_ADD_BEFORE(*inA, elB, *inB);
+
+ // move an element to another position or another list -> (1, 2)
+ DLIST_MOVE_BEFORE(head, *inB);
+
+ // works even if the position is the same -> (1, 2)
+ DLIST_MOVE_BEFORE(head, *inB);
+
+ // get first and last elements
+ DLIST_FIRST(head) == elA;
+ DLIST_LAST(head) == elB;
+
+ // loop through elements
+ DLIST_LOOP(head, in) {
+ P("%d", in->me->data);
+ }
+
+ // remove element -> (2)
+ DLIST_REMOVE(*inA);
+ FREE(elA);
+ FREE(inA);
+
+ // delete list
+ DLIST_SAFE_LOOP(head, in, inA) {
+ DLIST_REMOVE(*in);
+ FREE(in->me);
+ FREE(in);
+ }
+
+ You can combine the element and list info structures to create an easy list,
+ but you still need to specify both element and info structure while adding
+ elements.
+
+ struct elem {
+ int data;
+ struct elem *me, *last, *next;
+ } head, *el, *elA, *elB;
+
+ DLIST_INIT(head); // initialization -> ()
+ DLIST_IS_EMPTY(head) == TRUE; // emptiness check
+
+ // add element at beginning of list -> (1)
+ elA = NEW(struct elem);
+ elA->data = 1;
+ DLIST_ADD_AFTER(head, elA, *elA);
+
+ // add before an element -> (2, 1)
+ elB = NEW(struct elem);
+ elB->data = 2;
+ DLIST_ADD_BEFORE(*elA, elB, *elB);
+
+ // move an element to another position or another list -> (1, 2)
+ DLIST_MOVE_BEFORE(head, *elB);
+
+ // works even if the position is the same -> (1, 2)
+ DLIST_MOVE_BEFORE(head, *elB);
+
+ // get first and last elements
+ DLIST_FIRST(head) == elA;
+ DLIST_LAST(head) == elB;
+
+ // loop through elements
+ DLIST_LOOP(head, el) {
+ P("%d", el->data);
+ }
+
+ // remove element -> (2)
+ DLIST_REMOVE(*elA);
+ FREE(elA);
+
+ // delete list
+ DLIST_SAFE_LOOP(head, el, elA) {
+ DLIST_REMOVE(*el);
+ FREE(el);
+ }
+
+ Or, you can use a DZLIST.
+
+ struct elem {
+ int data;
+ struct elem *last, *next;
+ } head, *el, *elA, *elB;
+
+ DZLIST_INIT(head); // initialization -> ()
+ DZLIST_IS_EMPTY(head) == TRUE; // emptiness check
+
+ // add element at beginning of list -> (1)
+ elA = NEW(struct elem);
+ elA->data = 1;
+ DZLIST_ADD_AFTER(head, *elA);
+
+ // add before an element -> (2, 1)
+ elB = NEW(struct elem);
+ elB->data = 2;
+ DZLIST_ADD_BEFORE(*elA, *elB);
+
+ // move an element to another position or another list -> (1, 2)
+ DZLIST_MOVE_BEFORE(head, *elB);
+
+ // works even if the position is the same -> (1, 2)
+ DZLIST_MOVE_BEFORE(head, *elB);
+
+ // get first and last elements
+ DZLIST_FIRST(head) == elA;
+ DZLIST_LAST(head) == elB;
+
+ // loop through elements
+ DZLIST_LOOP(head, el) {
+ P("%d", el->data);
+ }
+
+ // remove element -> (2)
+ DZLIST_REMOVE(*elA);
+ FREE(elA);
+
+ // delete list
+ DZLIST_SAFE_LOOP(head, el, elA) {
+ DZLIST_REMOVE(*el);
+ FREE(el);
+ }
+
+ A better way to get to the list structure from the element structure is to
+ enclose a pointer to the list structure in the element structure. This allows
+ getting to the next/previous element from the element itself.
+
+ struct elem;
+ struct list {
+ struct elem *me;
+ struct list *last, *next;
+ } head, *inA, *inB, *in;
+ struct elem {
+ int data;
+ struct list *list_data;
+ } *elA, *elB, *el;
+
+ // or
+
+ struct elem {
+ int data;
+ struct list {
+ struct elem *me;
+ struct list *last, *next;
+ } *list_data;
+ } *elA, *elB, *el;
+ struct list head, *inA, *inB, *in;
+
+ DLIST_INIT(head); // initialization -> ()
+ DLIST_IS_EMPTY(head) == TRUE; // emptiness check
+
+ // add element at beginning of list -> (1)
+ elA = NEW(struct elem);
+ elA->data = 1;
+ inA = NEW(struct list);
+ DLIST_PADD_AFTER(head, elA, inA, list_data);
+
+ // add before an element -> (2, 1)
+ elB = NEW(struct elem);
+ elB->data = 2;
+ inB = NEW(struct list);
+ DLIST_PADD_BEFORE(*elA, elB, inB, list_data);
+
+ // move an element to another position or another list -> (1, 2)
+ DLIST_MOVE_BEFORE(head, *inB);
+
+ // works even if the position is the same -> (1, 2)
+ DLIST_MOVE_BEFORE(head, *inB);
+
+ // get first and last elements
+ DLIST_FIRST(head) == elA;
+ DLIST_LAST(head) == elB;
+
+ // loop through elements
+ DLIST_LOOP(head, in) {
+ P("%d", in->me->data);
+ }
+ DLIST_PLOOP(head, el, list_data) {
+ P("%d", el->data);
+ }
+
+ // remove element
+ DLIST_REMOVE(*inA);
+ FREE(inA);
+ FREE(elA);
+
+ // delete list
+ DLIST_SAFE_PLOOP(head, el, elA, list_data) {
+ DLIST_REMOVE(*el->list_data);
+ FREE(el->list_data);
+ FREE(el);
+ }
+
+ Lastly, you can include the list data in the element structure itself.
+
+ struct elem {
+ int data;
+ struct list {
+ struct list *last, *next;
+ struct elem *me;
+ } list_data;
+ } *elA, *elB, *el;
+ struct list head, *in;
+
+ DLIST_INIT(head); // initialization -> ()
+ DLIST_IS_EMPTY(head) == TRUE; // emptiness check
+
+ // add element at beginning of list -> (1)
+ elA = NEW(struct elem);
+ elA->data = 1;
+ DLIST_MADD_AFTER(head, elA, list_data);
+
+ // add before an element -> (2, 1)
+ elB = NEW(struct elem);
+ elB->data = 2;
+ DLIST_PADD_BEFORE(elA->list_data, elB, list_data);
+
+ // move an element to another position or another list -> (1, 2)
+ DLIST_MOVE_BEFORE(head, elB->list_data);
+
+ // works even if the position is the same -> (1, 2)
+ DLIST_MOVE_BEFORE(head, elB->list_data);
+
+ // get first and last elements
+ DLIST_FIRST(head) == elA;
+ DLIST_LAST(head) == elB;
+
+ // loop through elements
+ DLIST_LOOP(head, in) {
+ P("%d", in->me->data);
+ }
+ DLIST_MLOOP(head, el, list_data) {
+ P("%d", el->data);
+ }
+
+ // remove element
+ DLIST_REMOVE(elA->list_data);
+ FREE(elA);
+
+ // delete list
+ DLIST_SAFE_MLOOP(head, el, elA, list_data) {
+ DLIST_REMOVE(el->list_data);
+ FREE(el);
+ }
+
+ */
+
+/* -- internal (generic direction) macros -- */
+#define DLIST_move__(base, info, next, last) \
+ ((info).last = &base)->next = ((info).next = (base).next)->last = &(info)
+#define DLIST_padd__(base, pInfo, pElem, pField, next, last) \
+ ((pInfo)->last = &base)->next = ((pInfo)->next = (base).next)->last = \
+ pElem->pField = pInfo
+#define DLIST_loop__(head, pInfo, next) \
+ for (pInfo=(head).next; pInfo != &(head); pInfo = (pInfo)->next)
+#define DLIST_ploop__(head, pElem, pField, next) \
+ for (pElem=(head).next->me; pElem; pElem = (pElem)->pField->next->me)
+#define DLIST_mloop__(head, pElem, mField, next) \
+ for (pElem=(head).next->me; pElem; pElem = (pElem)->mField.next->me)
+#define DLIST_safe_loop__(head, pInfo, pInfo_safe, next) \
+ for (pInfo=(head).next; pInfo != &(head); pInfo = pInfo_safe) \
+ if ((pInfo_safe = (pInfo)->next) || 1)
+#define DLIST_safe_ploop__(head, pElem, pElem_safe, pField, next) \
+ for (pElem=(head).next->me; pElem; pElem = pElem_safe) \
+ if ((pElem_safe = (pElem)->pField->next->me) || 1)
+#define DLIST_safe_mloop__(head, pElem, pElem_safe, mField, next) \
+ for (pElem=(head).next->me; pElem; pElem = pElem_safe) \
+ if ((pElem_safe = (pElem)->mField.next->me) || 1)
+
+#define DLIST_IS_EMPTY(head) ((head).next == &(head))
+
+/* Adds the element (referred to by the info structure) before or after another
+ element (or list header) (base). */
+
+#define DLIST_ADD_AFTER(base, pElem, info) \
+ (DLIST_move__(base, info, next, last))->me = pElem
+#define DLIST_ADD_BEFORE(base, pElem, info) \
+ (DLIST_move__(base, info, last, next))->me = pElem
+
+/* Adds the element (referred to by pElem pointer) along with its info
+ structure (referred to by pInfo pointer) before or after an element or
+ list header (base). It also sets up the list structure header to point to
+ the element as well as the element's field to point back to the list info
+ structure. */
+#define DLIST_PADD_BEFORE(base, pElem, pInfo, pField) \
+ (DLIST_padd__(base, pInfo, pElem, pField, last, next))->me = pElem
+#define DLIST_PADD_AFTER(base, pElem, pInfo, pField) \
+ (DLIST_padd__(base, pInfo, pElem, pField, next, last))->me = pElem
+
+/* Adds the element (referred to by pElem pointer) before or after an element or
+ list header (base). It also sets up the list structure header (which is a
+ member of the element's structure) to point to the element. */
+#define DLIST_MADD_BEFORE(base, pElem, mField) \
+ (DLIST_move__(base, pElem->mField, last, next))->me = pElem
+#define DLIST_MADD_AFTER(base, pElem, mField) \
+ (DLIST_move__(base, pElem->mField, next, last))->me = pElem
+
+/* Removes the element (referred to by the info structure) from its current
+ list. This requires that the element is a part of a list.
+
+ :NOTE: the info structure will still think that it belongs to the list it
+ used to belong to. However, the old list will not contain this element any
+ longer. You want to discard the info/element after this call. Otherwise,
+ you can use one of the MOVE macros to also add the item to another list,
+ or another place in the same list. */
+#define DLIST_REMOVE(info) ((info).last->next = (info).next)->last = (info).last
+
+/* Initializes the list header (to an empty list) */
+#define DLIST_INIT(head) \
+ do { (head).me = NULL; (head).next = (head).last = &(head); } while(0)
+
+/* These functions move an element (referred to by the info structure) before
+ or after another element (or the list head).
+ :NOTE: This logic also works for moving an element after/before itself. */
+#define DLIST_MOVE_AFTER(base, info) \
+ do { DLIST_REMOVE(info); DLIST_move__(base, info, next, last); } while(0)
+#define DLIST_MOVE_BEFORE(base, info) \
+ do { DLIST_REMOVE(info); DLIST_move__(base, info, last, next); } while(0)
+
+/* Loops behave syntactically as a for() statement. They traverse the loop
+ variable from the 1st to the last element (or in the opposite direction in
+ case of RLOOP). There are 3 flavors of loops depending on the type of the
+ loop variable.
+
+ DLIST_LOOP's loop variable is a pointer to the list info structure. You can
+ get to the element by using the 'me' member. Nonetheless, this loop
+ construct allows having NULL elements in the list.
+
+ DLIST_MLOOP's loop variable is a pointer to a list element. mField is the
+ field of the element containing the list info structure. Naturally, this
+ list cannot have NULL elements.
+
+ DLIST_PLOOP's loop variable is also a pointer to a list element. Use this
+ construct if the element contains a pointer to the list info structure
+ instead of embedding it directly into the element structure.
+
+*/
+#define DLIST_LOOP(head, pInfo) DLIST_loop__(head, pInfo, next)
+#define DLIST_RLOOP(head, pInfo) DLIST_loop__(head, pInfo, last)
+#define DLIST_MLOOP(head, pElem, mField) \
+ DLIST_mloop__(head, pElem, mField, next)
+#define DLIST_RMLOOP(head, pElem, mField) \
+ DLIST_mloop__(head, pElem, mField, last)
+#define DLIST_PLOOP(head, pElem, pField) \
+ DLIST_ploop__(head, pElem, pField, next)
+#define DLIST_RPLOOP(head, pElem, pField) \
+ DLIST_ploop__(head, pElem, pField, last)
+
+/* Safe loops are like ordinary loops, but they allow removal of the current
+ element from the list. They require an extra loop variable that holds the
+ value of the next element in case the current element is moved/removed. */
+#define DLIST_SAFE_LOOP(head, pInfo, pInfo_safe) \
+ DLIST_safe_loop__(head, pInfo, pInfo_safe, next)
+#define DLIST_SAFE_RLOOP(head, pInfo, pInfo_safe) \
+ DLIST_safe_loop__(head, pInfo, pInfo_safe, last)
+#define DLIST_SAFE_MLOOP(head, pElem, pElem_safe, mField) \
+ DLIST_safe_mloop__(head, pElem, pElem_safe, mField, next)
+#define DLIST_SAFE_RMLOOP(head, pElem, pElem_safe, mField) \
+ DLIST_safe_mloop__(head, pElem, pElem_safe, mField, last)
+#define DLIST_SAFE_PLOOP(head, pElem, pElem_safe, pField) \
+ DLIST_safe_ploop__(head, pElem, pElem_safe, pField, next)
+#define DLIST_SAFE_RPLOOP(head, pElem, pElem_safe, pField) \
+ DLIST_safe_ploop__(head, pElem, pElem_safe, pField, last)
+
+/* returns the first element of a list */
+#define DLIST_FIRST(head) (head).next->me
+/* returns the last element of a list */
+#define DLIST_LAST(head) (head).last->me
+
+
+/* DZLIST equivalent API - provided so arguments are specified */
+#define DZLIST_INIT(head) do { (head).next = (head).last = &(head); } while(0)
+#define DZLIST_IS_EMPTY(head) DLIST_IS_EMPTY(head)
+#define DZLIST_FIRST(head) (head).next
+#define DZLIST_LAST(head) (head).last
+
+#define DZLIST_ADD_AFTER(base, elem) DLIST_move__(base, elem, next, last)
+#define DZLIST_ADD_BEFORE(base, elem) DLIST_move__(base, elem, last, next)
+
+#define DZLIST_REMOVE(elem) DLIST_REMOVE(elem)
+
+#define DZLIST_MOVE_AFTER(base, elem) DLIST_MOVE_AFTER(base, elem)
+#define DZLIST_MOVE_BEFORE(base, elem) DLIST_MOVE_BEFORE(base, elem)
+
+#define DZLIST_LOOP(head, pElem) DLIST_LOOP(head, pElem)
+#define DZLIST_RLOOP(head, pElem) DLIST_RLOOP(head, pElem)
+#define DZLIST_SAFE_LOOP(head, pElem, pElem_safe) \
+ DLIST_SAFE_LOOP(head, pElem, pElem_safe)
+#define DZLIST_SAFE_RLOOP(head, pElem, pElem_safe) \
+ DLIST_SAFE_RLOOP(head, pElem, pElem_safe)
+
+#endif
+
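As a quick sanity check of the macros above, here is a self-contained sketch (not part of the patch) of the DZLIST flavor; it uses plain calloc/free/printf in place of the NEW/FREE/P helpers that the header's own examples assume from utils.h:

    #include <stdio.h>
    #include <stdlib.h>
    #include "list_utils.h"

    struct elem {
        int data;
        struct elem *last, *next;         /* members required by the DZLIST macros */
    };

    int main(void)
    {
        struct elem head, *el, *el_safe;  /* head uses only last/next */
        int i;

        DZLIST_INIT(head);
        for (i = 1; i <= 3; i++) {
            el = calloc(1, sizeof(*el));
            el->data = i;
            DZLIST_ADD_BEFORE(head, *el); /* append at the tail -> (1, 2, 3) */
        }

        DZLIST_LOOP(head, el) {
            printf("%d\n", el->data);     /* prints 1, 2, 3 */
        }

        DZLIST_SAFE_LOOP(head, el, el_safe) { /* safe loop allows removal */
            DZLIST_REMOVE(*el);
            free(el);
        }
        return 0;
    }
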
diff --git a/tiler/mem_types.h b/tiler/mem_types.h
new file mode 100644
index 0000000..18453bb
--- /dev/null
+++ b/tiler/mem_types.h
@@ -0,0 +1,101 @@
+/*
+ * mem_types.h
+ *
+ * Type definitions for the Memory Interface for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MEM_TYPES_H_
+#define _MEM_TYPES_H_
+
+/* for bool definition */
+#include <stdbool.h>
+#include <stdint.h>
+
+/** ---------------------------------------------------------------------------
+ * Type definitions
+ */
+
+/**
+ * Buffer length in bytes
+ */
+typedef uint32_t bytes_t;
+
+/**
+ * Length in pixels
+ */
+typedef uint16_t pixels_t;
+
+
+/**
+ * Pixel format
+ *
+ * Page mode is encoded in the pixel format to handle different
+ * sets of buffers uniformly
+ */
+enum pixel_fmt_t {
+ PIXEL_FMT_MIN = 0,
+ PIXEL_FMT_8BIT = 0,
+ PIXEL_FMT_16BIT = 1,
+ PIXEL_FMT_32BIT = 2,
+ PIXEL_FMT_PAGE = 3,
+ PIXEL_FMT_MAX = 3
+};
+
+typedef enum pixel_fmt_t pixel_fmt_t;
+
+/**
+ * Ducati Space Virtual Address Pointer
+ *
+ * This is handled as an unsigned long so that no dereferencing
+ * is allowed by user space components.
+ */
+typedef uint32_t DSPtr;
+
+/**
+ * System Space Pointer
+ *
+ * This is handled as an unsigned long so that no dereferencing
+ * is allowed by user space components.
+ */
+typedef uint32_t SSPtr;
+
+/**
+ * Error values
+ *
+ * Error codes returned by the MemMgr APIs
+ */
+#define MEMMGR_ERR_NONE 0
+#define MEMMGR_ERR_GENERIC 1
+
+#endif
+
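A small sketch (not part of the patch) of how these types are meant to be combined: bytes per pixel follows from pixel_fmt_t, and 2D containers are padded out to a page-aligned stride. The 4096-byte page constant and the helpers below are assumptions for illustration; memmgr.c below derives the real values from PAGE_SIZE via its def_bpp()/def_stride() helpers:

    #include "mem_types.h"

    static bytes_t bpp(pixel_fmt_t fmt)
    {
        /* PIXEL_FMT_8BIT and PIXEL_FMT_PAGE are both byte-addressed */
        return fmt == PIXEL_FMT_32BIT ? 4 : fmt == PIXEL_FMT_16BIT ? 2 : 1;
    }

    static bytes_t buf_size(pixel_fmt_t fmt, pixels_t width, pixels_t height)
    {
        bytes_t line   = (bytes_t)width * bpp(fmt);       /* bytes per line */
        bytes_t stride = (line + 4095) & ~(bytes_t)4095;  /* page-aligned   */
        return stride * height;
    }

    /* e.g. buf_size(PIXEL_FMT_16BIT, 176, 144) == 4096 * 144 */
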
diff --git a/tiler/memmgr.c b/tiler/memmgr.c
new file mode 100644
index 0000000..1f780cc
--- /dev/null
+++ b/tiler/memmgr.c
@@ -0,0 +1,1078 @@
+/*
+ * memmgr.c
+ *
+ * Memory Allocator Interface functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+#include <errno.h>
+
+#define BUF_ALLOCED 1
+#define BUF_MAPPED 2
+#define BUF_ANY ~0
+
+#include <tiler.h>
+
+typedef struct tiler_block_info tiler_block_info;
+
+#define __DEBUG__
+#undef __DEBUG_ENTRY__
+#define __DEBUG_ASSERT__
+
+#ifdef HAVE_CONFIG_H
+ #include "config.h"
+#endif
+#include "utils.h"
+#include "list_utils.h"
+#include "debug_utils.h"
+#include "tilermem.h"
+#include "tilermem_utils.h"
+#include "memmgr.h"
+
+/* list of allocations */
+struct _AllocData {
+ struct tiler_buf_info buf;
+ int buf_type;
+ struct _AllocList {
+ struct _AllocList *next, *last;
+ struct _AllocData *me;
+ } link;
+};
+static struct _AllocList bufs = {0};
+static int bufs_inited = 0;
+
+typedef struct _AllocList _AllocList;
+typedef struct _AllocData _AllocData;
+
+static int refCnt = 0;
+static int td = -1;
+static pthread_mutex_t ref_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t che_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/**
+ * Initializes the static structures
+ *
+ * @author a0194118 (9/8/2009)
+ */
+static void init()
+{
+ if (!bufs_inited)
+ {
+ DLIST_INIT(bufs);
+ bufs_inited = 1;
+ }
+}
+
+/**
+ * Increases the reference count. Initializes tiler if this is
+ * the first reference
+ *
+ * @author a0194118 (9/2/2009)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static int inc_ref()
+{
+ /* initialize tiler on first call */
+ pthread_mutex_lock(&ref_mutex);
+
+ int res = MEMMGR_ERR_NONE;
+
+ if (!refCnt++) {
+ /* initialize lists */
+ init();
+#ifndef STUB_TILER
+ td = open("/dev/tiler", O_RDWR | O_SYNC);
+ if (NOT_I(td,>=,0)) res = MEMMGR_ERR_GENERIC;
+#else
+ td = 2;
+ res = MEMMGR_ERR_NONE;
+#endif
+ }
+ if (res)
+ {
+ refCnt--;
+ }
+
+ pthread_mutex_unlock(&ref_mutex);
+ return res;
+}
+
+/**
+ * Decreases the reference count. Deinitializes tiler if this
+ * is the last reference
+ *
+ * @author a0194118 (9/2/2009)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static int dec_ref()
+{
+ pthread_mutex_lock(&ref_mutex);
+
+ int res = MEMMGR_ERR_NONE;
+
+ if (refCnt <= 0) res = MEMMGR_ERR_GENERIC;
+ else if (!--refCnt) {
+#ifndef STUB_TILER
+ close(td);
+ td = -1;
+#endif
+ }
+
+ pthread_mutex_unlock(&ref_mutex);
+ return res;
+}
+
+/**
+ * Returns the default page stride for this block
+ *
+ * @author a0194118 (9/4/2009)
+ *
+ * @param width Width of 2D container in bytes
+ *
+ * @return Stride
+ */
+static bytes_t def_stride(bytes_t width)
+{
+ return ROUND_UP_TO2POW(width, PAGE_SIZE);
+}
+
+/**
+ * Returns the bytes per pixel for the pixel format.
+ *
+ * @author a0194118 (9/4/2009)
+ *
+ * @param pixelFormat Pixelformat
+ *
+ * @return Bytes per pixel
+ */
+static bytes_t def_bpp(pixel_fmt_t pixelFormat)
+{
+ return (pixelFormat == PIXEL_FMT_32BIT ? 4 :
+ pixelFormat == PIXEL_FMT_16BIT ? 2 : 1);
+}
+
+/**
+ * Returns the size of the supplied block
+ *
+ * @author a0194118 (9/4/2009)
+ *
+ * @param blk Pointer to the tiler_block_info struct
+ *
+ * @return size of the block in bytes
+ */
+static bytes_t def_size(tiler_block_info *blk)
+{
+ return (blk->fmt == PIXEL_FMT_PAGE ?
+ def_stride(blk->dim.len) :
+ blk->dim.area.height * def_stride(blk->dim.area.width * def_bpp(blk->fmt)));
+}
+
+/**
+ * Returns the map size based on an offset and buffer size
+ *
+ * @author a0194118 (7/8/2010)
+ *
+ * @param size Size of buffer
+ * @param offs Buffer offset
+ *
+ * @return (page aligned) size for mapping
+ */
+static bytes_t map_size(bytes_t size, bytes_t offs)
+{
+ return def_stride(size + (offs & (PAGE_SIZE - 1)));
+}
+
+/**
+ * Records a buffer info structure for a specific buffer type
+ * in the allocation cache.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param buf      Pointer to the buffer info structure
+ * @param buf_type Buffer type: BUF_ALLOCED or BUF_MAPPED
+ *
+ * @return 0 on success, -ENOMEM on memory allocation failure
+ */
+static int buf_cache_add(struct tiler_buf_info *buf, int buf_type)
+{
+ pthread_mutex_lock(&che_mutex);
+ _AllocData *ad = NEW(_AllocData);
+ if (ad)
+ {
+ memcpy(&ad->buf, buf, sizeof(ad->buf));
+ ad->buf_type = buf_type;
+ DLIST_MADD_BEFORE(bufs, ad, link);
+ }
+ pthread_mutex_unlock(&che_mutex);
+ return ad == NULL ? -ENOMEM : 0;
+}
+
+/**
+ * Looks up the buffer info for a given pointer and buffer type
+ * mask in the records. If the pointer lies within a tracked
+ * buffer, its buffer info is copied into *buf; otherwise *buf
+ * is zeroed out.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param ptr           Pointer into a buffer
+ * @param buf           Output: buffer info of the tracked buffer
+ * @param buf_type_mask Buffer type mask: BUF_ALLOCED, BUF_MAPPED or BUF_ANY
+ */
+static void buf_cache_query(void *ptr, struct tiler_buf_info *buf,
+ int buf_type_mask)
+{
+ IN;
+ if(0) DP("in(p=%p,t=%d,bp*=%p)", ptr, buf_type_mask, buf->blocks[0].ptr);
+ _AllocData *ad;
+ pthread_mutex_lock(&che_mutex);
+ DLIST_MLOOP(bufs, ad, link) {
+ if(0) {
+ DP("got(%p-%p,%d)", ad->buf.blocks->ptr, ad->buf.blocks->ptr + ad->buf.length, ad->buf_type);
+ CHK_P(ad->buf.blocks->ptr,<=,ptr);
+ CHK_P(ptr,<,ad->buf.blocks->ptr + ad->buf.length);
+ CHK_I(ad->buf_type & buf_type_mask,!=,0);
+ CHK_P(buf->blocks->ptr,!=,0);
+ }
+ if ((ad->buf_type & buf_type_mask) &&
+ ad->buf.blocks->ptr <= ptr && ptr < ad->buf.blocks->ptr + ad->buf.length) {
+ memcpy(buf, &ad->buf, sizeof(*buf));
+ pthread_mutex_unlock(&che_mutex);
+ return;
+ }
+ }
+ pthread_mutex_unlock(&che_mutex);
+ ZERO(*buf);
+ OUT;
+}
+
+/**
+ * Looks up the buffer info for a given buffer pointer and buffer
+ * type in the records. If found, it is copied into *buf and
+ * removed from the records as well.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param bufPtr   Buffer pointer
+ * @param buf      Output: buffer info of the matching buffer
+ * @param buf_type Buffer type: BUF_ALLOCED or BUF_MAPPED
+ */
+static void buf_cache_del(void *bufPtr, struct tiler_buf_info *buf,
+ int buf_type)
+{
+ _AllocData *ad;
+ pthread_mutex_lock(&che_mutex);
+ DLIST_MLOOP(bufs, ad, link) {
+ if (ad->buf.blocks->ptr == bufPtr && ad->buf_type == buf_type) {
+ memcpy(buf, &ad->buf, sizeof(*buf));
+ DLIST_REMOVE(ad->link);
+ FREE(ad);
+ pthread_mutex_unlock(&che_mutex);
+ return;
+ }
+ }
+ pthread_mutex_unlock(&che_mutex);
+ OUT;
+ return;
+}
+
+/**
+ * Checks the consistency of the internal record cache. The
+ * number of elements in the cache should equal the number of
+ * references.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static int cache_check()
+{
+ int num_bufs = 0;
+ pthread_mutex_lock(&che_mutex);
+
+ init();
+
+ _AllocData *ad;
+ DLIST_MLOOP(bufs, ad, link) { num_bufs++; }
+
+ pthread_mutex_unlock(&che_mutex);
+ return (num_bufs == refCnt) ? MEMMGR_ERR_NONE : MEMMGR_ERR_GENERIC;
+}
+
+static void dump_block(struct tiler_block_info *blk, char *prefix, char *suffix)
+{
+#if 0
+ switch (blk->fmt)
+ {
+ case TILFMT_PAGE:
+ P("%s [%d:(%d,%08x), p=%p(0x%x),l=0x%x,s=%d,%d+%d]%s", prefix, blk->group_id, blk->key, blk->id, blk->ptr, blk->ssptr,
+ blk->dim.len, blk->stride, blk->align, blk->offs, suffix);
+ break;
+ case TILFMT_8BIT:
+ case TILFMT_16BIT:
+ case TILFMT_32BIT:
+ P("%s [%d:(%d,%08x), p=%p(0x%x),%d*%d*%d,s=%d,%d+%d]%s", prefix, blk->group_id, blk->key, blk->id, blk->ptr, blk->ssptr,
+ blk->dim.area.width, blk->dim.area.height, def_bpp(blk->fmt) * 8,
+ blk->stride, blk->align, blk->offs, suffix);
+ break;
+ default:
+ P("%s*[%d:(%d,%08x), p=%p(0x%x),l=0x%x,s=%d,%d+%d,fmt=0x%x]%s", prefix, blk->group_id, blk->key, blk->id, blk->ptr,
+ blk->ssptr, blk->dim.len, blk->stride, blk->align, blk->offs, blk->fmt, suffix);
+ }
+#endif
+}
+
+static void dump_buf(struct tiler_buf_info* buf, char* prefix)
+{
+#if 0
+ P("%sbuf={n=%d,id=0x%x,len=0x%x", prefix, buf->num_blocks, buf->offset, buf->length);
+ int ix = 0;
+ for (ix = 0; ix < buf->num_blocks; ix++)
+ {
+ dump_block(buf->blocks + ix, "", ix + 1 == buf->num_blocks ? "}" : "");
+ }
+#endif
+}
+
+/**
+ * Returns the tiler format for an address
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param ssptr Address
+ *
+ * @return The tiler format
+ */
+static enum tiler_fmt tiler_get_fmt(SSPtr ssptr)
+{
+#ifndef STUB_TILER
+ return (ssptr == 0 ? TILFMT_INVALID :
+ ssptr < TILER_MEM_8BIT ? TILFMT_NONE :
+ ssptr < TILER_MEM_16BIT ? TILFMT_8BIT :
+ ssptr < TILER_MEM_32BIT ? TILFMT_16BIT :
+ ssptr < TILER_MEM_PAGED ? TILFMT_32BIT :
+ ssptr < TILER_MEM_END ? TILFMT_PAGE : TILFMT_NONE);
+#else
+ /* if emulating, we need to get through all allocated memory segments */
+ pthread_mutex_lock(&che_mutex);
+ init();
+ _AllocData *ad;
+ void *ptr = (void *) ssptr;
+ if (!ptr) return TILFMT_INVALID;
+ /* P("?%p", (void *)ssptr); */
+ DLIST_MLOOP(bufs, ad, link) {
+ int ix;
+ struct tiler_buf_info *buf = (struct tiler_buf_info *) ad->buf.offset;
+ /* P("buf[%d]", buf->num_blocks); */
+ for (ix = 0; ix < buf->num_blocks; ix++)
+ {
+ /* P("block[%p-%p]", buf->blocks[ix].ptr, buf->blocks[ix].ptr + def_size(buf->blocks + ix)); */
+ if (ptr >= buf->blocks[ix].ptr &&
+ ptr < buf->blocks[ix].ptr + def_size(buf->blocks + ix)) {
+ enum tiler_fmt fmt = buf->blocks[ix].fmt;
+ pthread_mutex_unlock(&che_mutex);
+ return fmt;
+ }
+ }
+ }
+ pthread_mutex_unlock(&che_mutex);
+ return TILFMT_NONE;
+#endif
+}
+
+
+/**
+ * Allocates a memory block using tiler
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param blk Pointer to the block info
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static int tiler_alloc(struct tiler_block_info *blk)
+{
+ dump_block(blk, "=(ta)=>", "");
+ blk->ptr = NULL;
+ int ret = A_S(ioctl(td, TILIOC_GBLK, blk),==,0);
+ dump_block(blk, "alloced: ", "");
+ return R_I(ret);
+}
+
+/**
+ * Frees a memory block using tiler
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param blk Pointer to the block info
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static int tiler_free(struct tiler_block_info *blk)
+{
+ return R_I(ioctl(td, TILIOC_FBLK, blk));
+}
+
+/**
+ * Maps a memory block into tiler space using the tiler driver
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param blk Pointer to the block info
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static int tiler_map(struct tiler_block_info *blk)
+{
+ dump_block(blk, "=(tm)=>", "");
+ int ret = A_S(ioctl(td, TILIOC_MBLK, blk),==,0);
+ dump_block(blk, "mapped: ", "");
+ return R_I(ret);
+}
+
+/**
+ * Unmaps a memory block from tiler space using the tiler driver
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param blk Pointer to the block info
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static int tiler_unmap(struct tiler_block_info *blk)
+{
+ return ioctl(td, TILIOC_UMBLK, blk);
+}
+
+/**
+ * Registers a buffer (an array of blocks) with tiler, maps it
+ * into the process address space, and records it in the
+ * internal buffer cache with the given buffer type.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param blks       Pointer to an array of block info structures
+ * @param num_blocks Number of blocks
+ * @param buf_type   Buffer type: BUF_ALLOCED or BUF_MAPPED
+ *
+ * @return pointer to the mapped buffer, or NULL on failure.
+ */
+static void *tiler_mmap(struct tiler_block_info *blks, int num_blocks,
+ int buf_type)
+{
+ IN;
+
+ /* get size */
+ int ix;
+ bytes_t size;
+
+ /* register buffer with tiler */
+ struct tiler_buf_info buf;
+ buf.num_blocks = num_blocks;
+ /* work on copy in buf */
+ memcpy(buf.blocks, blks, sizeof(tiler_block_info) * num_blocks);
+#ifndef STUB_TILER
+ dump_buf(&buf, "==(RBUF)=>");
+ int ret = ioctl(td, TILIOC_RBUF, &buf);
+ dump_buf(&buf, "<=(RBUF)==");
+ if (NOT_I(ret,==,0)) return NULL;
+ size = buf.length;
+#else
+ /* save buffer in stub */
+ struct tiler_buf_info *buf_c = NEWN(struct tiler_buf_info,2);
+ buf.offset = (uint32_t) buf_c;
+#endif
+ if (NOT_P(buf.offset,!=,0)) return NULL;
+
+ /* map blocks to process space */
+#ifndef STUB_TILER
+ void *bufPtr = mmap(0, map_size(size, buf.offset),
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ td, buf.offset & ~(PAGE_SIZE - 1));
+ if (bufPtr == MAP_FAILED){
+ bufPtr = NULL;
+ } else {
+ bufPtr += buf.offset & (PAGE_SIZE - 1);
+ }
+ if(0) DP("ptr=%p", bufPtr);
+#else
+ void *bufPtr = malloc(size + PAGE_SIZE - 1);
+ buf_c[1].blocks[0].ptr = bufPtr;
+ bufPtr = ROUND_UP_TO2POW(bufPtr, PAGE_SIZE);
+ /* P("<= [0x%x]", size); */
+
+ /* fill out pointers - this is needed for caching 1D/2D type */
+ for (size = ix = 0; ix < num_blocks; ix++)
+ {
+ buf.blocks[ix].ptr = bufPtr ? bufPtr + size : bufPtr;
+ size += def_size(blks + ix);
+ }
+
+ memcpy(buf_c, &buf, sizeof(struct tiler_buf_info));
+#endif
+
+ if (bufPtr != NULL)
+ {
+ /* fill out pointers */
+ for (size = ix = 0; ix < num_blocks; ix++)
+ {
+ buf.blocks[ix].ptr = bufPtr + size;
+ /* P(" [0x%p]", buf.blocks[ix].ptr); */
+ size += def_size(blks + ix);
+#ifdef STUB_TILER
+ buf.blocks[ix].ssptr = (uint32_t) buf.blocks[ix].ptr;
+#else
+ CHK_I((buf.blocks[ix].ssptr & (PAGE_SIZE - 1)),==,((uint32_t) buf.blocks[ix].ptr & (PAGE_SIZE - 1)));
+#endif
+ }
+ }
+ /* if failed to map: unregister buffer */
+ if (NOT_P(bufPtr,!=,NULL) ||
+ /* or failed to cache tiler buffer */
+ NOT_I(buf_cache_add(&buf, buf_type),==,0))
+ {
+#ifndef STUB_TILER
+ A_I(ioctl(td, TILIOC_URBUF, &buf),==,0);
+#else
+ FREE(buf_c);
+ buf.offset = 0;
+#endif
+ } else {
+ /* update original from local copy */
+ memcpy(blks, buf.blocks, sizeof(tiler_block_info) * num_blocks);
+ }
+
+ return R_P(bufPtr);
+}
+
+/**
+ * Checks whether the tiler_block_info is filled in correctly.
+ * Verifies the pixel format, correct length, width and/or
+ * height, the length/stride relationship for 1D buffers, and
+ * the correct stride for 2D buffers. Also verifies block size
+ * to be page sized if desired.
+ *
+ * @author a0194118 (9/4/2009)
+ *
+ * @param blk Pointer to the tiler_block_info struct
+ * @param is_page_sized Whether the block needs to be page
+ * sized (fit on whole pages).
+ * @return 0 on success, non-0 error value on failure.
+ */
+static int check_block(tiler_block_info *blk, bool is_page_sized)
+{
+ /* check pixelformat */
+ if (NOT_I(blk->fmt,>=,PIXEL_FMT_MIN) ||
+ NOT_I(blk->fmt,<=,PIXEL_FMT_MAX)) return MEMMGR_ERR_GENERIC;
+
+
+ if (blk->fmt == PIXEL_FMT_PAGE)
+ { /* check 1D buffers */
+
+ /* length must be multiple of stride if stride > 0 */
+ if (NOT_I(blk->dim.len,>,0) ||
+ (blk->stride && NOT_I(blk->dim.len % blk->stride,==,0)))
+ return MEMMGR_ERR_GENERIC;
+ }
+ else
+ { /* check 2D buffers */
+
+ /* check width, height and stride (must be the default stride or 0) */
+ bytes_t stride = def_stride(blk->dim.area.width * def_bpp(blk->fmt));
+ if (NOT_I(blk->dim.area.width,>,0) ||
+ NOT_I(blk->dim.area.height,>,0) ||
+ (blk->stride && NOT_I(blk->stride,==,stride)))
+ return MEMMGR_ERR_GENERIC;
+ }
+
+ if (is_page_sized && NOT_I(def_size(blk) & (PAGE_SIZE - 1),==,0))
+ return MEMMGR_ERR_GENERIC;
+
+ return MEMMGR_ERR_NONE;
+}
+
+/**
+ * Checks whether the block information is correct for map and
+ * alloc operations. Checks the number of blocks, and validity
+ * of each block. Warns if reserved member is not 0. Also
+ * checks if the alignment/offset requirements are consistent
+ * across the buffer
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param blks Pointer to the block info array.
+ * @param num_blocks Number of blocks.
+ * @param num_pagesize_blocks Number of blocks that must be
+ * page sized (these must be in
+ * front)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static int check_blocks(struct tiler_block_info *blks, int num_blocks,
+ int num_pagesize_blocks)
+{
+ /* check arguments */
+ if (NOT_I(num_blocks,>,0) ||
+ NOT_I(num_blocks,<=,TILER_MAX_NUM_BLOCKS)) return MEMMGR_ERR_GENERIC;
+
+ /* check block allocation params */
+ int ix;
+ for (ix = 0; ix < num_blocks; ix++)
+ {
+ struct tiler_block_info *blk = blks + ix;
+ CHK_I(blk->ssptr,==,0);
+ CHK_I(blk->id,==,0);
+ int ret = check_block(blk, ix < num_pagesize_blocks);
+
+ /* check alignment */
+ if (!ret)
+ {
+ }
+ if (ret)
+ {
+ DP("for block[%d]", ix);
+ return ret;
+ }
+
+
+ }
+
+ /* set alignment parameters */
+ return MEMMGR_ERR_NONE;
+}
+
+/**
+ * Resets the ptr and reserved fields of the block info
+ * structures.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param blks Pointer to the block info array.
+ * @param num_blocks Number of blocks.
+ */
+static void reset_blocks(struct tiler_block_info *blks, int num_blocks)
+{
+ int ix;
+ for (ix = 0; ix < num_blocks; ix++)
+ {
+ blks[ix].ssptr = 0;
+ blks[ix].id = 0;
+ blks[ix].ptr = NULL;
+ }
+
+}
+
+bytes_t MemMgr_PageSize()
+{
+ return PAGE_SIZE;
+}
+
+void *MemMgr_Alloc(MemAllocBlock blocks[], int num_blocks)
+{
+ IN;
+ void *bufPtr = NULL;
+
+ /* need to access ssptrs */
+ struct tiler_block_info *blks = (tiler_block_info *) blocks;
+
+ /* check block allocation params, and state */
+ if (NOT_I(check_blocks(blks, num_blocks, num_blocks - 1),==,0) ||
+ NOT_I(inc_ref(),==,0)) goto DONE;
+
+ /* ----- begin recoverable portion ----- */
+ int ix;
+
+ /* allocate each buffer using tiler driver and initialize block info */
+ for (ix = 0; ix < num_blocks; ix++)
+ {
+ if (ix)
+ {
+ /* continue offset between pages */
+ }
+ CHK_I(blks[ix].ptr,==,NULL);
+ if (NOT_I(tiler_alloc(blks + ix),==,0)) goto FAIL_ALLOC;
+ }
+
+ bufPtr = tiler_mmap(blks, num_blocks, BUF_ALLOCED);
+ if (A_P(bufPtr,!=,0)) goto DONE;
+
+ /* ------ error handling ------ */
+FAIL_ALLOC:
+ while (ix)
+ {
+ tiler_free(blks + --ix);
+ }
+
+ /* clear ssptr and ptr fields for all blocks */
+ reset_blocks(blks, num_blocks);
+
+ A_I(dec_ref(),==,0);
+DONE:
+ CHK_I(cache_check(),==,0);
+ return R_P(bufPtr);
+}
+
+int MemMgr_Free(void *bufPtr)
+{
+ IN;
+
+ int ret = MEMMGR_ERR_GENERIC;
+ struct tiler_buf_info buf;
+ ZERO(buf);
+
+    /* retrieve the registered buffer info for this virtual address */
+ /* :NOTE: if this succeeds, Memory Allocator stops tracking this buffer */
+ buf_cache_del(bufPtr, &buf, BUF_ALLOCED);
+
+ if (A_L(buf.offset,!=,0))
+ {
+#ifndef STUB_TILER
+ dump_buf(&buf, "==(URBUF)=>");
+ ret = A_I(ioctl(td, TILIOC_URBUF, &buf),==,0);
+ dump_buf(&buf, "<=(URBUF)==");
+
+ /* free each block */
+ int ix;
+ for (ix = 0; ix < buf.num_blocks; ix++)
+ {
+ ERR_ADD(ret, tiler_free(buf.blocks + ix));
+ }
+
+ /* unmap buffer */
+ bufPtr = (void *)((uint32_t)bufPtr & ~(PAGE_SIZE - 1));
+ ERR_ADD(ret, munmap(bufPtr, map_size(buf.length, buf.offset)));
+#else
+ void *ptr = (void *) buf.offset;
+ FREE(ptr);
+ ret = MEMMGR_ERR_NONE;
+#endif
+ ERR_ADD(ret, dec_ref());
+ }
+
+ CHK_I(cache_check(),==,0);
+ return R_I(ret);
+}
+
+void *MemMgr_Map(MemAllocBlock blocks[], int num_blocks)
+{
+ IN;
+ void *bufPtr = NULL;
+
+ /* need to access ssptrs */
+ struct tiler_block_info *blks = (tiler_block_info *) blocks;
+
+ /* check block params, and state */
+ if (check_blocks(blks, num_blocks, num_blocks) ||
+ NOT_I(inc_ref(),==,0)) goto DONE;
+
+ /* we only map 1 page aligned 1D buffer for now */
+ if (NOT_I(num_blocks,==,1) ||
+ NOT_I(blocks[0].pixelFormat,==,PIXEL_FMT_PAGE) ||
+ NOT_I(blocks[0].dim.len & (PAGE_SIZE - 1),==,0) ||
+#ifdef STUB_TILER
+ NOT_I(MemMgr_IsMapped(blocks[0].ptr),==,0) ||
+#endif
+ NOT_I((uint32_t)blocks[0].ptr & (PAGE_SIZE - 1),==,0))
+ goto FAIL;
+
+ /* ----- begin recoverable portion ----- */
+ int ix;
+
+ /* allocate each buffer using tiler driver */
+ for (ix = 0; ix < num_blocks; ix++)
+ {
+ if (ix)
+ {
+ /* continue offset between pages */
+ }
+ if (NOT_I(blks[ix].ptr,!=,NULL) ||
+ NOT_I(tiler_map(blks + ix),==,0)) goto FAIL_MAP;
+ }
+
+    /* map buffer into tiler space and register with tiler manager */
+ bufPtr = tiler_mmap(blks, num_blocks, BUF_MAPPED);
+ if (A_P(bufPtr,!=,0)) goto DONE;
+
+ /* ------ error handling ------ */
+FAIL_MAP:
+ while (ix)
+ {
+ tiler_unmap(blks + --ix);
+ }
+
+FAIL:
+ /* clear ssptr and ptr fields for all blocks */
+ reset_blocks(blks, num_blocks);
+
+ A_I(dec_ref(),==,0);
+DONE:
+ CHK_I(cache_check(),==,0);
+ return R_P(bufPtr);
+}
+
+int MemMgr_UnMap(void *bufPtr)
+{
+ IN;
+
+ int ret = MEMMGR_ERR_GENERIC;
+ struct tiler_buf_info buf;
+ ZERO(buf);
+
+    /* retrieve the registered buffer info for this virtual address */
+ /* :NOTE: if this succeeds, Memory Allocator stops tracking this buffer */
+ buf_cache_del(bufPtr, &buf, BUF_MAPPED);
+
+ if (A_L(buf.offset,!=,0))
+ {
+#ifndef STUB_TILER
+ dump_buf(&buf, "==(URBUF)=>");
+ ret = A_I(ioctl(td, TILIOC_URBUF, &buf),==,0);
+ dump_buf(&buf, "<=(URBUF)==");
+
+ /* unmap each block */
+ int ix;
+ for (ix = 0; ix < buf.num_blocks; ix++)
+ {
+ ERR_ADD(ret, tiler_unmap(buf.blocks + ix));
+ }
+
+ /* unmap buffer */
+ bufPtr = (void *)((uint32_t)bufPtr & ~(PAGE_SIZE - 1));
+ ERR_ADD(ret, munmap(bufPtr, map_size(buf.length, buf.offset)));
+#else
+ struct tiler_buf_info *ptr = (struct tiler_buf_info *) buf.offset;
+ FREE(ptr[1].blocks[0].ptr);
+ FREE(ptr);
+ ret = MEMMGR_ERR_NONE;
+#endif
+ ERR_ADD(ret, dec_ref());
+ }
+
+ CHK_I(cache_check(),==,0);
+ return R_I(ret);
+}
+
+bool MemMgr_Is1DBlock(void *ptr)
+{
+ IN;
+
+ SSPtr ssptr = TilerMem_VirtToPhys(ptr);
+ enum tiler_fmt fmt = tiler_get_fmt(ssptr);
+ return R_I(fmt == TILFMT_PAGE);
+}
+
+bool MemMgr_Is2DBlock(void *ptr)
+{
+ IN;
+
+ SSPtr ssptr = TilerMem_VirtToPhys(ptr);
+ enum tiler_fmt fmt = tiler_get_fmt(ssptr);
+ return R_I(fmt == TILFMT_8BIT || fmt == TILFMT_16BIT ||
+ fmt == TILFMT_32BIT);
+}
+
+bool MemMgr_IsMapped(void *ptr)
+{
+ IN;
+ SSPtr ssptr = TilerMem_VirtToPhys(ptr);
+ enum tiler_fmt fmt = tiler_get_fmt(ssptr);
+ return R_I(fmt == TILFMT_8BIT || fmt == TILFMT_16BIT ||
+ fmt == TILFMT_32BIT || fmt == TILFMT_PAGE);
+}
+
+bytes_t MemMgr_GetStride(void *ptr)
+{
+ IN;
+#ifndef STUB_TILER
+ struct tiler_buf_info buf;
+ ZERO(buf);
+
+ /* find block that this buffer belongs to */
+ buf_cache_query(ptr, &buf, BUF_ALLOCED | BUF_MAPPED);
+ void *bufPtr = buf.blocks[0].ptr;
+
+ A_I(inc_ref(),==,0);
+
+ /* for tiler mapped buffers, get saved stride information */
+ if (buf.offset)
+ {
+ /* walk through block to determine which stride we need */
+ int ix;
+ for (ix = 0; ix < buf.num_blocks; ix++)
+ {
+ bytes_t size = def_size(buf.blocks + ix);
+ if (bufPtr <= ptr && ptr < bufPtr + size) {
+ A_I(dec_ref(),==,0);
+ return R_UP(buf.blocks[ix].stride);
+ }
+ bufPtr += size;
+ }
+ A_I(dec_ref(),==,0);
+ DP("assert: should not ever get here");
+ return R_UP(0);
+ }
+ /* see if pointer is valid */
+ else if (TilerMem_VirtToPhys(ptr) == 0)
+ {
+ A_I(dec_ref(),==,0);
+ return R_UP(0);
+ }
+ A_I(dec_ref(),==,0);
+#else
+ /* if emulating, we need to get through all allocated memory segments */
+ pthread_mutex_lock(&che_mutex);
+ init();
+
+ _AllocData *ad;
+ if (!ptr) return R_UP(0);
+ DLIST_MLOOP(bufs, ad, link) {
+ int ix;
+ struct tiler_buf_info *buf = (struct tiler_buf_info *) ad->buf.offset;
+ for (ix = 0; ix < buf->num_blocks; ix++)
+ {
+ if (ptr >= buf->blocks[ix].ptr &&
+ ptr < buf->blocks[ix].ptr + def_size(buf->blocks + ix))
+ {
+ bytes_t stride = buf->blocks[ix].stride;
+ pthread_mutex_unlock(&che_mutex);
+ return R_UP(stride);
+ }
+ }
+ }
+ pthread_mutex_unlock(&che_mutex);
+#endif
+ return R_UP(PAGE_SIZE);
+}
+
+bytes_t TilerMem_GetStride(SSPtr ssptr)
+{
+ IN;
+ switch(tiler_get_fmt(ssptr))
+ {
+ case TILFMT_8BIT: return R_UP(TILER_STRIDE_8BIT);
+ case TILFMT_16BIT: return R_UP(TILER_STRIDE_16BIT);
+ case TILFMT_32BIT: return R_UP(TILER_STRIDE_32BIT);
+ case TILFMT_PAGE: return R_UP(PAGE_SIZE);
+ default: return R_UP(0);
+ }
+}
+
+SSPtr TilerMem_VirtToPhys(void *ptr)
+{
+#ifndef STUB_TILER
+ SSPtr ssptr = 0;
+ if(!NOT_I(inc_ref(),==,0))
+ {
+ ssptr = ioctl(td, TILIOC_GSSP, (unsigned long) ptr);
+ A_I(dec_ref(),==,0);
+ }
+ return (SSPtr)R_P(ssptr);
+#else
+ return (SSPtr)ptr;
+#endif
+}
+
+/**
+ * Internal Unit Test. Tests the static methods of this
+ * library. Assumes an uninitialized state as well.
+ *
+ * @author a0194118 (9/4/2009)
+ *
+ * @return 0 for success, non-0 error value for failure.
+ */
+int __test__MemMgr()
+{
+ int ret = 0;
+
+ /* state check */
+ ret |= NOT_I(TILER_PAGE_WIDTH * TILER_PAGE_HEIGHT,==,PAGE_SIZE);
+ ret |= NOT_I(refCnt,==,0);
+ ret |= NOT_I(inc_ref(),==,0);
+ ret |= NOT_I(refCnt,==,1);
+ ret |= NOT_I(dec_ref(),==,0);
+ ret |= NOT_I(refCnt,==,0);
+
+ /* enumeration check */
+ ret |= NOT_I(PIXEL_FMT_8BIT,==,TILFMT_8BIT);
+ ret |= NOT_I(PIXEL_FMT_16BIT,==,TILFMT_16BIT);
+ ret |= NOT_I(PIXEL_FMT_32BIT,==,TILFMT_32BIT);
+ ret |= NOT_I(PIXEL_FMT_PAGE,==,TILFMT_PAGE);
+ ret |= NOT_I(sizeof(MemAllocBlock),==,sizeof(struct tiler_block_info));
+
+ /* void * arithmetic */
+ void *a = (void *)1000, *b = a + 2000, *c = (void *)3000;
+ ret |= NOT_P(b,==,c);
+
+ /* def_stride */
+ ret |= NOT_I(def_stride(0),==,0);
+ ret |= NOT_I(def_stride(1),==,PAGE_SIZE);
+ ret |= NOT_I(def_stride(PAGE_SIZE),==,PAGE_SIZE);
+ ret |= NOT_I(def_stride(PAGE_SIZE + 1),==,2 * PAGE_SIZE);
+
+ /* def_bpp */
+ ret |= NOT_I(def_bpp(PIXEL_FMT_32BIT),==,4);
+ ret |= NOT_I(def_bpp(PIXEL_FMT_16BIT),==,2);
+ ret |= NOT_I(def_bpp(PIXEL_FMT_8BIT),==,1);
+
+ /* def_size */
+ tiler_block_info blk = {0};
+ blk.fmt = TILFMT_8BIT;
+ blk.dim.area.width = PAGE_SIZE * 8 / 10;
+ blk.dim.area.height = 10;
+ ret |= NOT_I(def_size(&blk),==,10 * PAGE_SIZE);
+
+ blk.fmt = TILFMT_16BIT;
+ blk.dim.area.width = PAGE_SIZE * 7 / 10;
+ ret |= NOT_I(def_size(&blk),==,20 * PAGE_SIZE);
+ blk.dim.area.width = PAGE_SIZE * 4 / 10;
+ ret |= NOT_I(def_size(&blk),==,10 * PAGE_SIZE);
+
+ blk.fmt = TILFMT_32BIT;
+ ret |= NOT_I(def_size(&blk),==,20 * PAGE_SIZE);
+ blk.dim.area.width = PAGE_SIZE * 6 / 10;
+ ret |= NOT_I(def_size(&blk),==,30 * PAGE_SIZE);
+
+ return ret;
+}
+
diff --git a/tiler/memmgr.h b/tiler/memmgr.h
new file mode 100644
index 0000000..8f99904
--- /dev/null
+++ b/tiler/memmgr.h
@@ -0,0 +1,311 @@
+/*
+ * memmgr.h
+ *
+ * Memory Allocator Interface functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MEMMGR_H_
+#define _MEMMGR_H_
+
+/* retrieve type definitions */
+#include "mem_types.h"
+
+/**
+ * Memory Allocator is responsible for:
+ * <ol>
+ * <li>Allocate 1D and 2D blocks and pack them into a buffer.
+ * <li>Free such allocated blocks
+ * <li>Map 1D buffers into tiler space
+ * <li>Unmap 1D buffers from tiler space
+ * <li>Verify if an address lies in 1D or 2D space, whether it
+ * is mapped (to tiler space)
+ * <li>Mapping Ducati memory blocks to host processor and vice
+ * versa.
+ * </ol>
+ *
+ * The allocator distinguishes between:
+ * <ul>
+ * <li>1D and 2D blocks
+ * <li>2D blocks allocated by MemMgr_Alloc are non-cacheable. All
+ * other blocks are cacheable (e.g. 1D blocks). Preallocated
+ * buffers may or may not be cacheable, depending on how they've
+ * been allocated, but are assumed to be cacheable.
+ * <li>buffers (an ordered collection of one or more blocks
+ * mapped consecutively)
+ * </ul>
+ *
+ * The allocator tracks each buffer based on (addr, size).
+ *
+ * Also, via the tiler manager, it tracks each block. The tiler
+ * manager itself also tracks each buffer.
+ *
+ */
+
+/**
+ * Memory Allocator block specification
+ *
+ * Size of a 2D buffer is calculated as height * stride. stride
+ * is the smallest multiple of the page size that is at least
+ * the width in bytes (width * bytes-per-pixel), and is set by
+ * MemMgr_Alloc.
+ *
+ * Size of a 1D buffer is calculated as length. stride is not
+ * set by MemMgr_Alloc, but it can be set by the user. length
+ * must be a multiple of stride unless stride is 0.
+ *
+ * @author a0194118 (9/1/2009)
+ */
+struct MemAllocBlock {
+ pixel_fmt_t pixelFormat; /* pixel format */
+ union {
+ struct {
+ pixels_t width; /* width of 2D buffer */
+ pixels_t height; /* height of 2D buffer */
+ } area;
+ bytes_t len; /* length of 1D buffer. Must be multiple of
+ stride if stride is not 0. */
+ } dim;
+ uint32_t stride; /* must be multiple of page size. Can be 0 only
+ if pixelFormat is PIXEL_FMT_PAGE. */
+ void *ptr; /* pointer to beginning of buffer */
+ uint32_t id; /* buffer ID - received at allocation */
+ uint32_t key; /* buffer key - given at allocation */
+ uint32_t group_id; /* group ID */
+ /* alignment requirements for ssptr: ssptr & (align - 1) == offs */
+ uint32_t reserved; /* system space address (used internally) */
+};
+
+typedef struct MemAllocBlock MemAllocBlock;
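+
+/*
+ * Illustrative sketch (not part of the original header): filling out
+ * MemAllocBlock specifications for a 2D 8-bit block and a 1D page-mode
+ * block, following the rules described above.  The dimensions used are
+ * arbitrary example values.
+ *
+ *     MemAllocBlock blk2d;
+ *     memset(&blk2d, 0, sizeof(blk2d));
+ *     blk2d.pixelFormat = PIXEL_FMT_8BIT;
+ *     blk2d.dim.area.width  = 176;    // in pixels
+ *     blk2d.dim.area.height = 144;    // in pixels
+ *     blk2d.stride = 0;               // filled in by MemMgr_Alloc
+ *
+ *     MemAllocBlock blk1d;
+ *     memset(&blk1d, 0, sizeof(blk1d));
+ *     blk1d.pixelFormat = PIXEL_FMT_PAGE;
+ *     blk1d.dim.len = 64 * 1024;      // in bytes
+ *     blk1d.stride = 0;               // no length/stride constraint
+ */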
+
+/**
+ * Returns the page size. This is required for allocating 1D
+ * blocks that stack under any other blocks.
+ *
+ * @author a0194118 (9/3/2009)
+ *
+ * @return Page size.
+ */
+bytes_t MemMgr_PageSize();
+
+/**
+ * Allocates a buffer as a list of blocks (1D or 2D), and maps
+ * them so that they are packaged consecutively. Returns the
+ * pointer to the first block, or NULL on failure.
+ * <p>
+ * The size of each block other than the last must be a multiple
+ * of the page size. This ensures that the blocks stack
+ * correctly. Set stride to 0 to avoid stride/length alignment
+ * constraints. Stride of 2D blocks will be updated by this
+ * method.
+ * <p>
+ * 2D blocks will be non-cacheable, while 1D blocks will be
+ * cacheable.
+ * <p>
+ * On success, the buffer is registered with the memory
+ * allocator.
+ * <p>
+ * As a side effect, if the operation was successful, the ssptr
+ * fields of the block specification will be filled with the
+ * system-space addresses, while the ptr fields will be set to
+ * the individual blocks. The stride information is set for 2D
+ * blocks.
+ *
+ * @author a0194118 (9/1/2009)
+ *
+ * @param blocks Block specification information. This
+ * should be an array of at least num_blocks
+ * elements.
+ * @param num_blocks Number of blocks to be included in the
+ * allocated memory segment
+ *
+ * @return Pointer to the buffer, which is also the pointer to
+ * the first allocated block. NULL if allocation failed.
+ */
+void *MemMgr_Alloc(MemAllocBlock blocks[], int num_blocks);
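+
+/*
+ * Illustrative sketch (not part of the original header): allocating a
+ * single 2D block with MemMgr_Alloc and releasing it with MemMgr_Free.
+ * Error handling is minimal for brevity.
+ *
+ *     MemAllocBlock blk;
+ *     memset(&blk, 0, sizeof(blk));
+ *     blk.pixelFormat = PIXEL_FMT_8BIT;
+ *     blk.dim.area.width  = 640;
+ *     blk.dim.area.height = 480;
+ *
+ *     void *buf = MemMgr_Alloc(&blk, 1);
+ *     if (buf) {
+ *         // blk.ptr == buf, and blk.stride now holds the page-aligned
+ *         // stride chosen by the allocator
+ *         // ... use the buffer ...
+ *         MemMgr_Free(buf);
+ *     }
+ */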
+
+/**
+ * Frees a buffer allocated by MemMgr_Alloc(). It fails for
+ * any buffer not allocated by MemMgr_Alloc() or one that has
+ * been already freed.
+ * <p>
+ * It also unregisters the buffer with the memory allocator.
+ * <p>
+ * This function unmaps the processor's virtual address to the
+ * tiler address for all blocks allocated, unregisters the
+ * buffer, and frees all of its tiler blocks.
+ *
+ * @author a0194118 (9/1/2009)
+ *
+ * @param bufPtr Pointer to the buffer allocated (returned)
+ * by MemMgr_Alloc()
+ *
+ * @return 0 on success. Non-0 error value on failure.
+ */
+int MemMgr_Free(void *bufPtr);
+
+/**
+ * This function maps the user provided data buffer to the tiler
+ * space as blocks, and maps that area into the process space
+ * consecutively. You can map a data buffer multiple times,
+ * resulting in multiple mapping for the same buffer. However,
+ * you cannot map a buffer that is already mapped to tiler, e.g.
+ * a buffer pointer returned by this method.
+ *
+ * In phase 1 and 2, the supported configurations are:
+ * <ol>
+ * <li> Mapping exactly one 1D block to tiler space (e.g.
+ * MapIn1DMode).
+ * </ol>
+ *
+ * @author a0194118 (9/3/2009)
+ *
+ * @param blocks Block specification information. This
+ * should be an array of at least num_blocks
+ * elements. The ptr fields must contain the
+ * user allocated buffers for the block.
+ * These will be updated with the mapped
+ * addresses of these blocks on success.
+ *
+ * Each block must be page aligned. Length of
+ * each block also must be page aligned.
+ *
+ * @param num_blocks Number of blocks to be included in the
+ * mapped memory segment
+ *
+ * @return Pointer to the buffer, which is also the pointer to
+ * the first mapped block. NULL if allocation failed.
+ */
+void *MemMgr_Map(MemAllocBlock blocks[], int num_blocks);
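+
+/*
+ * Illustrative sketch (not part of the original header): mapping a
+ * page-aligned, page-multiple user buffer into tiler space and unmapping
+ * it again.  The manual alignment of the malloc-ed buffer mirrors the
+ * approach used in memmgr_test.c.
+ *
+ *     bytes_t len = 64 * 1024;                    // multiple of page size
+ *     void *raw  = malloc(len + MemMgr_PageSize() - 1);
+ *     void *data = (void *)(((uint32_t)raw + MemMgr_PageSize() - 1)
+ *                           & ~(MemMgr_PageSize() - 1));
+ *
+ *     MemAllocBlock blk;
+ *     memset(&blk, 0, sizeof(blk));
+ *     blk.pixelFormat = PIXEL_FMT_PAGE;
+ *     blk.dim.len = len;
+ *     blk.ptr = data;                             // user-provided buffer
+ *
+ *     void *mapped = MemMgr_Map(&blk, 1);
+ *     if (mapped) {
+ *         // mapped aliases data through tiler space
+ *         MemMgr_UnMap(mapped);
+ *     }
+ *     free(raw);
+ */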
+
+/**
+ * This function unmaps the user provided data buffer from tiler
+ * space that was mapped to the tiler space in paged mode using
+ * MemMgr_Map(). It also unmaps the buffer itself from the
+ * process space. Trying to unmap a previously unmapped buffer
+ * will fail.
+ *
+ * @author a0194118 (9/1/2009)
+ *
+ * @param bufPtr Pointer to the buffer as returned by a
+ *                  previous call to MemMgr_Map()
+ *
+ * @return 0 on success. Non-0 error value on failure.
+ */
+int MemMgr_UnMap(void *bufPtr);
+
+/**
+ * Checks if a given virtual address is mapped by tiler manager
+ * to tiler space.
+ * <p>
+ * This function is equivalent to MemMgr_Is1DBuffer(ptr) ||
+ * MemMgr_Is2DBuffer(ptr). It retrieves the system space
+ * address that the virtual address maps to. If this system
+ * space address lies within the tiler area, the function
+ * returns TRUE.
+ *
+ *
+ * @author a0194118 (9/1/2009)
+ *
+ * @param ptr Pointer to a virtual address
+ *
+ * @return TRUE (non-0) if the virtual address is within a
+ * buffer that was mapped into tiler space, e.g. by
+ *         calling MemMgr_Alloc() or MemMgr_Map()
+ */
+bool MemMgr_IsMapped(void *ptr);
+
+/**
+ * Checks if a given virtual address lies in a tiler 1D buffer.
+ * <p>
+ * This function retrieves the system space address that the
+ * virtual address maps to. If this system space address is
+ * within the 1D tiler area, it is considered lying within a 1D
+ * buffer.
+ *
+ * @author a0194118 (9/1/2009)
+ *
+ * @param ptr Pointer to a virtual address
+ *
+ * @return TRUE (non-0) if the virtual address is within a
+ * mapped 1D tiler buffer. FALSE (0) if the virtual
+ * address is not mapped, invalid, or is mapped to an
+ * area other than a 1D tiler buffer. In phase 1,
+ *         however, it is TRUE if the virtual address is mapped
+ * to the page-mode area of the tiler space.
+ */
+bool MemMgr_Is1DBlock(void *ptr);
+
+/**
+ * Checks if a given virtual address lies in a 2D buffer.
+ * <p>
+ * This function retrieves the system space address that the
+ * virtual address maps to. If this system space address is
+ * within the 2D tiler area, it is considered lying within a 2D
+ * buffer.
+ *
+ * @author a0194118 (9/1/2009)
+ *
+ * @param ptr Pointer to a virtual address
+ *
+ * @return TRUE (non-0) if the virtual address is within a
+ * mapped 2D buffer. FALSE (0) if the virtual address
+ * is not mapped, invalid, or is mapped to an area other
+ *         than a 2D buffer. In phase 1, however, it is TRUE if
+ * the virtual address is mapped to any area of the
+ * tiler space other than page mode.
+ */
+bool MemMgr_Is2DBlock(void *ptr);
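+
+/*
+ * Illustrative sketch (not part of the original header): classifying a
+ * virtual address p with the predicates above.
+ *
+ *     if (!MemMgr_IsMapped(p)) {
+ *         // p is not backed by tiler space
+ *     } else if (MemMgr_Is1DBlock(p)) {
+ *         // p lies in a page-mode (1D) tiler block
+ *     } else if (MemMgr_Is2DBlock(p)) {
+ *         // p lies in an 8-, 16- or 32-bit (2D) tiler block
+ *     }
+ */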
+
+/**
+ * Returns the stride corresponding to a virtual address. For
+ * 1D and 2D buffers it returns the stride supplied at, or
+ * acquired during, allocation/mapping. For non-tiler
+ * buffers it returns the page size.
+ * <p>
+ * NOTE: on Ducati phase 1, stride should return 16K for 8-bit
+ * 2D buffers, 32K for 16-bit and 32-bit 2D buffers, the stride
+ * used for alloc/map for 1D buffers, and the page size for
+ * non-tiler buffers.
+ *
+ * For unmapped addresses it returns 0. However, this cannot be
+ * used to determine if an address is unmapped as 1D buffers
+ * could also have 0 stride (e.g. compressed buffers).
+ *
+ * @author a0194118 (9/1/2009)
+ *
+ * @param ptr pointer to a virtual address
+ *
+ * @return The virtual stride of the block that contains the
+ * address.
+ */
+bytes_t MemMgr_GetStride(void *ptr);
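+
+/*
+ * Illustrative sketch (not part of the original header): walking a 2D
+ * 8-bit block row by row.  Consecutive rows are stride bytes apart, not
+ * width bytes apart, so each row pointer is advanced by the stride
+ * reported by MemMgr_GetStride (or stored in MemAllocBlock.stride).
+ * buf, width and height are assumed to come from an earlier
+ * MemMgr_Alloc call.
+ *
+ *     uint8_t *row   = (uint8_t *)buf;
+ *     bytes_t stride = MemMgr_GetStride(buf);
+ *     pixels_t y;
+ *     for (y = 0; y < height; y++) {
+ *         memset(row, 0, width);     // touch only the width bytes per row
+ *         row += stride;             // jump to the start of the next row
+ *     }
+ */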
+
+#endif
diff --git a/tiler/memmgr_test.c b/tiler/memmgr_test.c
new file mode 100644
index 0000000..2bb76b6
--- /dev/null
+++ b/tiler/memmgr_test.c
@@ -0,0 +1,1693 @@
+/*
+ * memmgr_test.c
+ *
+ * Memory Allocator Interface tests.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* retrieve type definitions */
+#define __DEBUG__
+#undef __DEBUG_ENTRY__
+#define __DEBUG_ASSERT__
+
+#define __MAP_OK__
+#undef __WRITE_IN_STRIDE__
+#undef STAR_TRACE_MEM
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+
+#ifdef HAVE_CONFIG_H
+ #include "config.h"
+#endif
+#include <utils.h>
+#include <list_utils.h>
+#include <debug_utils.h>
+#include <memmgr.h>
+#include <tilermem.h>
+#include <tilermem_utils.h>
+#include <testlib.h>
+
+/* for star_tiler_test */
+#include <fcntl.h> /* open() */
+#include <unistd.h> /* close() */
+#include <sys/ioctl.h> /* ioctl() */
+
+#define FALSE 0
+#define TESTERR_NOTIMPLEMENTED -65378
+
+#define MAX_ALLOCS 512
+
+#define TESTS\
+ T(alloc_1D_test(4096, 0))\
+ T(alloc_2D_test(64, 64, PIXEL_FMT_8BIT))\
+ T(alloc_2D_test(64, 64, PIXEL_FMT_16BIT))\
+ T(alloc_2D_test(64, 64, PIXEL_FMT_32BIT))\
+ T(alloc_NV12_test(64, 64))\
+ T(map_1D_test(4096, 0))\
+ T(alloc_1D_test(176 * 144 * 2, 0))\
+ T(alloc_2D_test(176, 144, PIXEL_FMT_8BIT))\
+ T(alloc_2D_test(176, 144, PIXEL_FMT_16BIT))\
+ T(alloc_2D_test(176, 144, PIXEL_FMT_32BIT))\
+ T(alloc_NV12_test(176, 144))\
+ T(map_1D_test(176 * 144 * 2, 0))\
+ T(alloc_1D_test(640 * 480 * 2, 0))\
+ T(alloc_2D_test(640, 480, PIXEL_FMT_8BIT))\
+ T(alloc_2D_test(640, 480, PIXEL_FMT_16BIT))\
+ T(alloc_2D_test(640, 480, PIXEL_FMT_32BIT))\
+ T(alloc_NV12_test(640, 480))\
+ T(map_1D_test(640 * 480 * 2, 0))\
+ T(alloc_1D_test(848 * 480 * 2, 0))\
+ T(alloc_2D_test(848, 480, PIXEL_FMT_8BIT))\
+ T(alloc_2D_test(848, 480, PIXEL_FMT_16BIT))\
+ T(alloc_2D_test(848, 480, PIXEL_FMT_32BIT))\
+ T(alloc_NV12_test(848, 480))\
+ T(map_1D_test(848 * 480 * 2, 0))\
+ T(alloc_1D_test(1280 * 720 * 2, 0))\
+ T(alloc_2D_test(1280, 720, PIXEL_FMT_8BIT))\
+ T(alloc_2D_test(1280, 720, PIXEL_FMT_16BIT))\
+ T(alloc_2D_test(1280, 720, PIXEL_FMT_32BIT))\
+ T(alloc_NV12_test(1280, 720))\
+ T(map_1D_test(1280 * 720 * 2, 0))\
+ T(alloc_1D_test(1920 * 1080 * 2, 0))\
+ T(alloc_2D_test(1920, 1080, PIXEL_FMT_8BIT))\
+ T(alloc_2D_test(1920, 1080, PIXEL_FMT_16BIT))\
+ T(alloc_2D_test(1920, 1080, PIXEL_FMT_32BIT))\
+ T(alloc_NV12_test(1920, 1080))\
+ T(map_1D_test(1920 * 1080 * 2, 0))\
+ T(map_1D_test(4096, 0))\
+ T(map_1D_test(8192, 0))\
+ T(map_1D_test(16384, 0))\
+ T(map_1D_test(32768, 0))\
+ T(map_1D_test(65536, 0))\
+ T(neg_alloc_tests())\
+ T(neg_free_tests())\
+ T(neg_map_tests())\
+ T(neg_unmap_tests())\
+ T(neg_check_tests())\
+ T(page_size_test())\
+ T(maxalloc_2D_test(2500, 32, PIXEL_FMT_8BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(2500, 16, PIXEL_FMT_16BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(1250, 16, PIXEL_FMT_32BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(5000, 32, PIXEL_FMT_8BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(5000, 16, PIXEL_FMT_16BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(2500, 16, PIXEL_FMT_32BIT, MAX_ALLOCS))\
+ T(alloc_2D_test(8193, 16, PIXEL_FMT_8BIT))\
+ T(alloc_2D_test(8193, 16, PIXEL_FMT_16BIT))\
+ T(alloc_2D_test(4097, 16, PIXEL_FMT_32BIT))\
+ T(alloc_2D_test(16384, 16, PIXEL_FMT_8BIT))\
+ T(alloc_2D_test(16384, 16, PIXEL_FMT_16BIT))\
+ T(alloc_2D_test(8192, 16, PIXEL_FMT_32BIT))\
+ T(!alloc_2D_test(16385, 16, PIXEL_FMT_8BIT))\
+ T(!alloc_2D_test(16385, 16, PIXEL_FMT_16BIT))\
+ T(!alloc_2D_test(8193, 16, PIXEL_FMT_32BIT))\
+ T(maxalloc_1D_test(4096, MAX_ALLOCS))\
+ T(maxalloc_2D_test(64, 64, PIXEL_FMT_8BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(64, 64, PIXEL_FMT_16BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(64, 64, PIXEL_FMT_32BIT, MAX_ALLOCS))\
+ T(maxalloc_NV12_test(64, 64, MAX_ALLOCS))\
+ T(maxmap_1D_test(4096, MAX_ALLOCS))\
+ T(maxalloc_1D_test(176 * 144 * 2, MAX_ALLOCS))\
+ T(maxalloc_2D_test(176, 144, PIXEL_FMT_8BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(176, 144, PIXEL_FMT_16BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(176, 144, PIXEL_FMT_32BIT, MAX_ALLOCS))\
+ T(maxalloc_NV12_test(176, 144, MAX_ALLOCS))\
+ T(maxmap_1D_test(176 * 144 * 2, MAX_ALLOCS))\
+ T(maxalloc_1D_test(640 * 480 * 2, MAX_ALLOCS))\
+ T(maxalloc_2D_test(640, 480, PIXEL_FMT_8BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(640, 480, PIXEL_FMT_16BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(640, 480, PIXEL_FMT_32BIT, MAX_ALLOCS))\
+ T(maxalloc_NV12_test(640, 480, MAX_ALLOCS))\
+ T(maxmap_1D_test(640 * 480 * 2, MAX_ALLOCS))\
+ T(maxalloc_1D_test(848 * 480 * 2, MAX_ALLOCS))\
+ T(maxalloc_2D_test(848, 480, PIXEL_FMT_8BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(848, 480, PIXEL_FMT_16BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(848, 480, PIXEL_FMT_32BIT, MAX_ALLOCS))\
+ T(maxalloc_NV12_test(848, 480, MAX_ALLOCS))\
+ T(maxmap_1D_test(848 * 480 * 2, MAX_ALLOCS))\
+ T(maxalloc_1D_test(1280 * 720 * 2, MAX_ALLOCS))\
+ T(maxalloc_2D_test(1280, 720, PIXEL_FMT_8BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(1280, 720, PIXEL_FMT_16BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(1280, 720, PIXEL_FMT_32BIT, MAX_ALLOCS))\
+ T(maxalloc_NV12_test(1280, 720, MAX_ALLOCS))\
+ T(maxmap_1D_test(1280 * 720 * 2, MAX_ALLOCS))\
+ T(maxalloc_1D_test(1920 * 1080 * 2, MAX_ALLOCS))\
+ T(maxalloc_2D_test(1920, 1080, PIXEL_FMT_8BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(1920, 1080, PIXEL_FMT_16BIT, MAX_ALLOCS))\
+ T(maxalloc_2D_test(1920, 1080, PIXEL_FMT_32BIT, MAX_ALLOCS))\
+ T(maxalloc_NV12_test(1920, 1080, 2))\
+ T(maxalloc_NV12_test(1920, 1080, MAX_ALLOCS))\
+ T(maxmap_1D_test(1920 * 1080 * 2, MAX_ALLOCS))\
+ T(star_tiler_test(1000, 10))\
+ T(star_tiler_test(1000, 30))\
+ T(star_test(100, 10))\
+ T(star_test(1000, 10))\
+
+/* this is defined in memmgr.c, but not exported as it is for internal
+ use only */
+extern int __test__MemMgr();
+
+/**
+ * Returns the default page stride for this block
+ *
+ * @author a0194118 (9/4/2009)
+ *
+ * @param width Width of 2D container
+ *
+ * @return Stride
+ */
+static bytes_t def_stride(bytes_t width)
+{
+ return ROUND_UP_TO2POW(width, PAGE_SIZE);
+}
+
+/**
+ * Returns the bytes per pixel for the pixel format.
+ *
+ * @author a0194118 (9/4/2009)
+ *
+ * @param pixelFormat Pixelformat
+ *
+ * @return Bytes per pixel
+ */
+static bytes_t def_bpp(pixel_fmt_t pixelFormat)
+{
+ return (pixelFormat == PIXEL_FMT_32BIT ? 4 :
+ pixelFormat == PIXEL_FMT_16BIT ? 2 : 1);
+}
+
+/**
+ * This method fills up a range of memory using a start address
+ * and start value. The method of filling ensures that
+ * accidentally overlapping regions have minimal chances of
+ * matching, even if the same starting value is used. This is
+ * because the difference between successive values itself
+ * varies (see the series below). The series only repeats after
+ * 704189 values, so the
+ * probability of a match for a range of at least 2 values is
+ * less than 2*10^-11.
+ *
+ * V(i + 1) - V(i) = { 1, 2, 3, ..., 65535, 2, 4, 6, 8 ...,
+ * 65534, 3, 6, 9, 12, ..., 4, 8, 12, 16, ... }
+ *
+ * @author a0194118 (9/6/2009)
+ *
+ * @param start start value
+ * @param block  pointer to block info structure
+ */
+void fill_mem(uint16_t start, MemAllocBlock *block)
+{
+ IN;
+ uint16_t *ptr = (uint16_t *)block->ptr, delta = 1, step = 1;
+ bytes_t height, width, stride, i;
+ if (block->pixelFormat == PIXEL_FMT_PAGE)
+ {
+ height = 1;
+ stride = width = block->dim.len;
+ }
+ else
+ {
+ height = block->dim.area.height;
+ width = block->dim.area.width;
+ stride = block->stride;
+ }
+ width *= def_bpp(block->pixelFormat);
+ bytes_t size = height * stride;
+
+ P("(%p,0x%x*0x%x,s=0x%x)=0x%x", block->ptr, width, height, stride, start);
+
+ CHK_I(width,<=,stride);
+ uint32_t *ptr32 = (uint32_t *)ptr;
+ while (height--)
+ {
+ if (block->pixelFormat == PIXEL_FMT_32BIT)
+ {
+ for (i = 0; i < width; i += sizeof(uint32_t))
+ {
+ uint32_t val = (start & 0xFFFF) | (((uint32_t)(start + delta) & 0xFFFF) << 16);
+ *ptr32++ = val;
+ start += delta;
+ delta += step;
+ /* increase step if overflown */
+ if (delta < step) delta = ++step;
+ start += delta;
+ delta += step;
+ /* increase step if overflown */
+ if (delta < step) delta = ++step;
+ }
+#ifdef __WRITE_IN_STRIDE__
+ while (i < stride && (height || ((PAGE_SIZE - 1) & (uint32_t)ptr32)))
+ {
+ *ptr32++ = 0;
+ i += sizeof(uint32_t);
+ }
+#else
+ ptr32 += (stride - i) / sizeof(uint32_t);
+#endif
+ }
+ else
+ {
+ for (i = 0; i < width; i += sizeof(uint16_t))
+ {
+ *ptr++ = start;
+ start += delta;
+ delta += step;
+ /* increase step if overflown */
+ if (delta < step) delta = ++step;
+ }
+#ifdef __WRITE_IN_STRIDE__
+ while (i < stride && (height || ((PAGE_SIZE - 1) & (uint32_t)ptr)))
+ {
+ *ptr++ = 0;
+ i += sizeof(uint16_t);
+ }
+#else
+ ptr += (stride - i) / sizeof(uint16_t);
+#endif
+
+ }
+ }
+ CHK_P((block->pixelFormat == PIXEL_FMT_32BIT ? (void *)ptr32 : (void *)ptr),==,
+ (block->ptr + size));
+ OUT;
+}
+
+/**
+ * This verifies if a range of memory at a given address was
+ * filled up using the start value.
+ *
+ * @author a0194118 (9/6/2009)
+ *
+ * @param start start value
+ * @param block  pointer to block info structure
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int check_mem(uint16_t start, MemAllocBlock *block)
+{
+ IN;
+ uint16_t *ptr = (uint16_t *)block->ptr, delta = 1, step = 1;
+ bytes_t height, width, stride, r, i;
+ if (block->pixelFormat == PIXEL_FMT_PAGE)
+ {
+ height = 1;
+ stride = width = block->dim.len;
+ }
+ else
+ {
+ height = block->dim.area.height;
+ width = block->dim.area.width;
+ stride = block->stride;
+ }
+ width *= def_bpp(block->pixelFormat);
+
+ CHK_I(width,<=,stride);
+ uint32_t *ptr32 = (uint32_t *)ptr;
+ for (r = 0; r < height; r++)
+ {
+ if (block->pixelFormat == PIXEL_FMT_32BIT)
+ {
+ for (i = 0; i < width; i += sizeof(uint32_t))
+ {
+ uint32_t val = (start & 0xFFFF) | (((uint32_t)(start + delta) & 0xFFFF) << 16);
+ if (*ptr32++ != val) {
+ DP("assert: val[%u,%u] (=0x%x) != 0x%x", r, i, *--ptr32, val);
+ return R_I(MEMMGR_ERR_GENERIC);
+ }
+ start += delta;
+ delta += step;
+ /* increase step if overflown */
+ if (delta < step) delta = ++step;
+ start += delta;
+ delta += step;
+ /* increase step if overflown */
+ if (delta < step) delta = ++step;
+ }
+#ifdef __WRITE_IN_STRIDE__
+ while (i < stride && ((r < height - 1) || ((PAGE_SIZE - 1) & (uint32_t)ptr32)))
+ {
+ if (*ptr32++) {
+ DP("assert: val[%u,%u] (=0x%x) != 0", r, i, *--ptr32);
+ return R_I(MEMMGR_ERR_GENERIC);
+ }
+ i += sizeof(uint32_t);
+ }
+#else
+ ptr32 += (stride - i) / sizeof(uint32_t);
+#endif
+ }
+ else
+ {
+ for (i = 0; i < width; i += sizeof(uint16_t))
+ {
+ if (*ptr++ != start) {
+ DP("assert: val[%u,%u] (=0x%x) != 0x%x", r, i, *--ptr, start);
+ return R_I(MEMMGR_ERR_GENERIC);
+ }
+ start += delta;
+ delta += step;
+ /* increase step if overflown */
+ if (delta < step) delta = ++step;
+ }
+#ifdef __WRITE_IN_STRIDE__
+ while (i < stride && ((r < height - 1) || ((PAGE_SIZE - 1) & (uint32_t)ptr)))
+ {
+ if (*ptr++) {
+ DP("assert: val[%u,%u] (=0x%x) != 0", r, i, *--ptr);
+ return R_I(MEMMGR_ERR_GENERIC);
+ }
+ i += sizeof(uint16_t);
+ }
+#else
+ ptr += (stride - i) / sizeof(uint16_t);
+#endif
+ }
+ }
+ return R_I(MEMMGR_ERR_NONE);
+}
+
+/**
+ * This method allocates a 1D tiled buffer of the given length
+ * and stride using MemMgr_Alloc. If successful, it checks
+ * that the block information was updated with the pointer to
+ * the block. Additionally, it verifies the correct return
+ * values for MemMgr_IsMapped, MemMgr_Is1DBlock,
+ * MemMgr_Is2DBlock, MemMgr_GetStride, TilerMem_GetStride. It
+ * also verifies TilerMem_VirtToPhys using an internally stored
+ * value of the ssptr. If any of these verifications fail, the
+ * buffer is freed. Otherwise, it is filled using the given
+ * start value.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param length Buffer length
+ * @param stride Buffer stride
+ * @param val Fill start value
+ *
+ * @return pointer to the allocated buffer, or NULL on failure
+ */
+void *alloc_1D(bytes_t length, bytes_t stride, uint16_t val)
+{
+ MemAllocBlock block;
+ memset(&block, 0, sizeof(block));
+
+ block.pixelFormat = PIXEL_FMT_PAGE;
+ block.dim.len = length;
+ block.stride = stride;
+
+ void *bufPtr = MemMgr_Alloc(&block, 1);
+ CHK_P(bufPtr,==,block.ptr);
+ if (bufPtr) {
+ if (NOT_I(MemMgr_IsMapped(bufPtr),!=,0) ||
+ NOT_I(MemMgr_Is1DBlock(bufPtr),!=,0) ||
+ NOT_I(MemMgr_Is2DBlock(bufPtr),==,0) ||
+ NOT_I(MemMgr_GetStride(bufPtr),==,block.stride) ||
+ NOT_P(TilerMem_VirtToPhys(bufPtr),==,block.reserved) ||
+ NOT_I(TilerMem_GetStride(TilerMem_VirtToPhys(bufPtr)),==,PAGE_SIZE) ||
+ NOT_L((PAGE_SIZE - 1) & (long)bufPtr,==,(PAGE_SIZE - 1) & block.reserved))
+ {
+ MemMgr_Free(bufPtr);
+ return NULL;
+ }
+ fill_mem(val, &block);
+ }
+ return bufPtr;
+}
+
+/**
+ * This method frees a 1D tiled buffer. The given length,
+ * stride and start values are used to verify that the buffer is
+ * still correctly filled. In the event of any errors, the
+ * error value is returned.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param length Buffer length
+ * @param stride Buffer stride
+ * @param val Fill start value
+ * @param bufPtr Pointer to the allocated buffer
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int free_1D(bytes_t length, bytes_t stride, uint16_t val, void *bufPtr)
+{
+ MemAllocBlock block;
+ memset(&block, 0, sizeof(block));
+
+ block.pixelFormat = PIXEL_FMT_PAGE;
+ block.dim.len = length;
+ block.stride = stride;
+ block.ptr = bufPtr;
+
+ int ret = A_I(check_mem(val, &block),==,0);
+ ERR_ADD(ret, MemMgr_Free(bufPtr));
+ return ret;
+}
+
+/**
+ * This method allocates a 2D tiled buffer of the given width,
+ * height, stride and pixel format using
+ * MemMgr_Alloc. If successful, it checks that the block
+ * information was updated with the pointer to the block.
+ * Additionally, it verifies the correct return values for
+ * MemMgr_IsMapped, MemMgr_Is1DBlock, MemMgr_Is2DBlock,
+ * MemMgr_GetStride, TilerMem_GetStride. It also verifies
+ * TilerMem_VirtToPhys using an internally stored value of the
+ * ssptr. If any of these verifications fail, the buffer is
+ * freed. Otherwise, it is filled using the given start value.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param width Buffer width
+ * @param height Buffer height
+ * @param fmt Pixel format
+ * @param stride Buffer stride
+ * @param val Fill start value
+ *
+ * @return pointer to the allocated buffer, or NULL on failure
+ */
+void *alloc_2D(pixels_t width, pixels_t height, pixel_fmt_t fmt, bytes_t stride,
+ uint16_t val)
+{
+ MemAllocBlock block;
+ memset(&block, 0, sizeof(block));
+
+ block.pixelFormat = fmt;
+ block.dim.area.width = width;
+ block.dim.area.height = height;
+ block.stride = stride;
+
+ void *bufPtr = MemMgr_Alloc(&block, 1);
+ CHK_P(bufPtr,==,block.ptr);
+ if (bufPtr) {
+ bytes_t cstride = (fmt == PIXEL_FMT_8BIT ? TILER_STRIDE_8BIT :
+ fmt == PIXEL_FMT_16BIT ? TILER_STRIDE_16BIT :
+ TILER_STRIDE_32BIT);
+
+ if (NOT_I(MemMgr_IsMapped(bufPtr),!=,0) ||
+ NOT_I(MemMgr_Is1DBlock(bufPtr),==,0) ||
+ NOT_I(MemMgr_Is2DBlock(bufPtr),!=,0) ||
+ NOT_I(block.stride,!=,0) ||
+ NOT_I(MemMgr_GetStride(bufPtr),==,block.stride) ||
+ NOT_P(TilerMem_VirtToPhys(bufPtr),==,block.reserved) ||
+ NOT_I(TilerMem_GetStride(TilerMem_VirtToPhys(bufPtr)),==,cstride) ||
+ NOT_L((PAGE_SIZE - 1) & (long)bufPtr,==,(PAGE_SIZE - 1) & block.reserved))
+ {
+ MemMgr_Free(bufPtr);
+ return NULL;
+ }
+ fill_mem(val, &block);
+ }
+ return bufPtr;
+}
+
+/**
+ * This method frees a 2D tiled buffer. The given width,
+ * height, pixel format, stride and start values are used to
+ * verify that the buffer is still correctly filled. In the
+ * event of any errors, the error value is returned.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param width Buffer width
+ * @param height Buffer height
+ * @param fmt Pixel format
+ * @param stride Buffer stride
+ * @param val Fill start value
+ * @param bufPtr Pointer to the allocated buffer
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int free_2D(pixels_t width, pixels_t height, pixel_fmt_t fmt, bytes_t stride,
+ uint16_t val, void *bufPtr)
+{
+ MemAllocBlock block;
+ memset(&block, 0, sizeof(block));
+
+ block.pixelFormat = fmt;
+ block.dim.area.width = width;
+ block.dim.area.height = height;
+ block.stride = def_stride(width * def_bpp(fmt));
+ block.ptr = bufPtr;
+
+ int ret = A_I(check_mem(val, &block),==,0);
+ ERR_ADD(ret, MemMgr_Free(bufPtr));
+ return ret;
+}
+
+/**
+ * This method allocates an NV12 tiled buffer of the given width
+ * and height using MemMgr_Alloc. If successful, it checks that
+ * the block informations were updated with the pointers to the
+ * individual blocks. Additionally, it verifies the correct
+ * return values for MemMgr_IsMapped, MemMgr_Is1DBlock,
+ * MemMgr_Is2DBlock, MemMgr_GetStride, TilerMem_GetStride for
+ * both blocks. It also verifies TilerMem_VirtToPhys using an
+ * internally stored values of the ssptr. If any of these
+ * verifications fail, the buffer is freed. Otherwise, it is
+ * filled using the given start value.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param width Buffer width
+ * @param height Buffer height
+ * @param val Fill start value
+ *
+ * @return pointer to the allocated buffer, or NULL on failure
+ */
+void *alloc_NV12(pixels_t width, pixels_t height, uint16_t val)
+{
+ MemAllocBlock blocks[2];
+ ZERO(blocks);
+
+ blocks[0].pixelFormat = PIXEL_FMT_8BIT;
+ blocks[0].dim.area.width = width;
+ blocks[0].dim.area.height = height;
+ blocks[1].pixelFormat = PIXEL_FMT_16BIT;
+ blocks[1].dim.area.width = width >> 1;
+ blocks[1].dim.area.height = height >> 1;
+
+ void *bufPtr = MemMgr_Alloc(blocks, 2);
+ CHK_P(blocks[0].ptr,==,bufPtr);
+ if (bufPtr) {
+ void *buf2 = bufPtr + blocks[0].stride * height;
+ if (NOT_P(blocks[1].ptr,==,buf2) ||
+ NOT_I(MemMgr_IsMapped(bufPtr),!=,0) ||
+ NOT_I(MemMgr_IsMapped(buf2),!=,0) ||
+ NOT_I(MemMgr_Is1DBlock(bufPtr),==,0) ||
+ NOT_I(MemMgr_Is1DBlock(buf2),==,0) ||
+ NOT_I(MemMgr_Is2DBlock(bufPtr),!=,0) ||
+ NOT_I(MemMgr_Is2DBlock(buf2),!=,0) ||
+ NOT_I(blocks[0].stride,!=,0) ||
+ NOT_I(blocks[1].stride,!=,0) ||
+ NOT_I(MemMgr_GetStride(bufPtr),==,blocks[0].stride) ||
+ NOT_I(MemMgr_GetStride(buf2),==,blocks[1].stride) ||
+ NOT_P(TilerMem_VirtToPhys(bufPtr),==,blocks[0].reserved) ||
+ NOT_P(TilerMem_VirtToPhys(buf2),==,blocks[1].reserved) ||
+ NOT_I(TilerMem_GetStride(TilerMem_VirtToPhys(bufPtr)),==,TILER_STRIDE_8BIT) ||
+ NOT_I(TilerMem_GetStride(TilerMem_VirtToPhys(buf2)),==,TILER_STRIDE_16BIT) ||
+ NOT_L((PAGE_SIZE - 1) & (long)blocks[0].ptr,==,(PAGE_SIZE - 1) & blocks[0].reserved) ||
+ NOT_L((PAGE_SIZE - 1) & (long)blocks[1].ptr,==,(PAGE_SIZE - 1) & blocks[1].reserved))
+ {
+ MemMgr_Free(bufPtr);
+ return NULL;
+ }
+
+ fill_mem(val, blocks);
+ fill_mem(val, blocks + 1);
+ } else {
+ CHK_P(blocks[1].ptr,==,NULL);
+ }
+
+ return bufPtr;
+}
+
+/**
+ * This method frees an NV12 tiled buffer. The given width,
+ * height and start values are used to verify that the buffer is
+ * still correctly filled. In the event of any errors, the
+ * error value is returned.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param width Buffer width
+ * @param height Buffer height
+ * @param val Fill start value
+ * @param bufPtr Pointer to the allocated buffer
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int free_NV12(pixels_t width, pixels_t height, uint16_t val, void *bufPtr)
+{
+ MemAllocBlock blocks[2];
+ memset(blocks, 0, sizeof(blocks));
+
+ blocks[0].pixelFormat = PIXEL_FMT_8BIT;
+ blocks[0].dim.area.width = width;
+ blocks[0].dim.area.height = height;
+ blocks[0].stride = def_stride(width);
+ blocks[0].ptr = bufPtr;
+ blocks[1].pixelFormat = PIXEL_FMT_16BIT;
+ blocks[1].dim.area.width = width >> 1;
+ blocks[1].dim.area.height = height >> 1;
+ blocks[1].stride = def_stride(width);
+ blocks[1].ptr = bufPtr + blocks[0].stride * height;
+
+ int ret = A_I(check_mem(val, blocks),==,0);
+ ERR_ADD(ret, check_mem(val, blocks + 1));
+ ERR_ADD(ret, MemMgr_Free(bufPtr));
+ return ret;
+}
+
+/**
+ * This method maps a preallocated 1D buffer of the given length
+ * and stride into tiler space using MemMgr_Map. The mapped
+ * address must differ from the supplied address if successful.
+ * Moreover, it checks that the block information was
+ * updated with the pointer to the block. Additionally, it
+ * verifies the correct return values for MemMgr_IsMapped,
+ * MemMgr_Is1DBlock, MemMgr_Is2DBlock, MemMgr_GetStride,
+ * TilerMem_GetStride. It also verifies TilerMem_VirtToPhys
+ * using an internally stored value of the ssptr. If any of
+ * these verifications fail, the buffer is unmapped. Otherwise,
+ * the original buffer is filled using the given start value.
+ *
+ * :TODO: how do we verify the mapping?
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param dataPtr Pointer to the allocated buffer
+ * @param length Buffer length
+ * @param stride Buffer stride
+ * @param val Fill start value
+ *
+ * @return pointer to the mapped buffer, or NULL on failure
+ */
+void *map_1D(void *dataPtr, bytes_t length, bytes_t stride, uint16_t val)
+{
+ MemAllocBlock block;
+ memset(&block, 0, sizeof(block));
+
+ block.pixelFormat = PIXEL_FMT_PAGE;
+ block.dim.len = length;
+ block.stride = stride;
+ block.ptr = dataPtr;
+
+ void *bufPtr = MemMgr_Map(&block, 1);
+ CHK_P(bufPtr,==,block.ptr);
+ if (bufPtr) {
+ if (NOT_P(bufPtr,!=,dataPtr) ||
+ NOT_I(MemMgr_IsMapped(bufPtr),!=,0) ||
+ NOT_I(MemMgr_Is1DBlock(bufPtr),!=,0) ||
+ NOT_I(MemMgr_Is2DBlock(bufPtr),==,0) ||
+ NOT_I(MemMgr_GetStride(bufPtr),==,block.stride) ||
+ NOT_P(TilerMem_VirtToPhys(bufPtr),==,block.reserved) ||
+ NOT_I(TilerMem_GetStride(TilerMem_VirtToPhys(bufPtr)),==,PAGE_SIZE) ||
+ NOT_L((PAGE_SIZE - 1) & (long)bufPtr,==,0) ||
+ NOT_L((PAGE_SIZE - 1) & block.reserved,==,0))
+ {
+ MemMgr_UnMap(bufPtr);
+ return NULL;
+ }
+ block.ptr = dataPtr;
+ fill_mem(val, &block);
+ }
+ return bufPtr;
+}
+
+/**
+ * This method unmaps a 1D tiled buffer. The given data
+ * pointer, length, stride and start values are used to verify
+ * that the buffer is still correctly filled. In the event of
+ * any errors, the error value is returned.
+ *
+ * :TODO: how do we verify the mapping?
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param dataPtr Pointer to the preallocated buffer
+ * @param length Buffer length
+ * @param stride Buffer stride
+ * @param val Fill start value
+ * @param bufPtr Pointer to the mapped buffer
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int unmap_1D(void *dataPtr, bytes_t length, bytes_t stride, uint16_t val, void *bufPtr)
+{
+ MemAllocBlock block;
+ memset(&block, 0, sizeof(block));
+
+ block.pixelFormat = PIXEL_FMT_PAGE;
+ block.dim.len = length;
+ block.stride = stride;
+ block.ptr = dataPtr;
+ int ret = A_I(check_mem(val, &block),==,0);
+ ERR_ADD(ret, MemMgr_UnMap(bufPtr));
+ return ret;
+}
+
+/**
+ * Tests the MemMgr_PageSize method.
+ *
+ * @author a0194118 (9/15/2009)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+int page_size_test()
+{
+ return NOT_I(MemMgr_PageSize(),==,PAGE_SIZE);
+}
+
+/**
+ * This method tests the allocation and freeing of a 1D tiled
+ * buffer.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param length Buffer length
+ * @param stride Buffer stride
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int alloc_1D_test(bytes_t length, bytes_t stride)
+{
+ printf("Allocate & Free %ub 1D buffer\n", length);
+
+ uint16_t val = (uint16_t) rand();
+ void *ptr = alloc_1D(length, stride, val);
+ if (!ptr) return 1;
+ int res = free_1D(length, stride, val, ptr);
+ return res;
+}
+
+/**
+ * This method tests the allocation and freeing of a 2D tiled
+ * buffer.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param width Buffer width
+ * @param height Buffer height
+ * @param fmt Pixel format
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int alloc_2D_test(pixels_t width, pixels_t height, pixel_fmt_t fmt)
+{
+ printf("Allocate & Free %ux%ux%ub 1D buffer\n", width, height, def_bpp(fmt));
+
+ uint16_t val = (uint16_t) rand();
+ void *ptr = alloc_2D(width, height, fmt, 0, val);
+ if (!ptr) return 1;
+ int res = free_2D(width, height, fmt, 0, val, ptr);
+ return res;
+}
+
+/**
+ * This method tests the allocation and freeing of an NV12 tiled
+ * buffer.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param width Buffer width
+ * @param height Buffer height
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int alloc_NV12_test(pixels_t width, pixels_t height)
+{
+ printf("Allocate & Free %ux%u NV12 buffer\n", width, height);
+
+ uint16_t val = (uint16_t) rand();
+ void *ptr = alloc_NV12(width, height, val);
+ if (!ptr) return 1;
+ int res = free_NV12(width, height, val, ptr);
+ return res;
+}
+
+/**
+ * This method tests the mapping and unmapping of a 1D buffer.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param length Buffer length
+ * @param stride Buffer stride
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int map_1D_test(bytes_t length, bytes_t stride)
+{
+ length = (length + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1);
+ printf("Mapping and UnMapping 0x%xb 1D buffer\n", length);
+
+#ifdef __MAP_OK__
+ /* allocate aligned buffer */
+ void *buffer = malloc(length + PAGE_SIZE - 1);
+ void *dataPtr = (void *)(((uint32_t)buffer + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1));
+ uint16_t val = (uint16_t) rand();
+ void *ptr = map_1D(dataPtr, length, stride, val);
+ if (!ptr) return 1;
+ int res = unmap_1D(dataPtr, length, stride, val, ptr);
+ FREE(buffer);
+#else
+ int res = TESTERR_NOTIMPLEMENTED;
+#endif
+ return res;
+}
+
+/**
+ * This method tests the allocation and freeing of a number of
+ * 1D tiled buffers (up to MAX_ALLOCS)
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param length Buffer length
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int maxalloc_1D_test(bytes_t length, int max_allocs)
+{
+ printf("Allocate & Free max # of %ub 1D buffers\n", length);
+
+ struct data {
+ uint16_t val;
+ void *bufPtr;
+ } *mem;
+
+ /* allocate as many buffers as we can */
+ mem = NEWN(struct data, max_allocs);
+ void *ptr = (void *)mem;
+ int ix, res = 0;
+ for (ix = 0; ptr && ix < max_allocs;)
+ {
+ uint16_t val = (uint16_t) rand();
+ ptr = alloc_1D(length, 0, val);
+ if (ptr)
+ {
+ mem[ix].val = val;
+ mem[ix].bufPtr = ptr;
+ ix++;
+ }
+ }
+
+ P(":: Allocated %d buffers", ix);
+
+ while (ix--)
+ {
+ ERR_ADD(res, free_1D(length, 0, mem[ix].val, mem[ix].bufPtr));
+ }
+ FREE(mem);
+ return res;
+}
+
+/**
+ * This method tests the allocation and freeing of a number of
+ * 2D tiled buffers (up to MAX_ALLOCS)
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param width Buffer width
+ * @param height Buffer height
+ * @param fmt Pixel format
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int maxalloc_2D_test(pixels_t width, pixels_t height, pixel_fmt_t fmt, int max_allocs)
+{
+ printf("Allocate & Free max # of %ux%ux%ub 1D buffers\n", width, height, def_bpp(fmt));
+
+ struct data {
+ uint16_t val;
+ void *bufPtr;
+ } *mem;
+
+ /* allocate as many buffers as we can */
+ mem = NEWN(struct data, max_allocs);
+ void *ptr = (void *)mem;
+ int ix, res = 0;
+ for (ix = 0; ptr && ix < max_allocs;)
+ {
+ uint16_t val = (uint16_t) rand();
+ ptr = alloc_2D(width, height, fmt, 0, val);
+ if (ptr)
+ {
+ mem[ix].val = val;
+ mem[ix].bufPtr = ptr;
+ ix++;
+ }
+ }
+
+ P(":: Allocated %d buffers", ix);
+
+ while (ix--)
+ {
+ ERR_ADD(res, free_2D(width, height, fmt, 0, mem[ix].val, mem[ix].bufPtr));
+ }
+ FREE(mem);
+ return res;
+}
+
+/**
+ * This method tests the allocation and freeing of a number of
+ * NV12 tiled buffers (up to max_allocs)
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param width Buffer width
+ * @param height Buffer height
+ * @param max_allocs Maximum number of buffers to allocate
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int maxalloc_NV12_test(pixels_t width, pixels_t height, int max_allocs)
+{
+ printf("Allocate & Free max # of %ux%u NV12 buffers\n", width, height);
+
+ struct data {
+ uint16_t val;
+ void *bufPtr;
+ } *mem;
+
+ /* allocate as many buffers as we can */
+ mem = NEWN(struct data, max_allocs);
+ void *ptr = (void *)mem;
+ int ix, res = 0;
+ for (ix = 0; ptr && ix < max_allocs;)
+ {
+ uint16_t val = (uint16_t) rand();
+ ptr = alloc_NV12(width, height, val);
+ if (ptr)
+ {
+ mem[ix].val = val;
+ mem[ix].bufPtr = ptr;
+ ix++;
+ }
+ }
+
+ P(":: Allocated %d buffers", ix);
+
+ while (ix--)
+ {
+ ERR_ADD(res, free_NV12(width, height, mem[ix].val, mem[ix].bufPtr));
+ }
+ FREE(mem);
+ return res;
+}
+
+/**
+ * This method tests the mapping and unmapping of a number of
+ * 1D buffers (up to max_maps)
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param length Buffer length
+ * @param max_maps Maximum number of buffers to map
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int maxmap_1D_test(bytes_t length, int max_maps)
+{
+ length = (length + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1);
+ printf("Map & UnMap max # of %xb 1D buffers\n", length);
+
+#ifdef __MAP_OK__
+ struct data {
+ uint16_t val;
+ void *bufPtr, *buffer, *dataPtr;
+ } *mem;
+
+ /* map as many buffers as we can */
+ mem = NEWN(struct data, max_maps);
+ void *ptr = (void *)mem;
+ int ix, res = 0;
+ for (ix = 0; ptr && ix < max_maps;)
+ {
+ /* allocate aligned buffer */
+ ptr = malloc(length + PAGE_SIZE - 1);
+ if (ptr)
+ {
+ void *buffer = ptr;
+ void *dataPtr = (void *)(((uint32_t)buffer + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1));
+ uint16_t val = (uint16_t) rand();
+ ptr = map_1D(dataPtr, length, 0, val);
+ if (ptr)
+ {
+ mem[ix].val = val;
+ mem[ix].bufPtr = ptr;
+ mem[ix].buffer = buffer;
+ mem[ix].dataPtr = dataPtr;
+ ix++;
+ }
+ else
+ {
+ FREE(buffer);
+ break;
+ }
+ }
+ }
+
+ P(":: Mapped %d buffers", ix);
+
+ while (ix--)
+ {
+ ERR_ADD(res, unmap_1D(mem[ix].dataPtr, length, 0, mem[ix].val, mem[ix].bufPtr));
+ FREE(mem[ix].buffer);
+ }
+#else
+ int res = TESTERR_NOTIMPLEMENTED;
+#endif
+ return res;
+}
+
+/**
+ * This stress test allocates/maps/frees/unmaps buffers over
+ * num_ops operations. The test maintains a set of slots that
+ * are initially NULL. For each operation, a slot is randomly
+ * selected. If the slot is not used, it is filled randomly
+ * with a 1D, 2D, NV12 or mapped buffer. If it is used, the
+ * slot is cleared by freeing/unmapping the buffer already
+ * there. The buffers are filled on alloc/map and this is
+ * checked on free/unmap to verify that there was no memory
+ * corruption. Failed allocations and maps are ignored, as we may
+ * run out of memory. The return value is the first error code
+ * encountered, or 0 on success.
+ *
+ * This test sets the seed so that it produces reproducible
+ * results.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param num_ops Number of operations to perform
+ * @param num_slots Number of slots to maintain
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int star_test(uint32_t num_ops, uint16_t num_slots)
+{
+ printf("Random set of %d Allocs/Maps and Frees/UnMaps for %d slots\n", num_ops, num_slots);
+ srand(0x4B72316A);
+ struct data {
+ int op;
+ uint16_t val;
+ pixels_t width, height;
+ bytes_t length;
+ void *bufPtr;
+ void *buffer;
+ void *dataPtr;
+ } *mem;
+
+ /* allocate memory state */
+ mem = NEWN(struct data, num_slots);
+ if (!mem) return NOT_P(mem,!=,NULL);
+
+ /* perform alloc/free/unmaps */
+ int res = 0, ix;
+ while (!res && num_ops--)
+ {
+ ix = rand() % num_slots;
+ /* see if we need to free/unmap data */
+ if (mem[ix].bufPtr)
+ {
+ /* check memory fill */
+ switch (mem[ix].op)
+ {
+ case 0: res = unmap_1D(mem[ix].dataPtr, mem[ix].length, 0, mem[ix].val, mem[ix].bufPtr);
+ FREE(mem[ix].buffer);
+ break;
+ case 1: res = free_1D(mem[ix].length, 0, mem[ix].val, mem[ix].bufPtr); break;
+ case 2: res = free_2D(mem[ix].width, mem[ix].height, PIXEL_FMT_8BIT, 0, mem[ix].val, mem[ix].bufPtr); break;
+ case 3: res = free_2D(mem[ix].width, mem[ix].height, PIXEL_FMT_16BIT, 0, mem[ix].val, mem[ix].bufPtr); break;
+ case 4: res = free_2D(mem[ix].width, mem[ix].height, PIXEL_FMT_32BIT, 0, mem[ix].val, mem[ix].bufPtr); break;
+ case 5: res = free_NV12(mem[ix].width, mem[ix].height, mem[ix].val, mem[ix].bufPtr); break;
+ }
+ P("%s[%p]", mem[ix].op ? "free" : "unmap", mem[ix].bufPtr);
+ ZERO(mem[ix]);
+ }
+ /* we need to allocate/map data */
+ else
+ {
+ int op = rand();
+ /* pick a resolution; each letter in the selector string below is a
+ 1-in-16 chance (64x64 is most likely, 1080p the least) */
+ pixels_t width, height;
+ switch ("AAAABBBBCCCDDEEF"[op & 15]) {
+ case 'F': width = 1920; height = 1080; break;
+ case 'E': width = 1280; height = 720; break;
+ case 'D': width = 640; height = 480; break;
+ case 'C': width = 848; height = 480; break;
+ case 'B': width = 176; height = 144; break;
+ case 'A': width = height = 64; break;
+ }
+ mem[ix].length = (bytes_t)width * height;
+ mem[ix].width = width;
+ mem[ix].height = height;
+ mem[ix].val = ((uint16_t)rand());
+
+ /* perform operation */
+ mem[ix].op = "AAABBBBCCCCDDDDE"[(op >> 4) & 15] - 'A';
+ switch (mem[ix].op)
+ {
+ case 0: /* map 1D buffer */
+#ifdef __MAP_OK__
+ /* allocate aligned buffer */
+ mem[ix].length = (mem[ix].length + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1);
+ mem[ix].buffer = malloc(mem[ix].length + PAGE_SIZE - 1);
+ if (mem[ix].buffer)
+ {
+ mem[ix].dataPtr = (void *)(((uint32_t)mem[ix].buffer + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1));
+ mem[ix].bufPtr = map_1D(mem[ix].dataPtr, mem[ix].length, 0, mem[ix].val);
+ if (!mem[ix].bufPtr) FREE(mem[ix].buffer);
+ }
+ P("map[l=0x%x] = %p", mem[ix].length, mem[ix].bufPtr);
+ break;
+#else
+ mem[ix].op = 1;
+#endif
+ case 1:
+ mem[ix].bufPtr = alloc_1D(mem[ix].length, 0, mem[ix].val);
+ P("alloc[l=0x%x] = %p", mem[ix].length, mem[ix].bufPtr);
+ break;
+ case 2:
+ mem[ix].bufPtr = alloc_2D(mem[ix].width, mem[ix].height, PIXEL_FMT_8BIT, 0, mem[ix].val);
+ P("alloc[%d*%d*8] = %p", mem[ix].width, mem[ix].height, mem[ix].bufPtr);
+ break;
+ case 3:
+ mem[ix].bufPtr = alloc_2D(mem[ix].width, mem[ix].height, PIXEL_FMT_16BIT, 0, mem[ix].val);
+ P("alloc[%d*%d*16] = %p", mem[ix].width, mem[ix].height, mem[ix].bufPtr);
+ break;
+ case 4:
+ mem[ix].bufPtr = alloc_2D(mem[ix].width, mem[ix].height, PIXEL_FMT_32BIT, 0, mem[ix].val);
+ P("alloc[%d*%d*32] = %p", mem[ix].width, mem[ix].height, mem[ix].bufPtr);
+ break;
+ case 5:
+ mem[ix].bufPtr = alloc_NV12(mem[ix].width, mem[ix].height, mem[ix].val);
+ P("alloc[%d*%d*NV12] = %p", mem[ix].width, mem[ix].height, mem[ix].bufPtr);
+ break;
+ }
+
+ /* check all previous buffers */
+#ifdef STAR_TRACE_MEM
+ for (ix = 0; ix < num_slots; ix++)
+ {
+ MemAllocBlock blk;
+ if (mem[ix].bufPtr)
+ {
+ if(0) P("ptr=%p, op=%d, w=%d, h=%d, l=%x, val=%x",
+ mem[ix].bufPtr, mem[ix].op, mem[ix].width, mem[ix].height,
+ mem[ix].length, mem[ix].val);
+ switch (mem[ix].op)
+ {
+ case 0: case 1:
+ blk.pixelFormat = PIXEL_FMT_PAGE;
+ blk.dim.len = mem[ix].length;
+ break;
+ case 5:
+ blk.pixelFormat = PIXEL_FMT_16BIT;
+ blk.dim.area.width = mem[ix].width >> 1;
+ blk.dim.area.height = mem[ix].height >> 1;
+ blk.stride = def_stride(mem[ix].width); /* same for Y and UV */
+ blk.ptr = mem[ix].bufPtr + mem[ix].height * blk.stride;
+ check_mem(mem[ix].val, &blk);
+ case 2:
+ blk.pixelFormat = PIXEL_FMT_8BIT;
+ blk.dim.area.width = mem[ix].width;
+ blk.dim.area.height = mem[ix].height;
+ blk.stride = def_stride(mem[ix].width);
+ break;
+ case 3:
+ blk.pixelFormat = PIXEL_FMT_16BIT;
+ blk.dim.area.width = mem[ix].width;
+ blk.dim.area.height = mem[ix].height;
+ blk.stride = def_stride(mem[ix].width * 2);
+ break;
+ case 4:
+ blk.pixelFormat = PIXEL_FMT_32BIT;
+ blk.dim.area.width = mem[ix].width;
+ blk.dim.area.height = mem[ix].height;
+ blk.stride = def_stride(mem[ix].width * 4);
+ break;
+ }
+ blk.ptr = mem[ix].bufPtr;
+ check_mem(mem[ix].val, &blk);
+ }
+ }
+#endif
+ }
+ }
+
+ /* unmap and free everything */
+ for (ix = 0; ix < num_slots; ix++)
+ {
+ if (mem[ix].bufPtr)
+ {
+ /* check memory fill */
+ switch (mem[ix].op)
+ {
+ case 0: ERR_ADD(res, unmap_1D(mem[ix].dataPtr, mem[ix].length, 0, mem[ix].val, mem[ix].bufPtr));
+ FREE(mem[ix].buffer);
+ break;
+ case 1: ERR_ADD(res, free_1D(mem[ix].length, 0, mem[ix].val, mem[ix].bufPtr)); break;
+ case 2: ERR_ADD(res, free_2D(mem[ix].width, mem[ix].height, PIXEL_FMT_8BIT, 0, mem[ix].val, mem[ix].bufPtr)); break;
+ case 3: ERR_ADD(res, free_2D(mem[ix].width, mem[ix].height, PIXEL_FMT_16BIT, 0, mem[ix].val, mem[ix].bufPtr)); break;
+ case 4: ERR_ADD(res, free_2D(mem[ix].width, mem[ix].height, PIXEL_FMT_32BIT, 0, mem[ix].val, mem[ix].bufPtr)); break;
+ case 5: ERR_ADD(res, free_NV12(mem[ix].width, mem[ix].height, mem[ix].val, mem[ix].bufPtr)); break;
+ }
+ }
+ }
+ FREE(mem);
+
+ return res;
+}
+
+/**
+ * This stress test exercises the TILER driver directly through
+ * its ioctl interface, performing num_ops operations. The test
+ * maintains a set of slots that are initially NULL. For each
+ * operation, a slot is randomly selected. If the slot is empty,
+ * it is filled by either mapping a malloc-ed 1D buffer
+ * (TILIOC_MBLK) or allocating a 1D page-mode or 2D 8/16/32-bit
+ * block (TILIOC_GBLK). If the slot is in use, the block already
+ * there is freed (TILIOC_FBLK) and the slot is cleared. Failed
+ * allocations and maps are ignored, as we may run out of
+ * memory. The return value is the first error code
+ * encountered, or 0 on success.
+ *
+ * This test sets the seed so that it produces reproducible
+ * results.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param num_ops Number of operations to perform
+ * @param num_slots Number of slots to maintain
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int star_tiler_test(uint32_t num_ops, uint16_t num_slots)
+{
+ printf("Random set of %d tiler Allocs/Maps and Frees/UnMaps for %d slots\n", num_ops, num_slots);
+ srand(0x4B72316A);
+ struct data {
+ int op;
+ struct tiler_block_info blk;
+ void *buffer;
+ } *mem;
+
+ /* allocate memory state */
+ mem = NEWN(struct data, num_slots);
+ if (!mem) return NOT_P(mem,!=,NULL);
+
+ /* perform alloc/free/unmaps */
+ int ix, td = A_S(open("/dev/tiler", O_RDWR),>=,0), res = td < 0 ? td : 0;
+ while (!res && num_ops--)
+ {
+ ix = rand() % num_slots;
+ /* see if we need to free/unmap data */
+ if (mem[ix].blk.id)
+ {
+ P("free [0x%x(0x%x)]", mem[ix].blk.id, mem[ix].blk.ssptr);
+ res = A_S(ioctl(td, TILIOC_FBLK, &mem[ix].blk),==,0);
+ FREE(mem[ix].buffer);
+ ZERO(mem[ix]);
+ }
+ /* we need to allocate/map data */
+ else
+ {
+ int op = rand();
+ /* set width */
+ pixels_t width, height;
+ switch ("AAAABBBBCCCDDEEF"[op & 15]) {
+ case 'F': width = 1920; height = 1080; break;
+ case 'E': width = 1280; height = 720; break;
+ case 'D': width = 640; height = 480; break;
+ case 'C': width = 848; height = 480; break;
+ case 'B': width = 176; height = 144; break;
+ case 'A': width = height = 64; break;
+ }
+ bytes_t length = (bytes_t)width * height;
+
+ /* perform operation */
+ mem[ix].op = "AAABBBBCCCCDDDDE"[(op >> 4) & 15] - 'A';
+ switch (mem[ix].op)
+ {
+ case 0: /* map 1D buffer */
+ /* allocate aligned buffer */
+ length = (length + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1);
+ mem[ix].buffer = malloc(length + PAGE_SIZE - 1);
+ if (mem[ix].buffer)
+ {
+ mem[ix].blk.dim.len = length;
+ mem[ix].blk.fmt = TILFMT_PAGE;
+ mem[ix].blk.ptr = (void *)(((uint32_t)mem[ix].buffer + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1));
+ res = A_S(ioctl(td, TILIOC_MBLK, &mem[ix].blk),==,0);
+ if (res)
+ FREE(mem[ix].buffer);
+ }
+ P("map[l=0x%x] = 0x%x(0x%x)", length, mem[ix].blk.id, mem[ix].blk.ssptr);
+ break;
+ case 1:
+ mem[ix].blk.dim.len = length;
+ mem[ix].blk.fmt = TILFMT_PAGE;
+ res = A_S(ioctl(td, TILIOC_GBLK, &mem[ix].blk),==,0);
+ P("alloc[l=0x%x] = 0x%x(0x%x)", length, mem[ix].blk.id, mem[ix].blk.ssptr);
+ break;
+ case 2: case 3: case 4:
+ mem[ix].blk.dim.area.width = width;
+ mem[ix].blk.dim.area.height = height;
+ mem[ix].blk.fmt = TILFMT_8BIT + mem[ix].op - 2;
+ res = A_S(ioctl(td, TILIOC_GBLK, &mem[ix].blk),==,0);
+ P("alloc[%d*%d*%d] = 0x%x(0x%x)", width, height, 8 << (mem[ix].op -2), mem[ix].blk.id, mem[ix].blk.ssptr);
+ break;
+ }
+ }
+ }
+
+ /* unmap and free everything */
+ for (ix = 0; ix < num_slots; ix++)
+ {
+ if (mem[ix].blk.id)
+ {
+ res = A_S(ioctl(td, TILIOC_FBLK, &mem[ix].blk),==,0);
+ FREE(mem[ix].buffer);
+ }
+ }
+ ERR_ADD_S(res, close(td));
+ FREE(mem);
+
+ return res;
+}
+
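+/* NEGA(exp) evaluates a MemMgr_Alloc expression that is expected to fail:
+ it asserts that the result is NULL, frees the block if one was returned
+ anyway, and yields non-zero if the negative test failed. */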
+#define NEGA(exp) ({ void *__ptr__ = A_P(exp,==,NULL); if (__ptr__) MemMgr_Free(__ptr__); __ptr__ != NULL; })
+
+/**
+ * Performs negative tests for MemMgr_Alloc.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int neg_alloc_tests()
+{
+ printf("Negative Alloc tests\n");
+
+ MemAllocBlock block[2], *blk;
+ memset(&block, 0, sizeof(block));
+
+ int ret = 0, num_blocks;
+
+ for (num_blocks = 1; num_blocks < 3; num_blocks++)
+ {
+ blk = block + num_blocks - 1;
+
+ P("/* bad pixel format */");
+ blk->pixelFormat = PIXEL_FMT_MIN - 1;
+ blk->dim.len = PAGE_SIZE;
+ ret |= NEGA(MemMgr_Alloc(block, num_blocks));
+ blk->pixelFormat = PIXEL_FMT_MAX + 1;
+ ret |= NEGA(MemMgr_Alloc(block, num_blocks));
+
+ P("/* bad 1D stride */");
+ blk->pixelFormat = PIXEL_FMT_PAGE;
+ blk->stride = PAGE_SIZE - 1;
+ ret |= NEGA(MemMgr_Alloc(block, num_blocks));
+
+ P("/* 0 1D length */");
+ blk->dim.len = blk->stride = 0;
+ ret |= NEGA(MemMgr_Alloc(block, num_blocks));
+
+ P("/* bad 2D stride */");
+ blk->pixelFormat = PIXEL_FMT_8BIT;
+ blk->dim.area.width = PAGE_SIZE - 1;
+ blk->stride = PAGE_SIZE - 1;
+ blk->dim.area.height = 16;
+ ret |= NEGA(MemMgr_Alloc(block, num_blocks));
+
+ P("/* bad 2D width */");
+ blk->stride = blk->dim.area.width = 0;
+ ret |= NEGA(MemMgr_Alloc(block, num_blocks));
+
+ P("/* bad 2D height */");
+ blk->dim.area.height = 0;
+ blk->dim.area.width = 16;
+ ret |= NEGA(MemMgr_Alloc(block, num_blocks));
+
+ /* good 2D block */
+ blk->dim.area.height = 16;
+ }
+
+ block[0].pixelFormat = block[1].pixelFormat = PIXEL_FMT_8BIT;
+ block[0].dim.area.width = 16384;
+ block[0].dim.area.height = block[1].dim.area.width = 16;
+ block[1].dim.area.height = 8192;
+ ret |= NEGA(MemMgr_Alloc(block, 2));
+
+ return ret;
+}
+
+/**
+ * Performs negative tests for MemMgr_Free.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int neg_free_tests()
+{
+ printf("Negative Free tests\n");
+
+ void *ptr = alloc_2D(2500, 10, PIXEL_FMT_16BIT, 2 * PAGE_SIZE, 0);
+ int ret = 0;
+
+ MemMgr_Free(ptr);
+
+ P("/* free something twice */");
+ ret |= NOT_I(MemMgr_Free(ptr),!=,0);
+
+ P("/* free NULL */");
+ ret |= NOT_I(MemMgr_Free(NULL),!=,0);
+
+ P("/* free arbitrary value */");
+ ret |= NOT_I(MemMgr_Free((void *)0x12345678),!=,0);
+
+ P("/* free mapped buffer */");
+ void *buffer = malloc(PAGE_SIZE * 2);
+ void *dataPtr = (void *)(((uint32_t)buffer + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1));
+ ptr = map_1D(dataPtr, PAGE_SIZE, 0, 0);
+ ret |= NOT_I(MemMgr_Free(ptr),!=,0);
+
+ MemMgr_UnMap(ptr);
+
+ return ret;
+}
+
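+/* NEGM(exp) evaluates a MemMgr_Map expression that is expected to fail:
+ it asserts that the result is NULL, unmaps the block if one was returned
+ anyway, and yields non-zero if the negative test failed. */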
+#define NEGM(exp) ({ void *__ptr__ = A_P(exp,==,NULL); if (__ptr__) MemMgr_UnMap(__ptr__); __ptr__ != NULL; })
+
+/**
+ * Performs negative tests for MemMgr_Map.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int neg_map_tests()
+{
+ printf("Negative Map tests\n");
+
+ MemAllocBlock block[2], *blk;
+ memset(&block, 0, sizeof(block));
+
+ int ret = 0, num_blocks;
+
+ for (num_blocks = 1; num_blocks < 3; num_blocks++)
+ {
+ blk = block + num_blocks - 1;
+
+ P("/* bad pixel format */");
+ blk->pixelFormat = PIXEL_FMT_MIN - 1;
+ blk->dim.len = PAGE_SIZE;
+ ret |= NEGM(MemMgr_Map(block, num_blocks));
+ blk->pixelFormat = PIXEL_FMT_MAX + 1;
+ ret |= NEGM(MemMgr_Map(block, num_blocks));
+
+ P("/* bad 1D stride */");
+ blk->pixelFormat = PIXEL_FMT_PAGE;
+ blk->stride = PAGE_SIZE - 1;
+ ret |= NEGM(MemMgr_Map(block, num_blocks));
+
+ P("/* 0 1D length */");
+ blk->dim.len = blk->stride = 0;
+ ret |= NEGM(MemMgr_Map(block, num_blocks));
+
+ P("/* bad 2D stride */");
+ blk->pixelFormat = PIXEL_FMT_8BIT;
+ blk->dim.area.width = PAGE_SIZE - 1;
+ blk->stride = PAGE_SIZE - 1;
+ blk->dim.area.height = 16;
+ ret |= NEGM(MemMgr_Map(block, num_blocks));
+
+ P("/* bad 2D width */");
+ blk->stride = blk->dim.area.width = 0;
+ ret |= NEGM(MemMgr_Map(block, num_blocks));
+
+ P("/* bad 2D height */");
+ blk->dim.area.height = 0;
+ blk->dim.area.width = 16;
+ ret |= NEGM(MemMgr_Map(block, num_blocks));
+
+ /* good 2D block */
+ blk->dim.area.height = 16;
+ }
+
+ P("/* 2 buffers */");
+ ret |= NEGM(MemMgr_Map(block, 2));
+
+ P("/* 1 2D buffer */");
+ ret |= NEGM(MemMgr_Map(block, 1));
+
+ P("/* 1 1D buffer with no address */");
+ block[0].pixelFormat = PIXEL_FMT_PAGE;
+ block[0].dim.len = 2 * PAGE_SIZE;
+ block[0].ptr = NULL;
+ ret |= NEGM(MemMgr_Map(block, 1));
+
+ P("/* 1 1D buffer with not aligned start address */");
+ void *buffer = malloc(3 * PAGE_SIZE);
+ void *dataPtr = (void *)(((uint32_t)buffer + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1));
+ block[0].ptr = dataPtr + 3;
+ ret |= NEGM(MemMgr_Map(block, 1));
+
+ P("/* 1 1D buffer with not aligned length */");
+ block[0].ptr = dataPtr;
+ block[0].dim.len -= 5;
+ ret |= NEGM(MemMgr_Map(block, 1));
+
+#if 0 /* TODO: it's possible that our va falls within the TILER addr range */
+ P("/* Mapping a tiled 1D buffer */");
+ void *ptr = alloc_1D(PAGE_SIZE * 2, 0, 0);
+ dataPtr = (void *)(((uint32_t)ptr + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1));
+ block[0].ptr = dataPtr;
+ block[0].dim.len = PAGE_SIZE;
+ ret |= NEGM(MemMgr_Map(block, 1));
+
+ MemMgr_Free(ptr);
+#endif
+
+ return ret;
+}
+
+/**
+ * Performs negative tests for MemMgr_UnMap.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int neg_unmap_tests()
+{
+ printf("Negative Unmap tests\n");
+
+ void *ptr = alloc_1D(PAGE_SIZE, 0, 0);
+ int ret = 0;
+
+ P("/* unmap alloced buffer */");
+ ret |= NOT_I(MemMgr_UnMap(ptr),!=,0);
+
+ MemMgr_Free(ptr);
+
+ void *buffer = malloc(PAGE_SIZE * 2);
+ void *dataPtr = (void *)(((uint32_t)buffer + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1));
+ ptr = map_1D(dataPtr, PAGE_SIZE, 0, 0);
+ MemMgr_UnMap(ptr);
+
+ P("/* unmap something twice */");
+ ret |= NOT_I(MemMgr_UnMap(ptr),!=,0);
+
+ P("/* unmap NULL */");
+ ret |= NOT_I(MemMgr_UnMap(NULL),!=,0);
+
+ P("/* unmap arbitrary value */");
+ ret |= NOT_I(MemMgr_UnMap((void *)0x12345678),!=,0);
+
+ return ret;
+}
+
+/**
+ * Performs negative tests for the MemMgr_Is... functions, as well as
+ * MemMgr_GetStride, TilerMem_VirtToPhys and TilerMem_GetStride.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int neg_check_tests()
+{
+ printf("Negative Is... tests\n");
+ void *ptr = malloc(32);
+
+ int ret = 0;
+
+ ret |= NOT_I(MemMgr_Is1DBlock(NULL),==,FALSE);
+ ret |= NOT_I(MemMgr_Is1DBlock((void *)0x12345678),==,FALSE);
+ ret |= NOT_I(MemMgr_Is1DBlock(ptr),==,FALSE);
+ ret |= NOT_I(MemMgr_Is2DBlock(NULL),==,FALSE);
+ ret |= NOT_I(MemMgr_Is2DBlock((void *)0x12345678),==,FALSE);
+ ret |= NOT_I(MemMgr_Is2DBlock(ptr),==,FALSE);
+ ret |= NOT_I(MemMgr_IsMapped(NULL),==,FALSE);
+ ret |= NOT_I(MemMgr_IsMapped((void *)0x12345678),==,FALSE);
+ ret |= NOT_I(MemMgr_IsMapped(ptr),==,FALSE);
+
+ ret |= NOT_I(MemMgr_GetStride(NULL),==,0);
+ ret |= NOT_I(MemMgr_GetStride((void *)0x12345678),==,0);
+ ret |= NOT_I(MemMgr_GetStride(ptr),==,PAGE_SIZE);
+
+ ret |= NOT_P(TilerMem_VirtToPhys(NULL),==,0);
+ ret |= NOT_P(TilerMem_VirtToPhys((void *)0x12345678),==,0);
+ ret |= NOT_P(TilerMem_VirtToPhys(ptr),!=,0);
+
+ ret |= NOT_I(TilerMem_GetStride(TilerMem_VirtToPhys(NULL)),==,0);
+ ret |= NOT_I(TilerMem_GetStride(TilerMem_VirtToPhys((void *)0x12345678)),==,0);
+ ret |= NOT_I(TilerMem_GetStride(TilerMem_VirtToPhys(ptr)),==,0);
+
+ FREE(ptr);
+
+ return ret;
+}
+
+DEFINE_TESTS(TESTS)
+
+/**
+ * We run the same identity check before and after running the
+ * tests.
+ *
+ * @author a0194118 (9/12/2009)
+ */
+void memmgr_identity_test(void *ptr)
+{
+ /* also execute internal unit tests - this also verifies that we did not
+ keep any references */
+ __test__MemMgr();
+}
+
+/**
+ * Main test function. Checks arguments for test case ranges,
+ * runs tests and prints usage or test list if required.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param argc Number of arguments
+ * @param argv Arguments
+ *
+ * @return -1 on usage or test list, otherwise # of failed
+ * tests.
+ */
+int main(int argc, char **argv)
+{
+ return TestLib_Run(argc, argv,
+ memmgr_identity_test, memmgr_identity_test, NULL);
+}
+
diff --git a/tiler/testlib.c b/tiler/testlib.c
new file mode 100644
index 0000000..63d1298
--- /dev/null
+++ b/tiler/testlib.c
@@ -0,0 +1,163 @@
+/*
+ * testlib.c
+ *
+ * Unit test interface.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* retrieve type definitions */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "testlib.h"
+
+#include <utils.h>
+#include <debug_utils.h>
+
+#define TESTLIB_OK 0
+#define TESTLIB_FAIL 1
+
+/** Returns TRUE iff str is a whole unsigned int */
+#define is_uint(str) \
+ ({ unsigned i; char c; sscanf(str, "%u%c", &i, &c) == 1; })
+
+extern int __internal__TestLib_DoList(int id);
+
+/**
+ * Prints test result and returns summary result
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param res Test result
+ *
+ * @return TESTLIB_OK on success, TESTLIB_FAIL on failure,
+ * TESTLIB_UNAVAILABLE if the test is not available
+ */
+int __internal__TestLib_Report(int res)
+{
+ switch (res)
+ {
+ case TESTLIB_UNAVAILABLE:
+ printf("==> TEST NOT AVAILABLE\n");
+ fflush(stdout);
+ return TESTLIB_UNAVAILABLE;
+ case 0:
+ printf("==> TEST OK\n");
+ fflush(stdout);
+ return TESTLIB_OK;
+ default:
+ printf("==> TEST FAIL(%d)\n", res);
+ fflush(stdout);
+ return TESTLIB_FAIL;
+ }
+}
+
+void __internal__TestLib_NullFn(void *ptr)
+{
+}
+
+int TestLib_Run(int argc, char **argv, void(*init_fn)(void *),
+ void(*exit_fn)(void *), void *ptr)
+{
+ int start, end, res, failed = 0, succeeded = 0, unavailable = 0;
+
+ /* all tests */
+ if (argc == 1)
+ {
+ start = 1; end = -1;
+ }
+ /* test list */
+ else if (argc == 2 && !strcmp(argv[1], "list"))
+ {
+ __internal__TestLib_DoList(0);
+ return -1;
+ }
+ /* single test */
+ else if (argc == 2 && is_uint(argv[1]))
+ {
+ start = end = atoi(argv[1]);
+ }
+ /* open range .. b */
+ else if (argc == 3 && !strcmp(argv[1], "..") && is_uint(argv[2]))
+ {
+ start = 1;
+ end = atoi(argv[2]);
+ }
+ /* open range a .. */
+ else if (argc == 3 && !strcmp(argv[2], "..") && is_uint(argv[1]))
+ {
+ start = atoi(argv[1]);
+ end = -1;
+ }
+ else if (argc == 4 && !strcmp(argv[2], "..") && is_uint(argv[1]) && is_uint(argv[3]))
+ {
+ start = atoi(argv[1]);
+ end = atoi(argv[3]);
+ }
+ else
+ {
+ fprintf(stderr, "Usage: %s [<range>], where <range> is\n"
+ " empty: run all tests\n"
+ " list: list tests\n"
+ " ix: run test #ix\n"
+ " a ..: run tests #a, #a+1, ...\n"
+ " .. b: run tests #1, #2, .. #b\n"
+ " a .. b: run tests #a, #a+1, .. #b\n", argv[0]);
+ fflush(stderr);
+ return -1;
+ }
+
+ /* execute tests */
+ init_fn(ptr);
+
+ do
+ {
+ res = __internal__TestLib_DoList(start++);
+ if (res == TESTLIB_FAIL) failed++;
+ else if (res == TESTLIB_OK) succeeded++;
+ else if (res == TESTLIB_UNAVAILABLE) unavailable++;
+ printf("so far FAILED: %d, SUCCEEDED: %d, UNAVAILABLE: %d\n", failed, succeeded,
+ unavailable);
+ fflush(stdout);
+ } while (res != TESTLIB_INVALID && (end < 0 || start <= end));
+
+ printf("FAILED: %d, SUCCEEDED: %d, UNAVAILABLE: %d\n", failed, succeeded,
+ unavailable);
+ fflush(stdout);
+
+ /* run the exit function so the test app can do its final checks, e.g.
+ verify that it did not keep any references */
+ exit_fn(ptr);
+
+ return failed;
+}
+
diff --git a/tiler/testlib.h b/tiler/testlib.h
new file mode 100644
index 0000000..bc1a799
--- /dev/null
+++ b/tiler/testlib.h
@@ -0,0 +1,101 @@
+/*
+ * testlib.h
+ *
+ * Unit test interface API.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TESTLIB_H_
+#define _TESTLIB_H_
+
+/* error type definitions */
+#define TESTLIB_UNAVAILABLE -65378
+#define TESTLIB_INVALID -1
+
+#define T(test) ++i; \
+ if (!id || i == id) printf("TEST #% 3d - %s\n", i, #test); \
+ if (i == id) { \
+ printf("TEST_DESC - "); \
+ fflush(stdout); \
+ return __internal__TestLib_Report(test); \
+ }
+
+/* test run function that must be defined from the test app */
+
+/**
+ * Runs a specified test by id, or lists all test cases. This
+ * function uses the TESTS macros, and defines each T(test) to
+ * run a test starting from id == 1, and then return the result.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param id Test case id, or 0 if only listing test cases
+ *
+ * @return Summary result: TESTLIB_OK, TESTLIB_FAIL, TESTLIB_INVALID
+ * or TESTLIB_UNAVAILABLE.
+ */
+#define TESTS_ \
+ int __internal__TestLib_DoList(int id) { int i = 0;
+
+#define _TESTS \
+ return TESTLIB_INVALID; }
+
+#define DEFINE_TESTS(TESTS) TESTS_ TESTS _TESTS
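+
+/*
+ * Illustrative usage (a minimal sketch; the list name and test functions
+ * below are placeholders, not part of this header): a test app defines
+ * its test list with T() entries, expands it with DEFINE_TESTS, and
+ * hands control to TestLib_Run:
+ *
+ * #define MY_TESTS \
+ * T(alloc_1D_test(4096, 0)) \
+ * T(alloc_2D_test(176, 144, PIXEL_FMT_8BIT))
+ * DEFINE_TESTS(MY_TESTS)
+ *
+ * int main(int argc, char **argv)
+ * {
+ * return TestLib_Run(argc, argv, nullfn, nullfn, NULL);
+ * }
+ */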
+
+/* internal function prototypes and defines */
+extern int __internal__TestLib_Report(int res);
+extern void __internal__TestLib_NullFn(void *ptr);
+
+#define nullfn __internal__TestLib_NullFn
+
+/**
+ * Parses argument list, prints usage on error, lists test
+ * cases, runs tests and reports results.
+ *
+ * @author a0194118 (9/12/2009)
+ *
+ * @param argc Number of test arguments
+ * @param argv Test argument array
+ * @param init_fn Initialization function of void fn(void *).
+ * This is called before the testing.
+ * @param exit_fn Deinit function of void fn(void *). This is
+ * called after the testing concludes.
+ * @param ptr Custom pointer that is passed into the
+ * initialization functions.
+ *
+ * @return # of test cases failed, 0 on success, -1 if no tests
+ * were run because of an error or a list request.
+ */
+int TestLib_Run(int argc, char **argv, void(*init_fn)(void *),
+ void(*exit_fn)(void *), void *ptr);
+
+#endif
+
diff --git a/tiler/tiler.h b/tiler/tiler.h
new file mode 100644
index 0000000..8358f8d
--- /dev/null
+++ b/tiler/tiler.h
@@ -0,0 +1,105 @@
+/*
+ * tiler.h
+ *
+ * TILER driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TILER_H_
+#define _TILER_H_
+
+#define TILER_MEM_8BIT 0x60000000
+#define TILER_MEM_16BIT 0x68000000
+#define TILER_MEM_32BIT 0x70000000
+#define TILER_MEM_PAGED 0x78000000
+#define TILER_MEM_END 0x80000000
+
+#define TILER_PAGE 0x1000
+#define TILER_WIDTH 256
+#define TILER_HEIGHT 128
+#define TILER_BLOCK_WIDTH 64
+#define TILER_BLOCK_HEIGHT 64
+#define TILER_LENGTH (TILER_WIDTH * TILER_HEIGHT * TILER_PAGE)
+
+#define TILER_DEVICE_PATH "/dev/tiler"
+#define TILER_MAX_NUM_BLOCKS 16
+
+enum tiler_fmt {
+ TILFMT_MIN = -2,
+ TILFMT_INVALID = -2,
+ TILFMT_NONE = -1,
+ TILFMT_8BIT = 0,
+ TILFMT_16BIT = 1,
+ TILFMT_32BIT = 2,
+ TILFMT_PAGE = 3,
+ TILFMT_MAX = 3,
+ TILFMT_8AND16 = 4,
+};
+
+struct area {
+ uint16_t width;
+ uint16_t height;
+};
+
+struct tiler_block_info {
+ enum tiler_fmt fmt;
+ union {
+ struct area area;
+ uint32_t len;
+ } dim;
+ uint32_t stride;
+ void *ptr;
+ uint32_t id;
+ uint32_t key;
+ uint32_t group_id;
+ uint32_t ssptr;
+};
+
+struct tiler_buf_info {
+ uint32_t num_blocks;
+ struct tiler_block_info blocks[TILER_MAX_NUM_BLOCKS];
+ uint32_t offset;
+ uint32_t length;
+};
+
+#define TILIOC_GBLK _IOWR('z', 100, struct tiler_block_info)
+#define TILIOC_FBLK _IOW('z', 101, struct tiler_block_info)
+#define TILIOC_GSSP _IOWR('z', 102, uint32_t)
+#define TILIOC_MBLK _IOWR('z', 103, struct tiler_block_info)
+#define TILIOC_UMBLK _IOW('z', 104, struct tiler_block_info)
+#define TILIOC_QBUF _IOWR('z', 105, struct tiler_buf_info)
+#define TILIOC_RBUF _IOWR('z', 106, struct tiler_buf_info)
+#define TILIOC_URBUF _IOWR('z', 107, struct tiler_buf_info)
+#define TILIOC_QBLK _IOWR('z', 108, struct tiler_block_info)
+#define TILIOC_PRBLK _IOW('z', 109, struct tiler_block_info)
+#define TILIOC_URBLK _IOW('z', 110, uint32_t)
+
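+/*
+ * Illustrative sketch (not part of this header; needs <fcntl.h>,
+ * <sys/ioctl.h> and <unistd.h>): allocating and freeing a 2D block
+ * directly through the ioctl interface, as the tests do:
+ *
+ * int fd = open(TILER_DEVICE_PATH, O_RDWR);
+ * struct tiler_block_info blk = {0};
+ * blk.fmt = TILFMT_8BIT;
+ * blk.dim.area.width = 176;
+ * blk.dim.area.height = 144;
+ * if (fd >= 0 && ioctl(fd, TILIOC_GBLK, &blk) == 0) {
+ * ... use blk.id and blk.ssptr ...
+ * ioctl(fd, TILIOC_FBLK, &blk);
+ * }
+ * if (fd >= 0) close(fd);
+ */
+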
+#endif
diff --git a/tiler/tiler_ptest.c b/tiler/tiler_ptest.c
new file mode 100644
index 0000000..7f06940
--- /dev/null
+++ b/tiler/tiler_ptest.c
@@ -0,0 +1,729 @@
+/*
+ * tiler_ptest.c
+ *
+ * Memory Allocator Interface tests.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* retrieve type definitions */
+#define __DEBUG__
+#undef __DEBUG_ENTRY__
+#define __DEBUG_ASSERT__
+
+#undef __WRITE_IN_STRIDE__
+#undef STAR_TRACE_MEM
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <stdint.h>
+#include <ctype.h>
+
+#ifdef HAVE_CONFIG_H
+ #include "config.h"
+#endif
+#include <utils.h>
+#include <list_utils.h>
+#include <debug_utils.h>
+#include <memmgr.h>
+#include <tilermem.h>
+#include <tilermem_utils.h>
+#include <testlib.h>
+
+#define FALSE 0
+
+/**
+ * Returns the default page stride for this block
+ *
+ * @author a0194118 (9/4/2009)
+ *
+ * @param width Width of 2D container
+ *
+ * @return Stride
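+ *
+ * For example, with the 4 KiB TILER page, def_stride(176) == 4096
+ * and def_stride(5000) == 8192.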
+ */
+static bytes_t def_stride(pixels_t width)
+{
+ return (PAGE_SIZE - 1 + (bytes_t)width) & ~(PAGE_SIZE - 1);
+}
+
+/**
+ * Returns the bytes per pixel for the pixel format.
+ *
+ * @author a0194118 (9/4/2009)
+ *
+ * @param pixelFormat Pixelformat
+ *
+ * @return Bytes per pixel
+ */
+static bytes_t def_bpp(pixel_fmt_t pixelFormat)
+{
+ return (pixelFormat == PIXEL_FMT_32BIT ? 4 :
+ pixelFormat == PIXEL_FMT_16BIT ? 2 : 1);
+}
+
+enum ptr_type {
+ ptr_empty = 0,
+ ptr_alloced,
+ ptr_tiler_alloced,
+};
+
+struct ptr_info {
+ int num_blocks;
+ struct tiler_block_info blocks[TILER_MAX_NUM_BLOCKS];
+ int ptr;
+ short type;
+ uint16_t val;
+};
+
+static void dump_block(struct tiler_block_info *blk, char *prefix, char *suffix)
+{
+ switch (blk->fmt)
+ {
+ case PIXEL_FMT_PAGE:
+ P("%s [p=%p(0x%x),l=0x%x,s=%d]%s", prefix, blk->ptr, blk->ssptr,
+ blk->dim.len, blk->stride, suffix);
+ break;
+ case PIXEL_FMT_8BIT:
+ case PIXEL_FMT_16BIT:
+ case PIXEL_FMT_32BIT:
+ P("%s [p=%p(0x%x),%d*%d*%d,s=%d]%s", prefix, blk->ptr, blk->ssptr,
+ blk->dim.area.width, blk->dim.area.height, def_bpp(blk->fmt) * 8,
+ blk->stride, suffix);
+ break;
+ default:
+ P("%s*[p=%p(0x%x),l=0x%x,s=%d,fmt=0x%x]%s", prefix, blk->ptr,
+ blk->ssptr, blk->dim.len, blk->stride, blk->fmt, suffix);
+ }
+}
+
+static void dump_slot(struct ptr_info* buf, char* prefix)
+{
+ P("%sbuf={n=%d,ptr=0x%x,type=%d,", prefix, buf->num_blocks, buf->ptr,
+ buf->type);
+ int ix = 0;
+ for (ix = 0; ix < buf->num_blocks; ix++)
+ {
+ dump_block(buf->blocks + ix, "", ix + 1 == buf->num_blocks ? "}" : "");
+ }
+}
+
+/**
+ * This method fills up a range of memory using a start address
+ * and start value. The method of filling ensures that
+ * accidentally overlapping regions have minimal chances of
+ * matching, even if the same starting value is used. This is
+ * because the difference between successive values varies as
+ * such. This series only repeats after 704189 values, so the
+ * probability of a match for a range of at least 2 values is
+ * less than 2*10^-11.
+ *
+ * V(i + 1) - V(i) = { 1, 2, 3, ..., 65535, 2, 4, 6, 8 ...,
+ * 65534, 3, 6, 9, 12, ..., 4, 8, 12, 16, ... }
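+ *
+ * For example, starting from V(0) = start, the first values written
+ * are start, start+1, start+3, start+6, start+10, ... (deltas of
+ * 1, 2, 3, 4, ... from the series above).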
+ *
+ * @author a0194118 (9/6/2009)
+ *
+ * @param start start value
+ * @param block pointer to block info structure
+ */
+void fill_mem(uint16_t start, MemAllocBlock *block)
+{
+ IN;
+ uint16_t *ptr = (uint16_t *)block->ptr, delta = 1, step = 1;
+ bytes_t height, width, stride, i;
+ if (block->pixelFormat == PIXEL_FMT_PAGE)
+ {
+ height = 1;
+ stride = width = block->dim.len;
+ }
+ else
+ {
+ height = block->dim.area.height;
+ width = block->dim.area.width;
+ stride = block->stride;
+ }
+ width *= def_bpp(block->pixelFormat);
+ bytes_t size = height * stride;
+
+ P("(%p,0x%x*0x%x,s=0x%x)=0x%x", block->ptr, width, height, stride, start);
+
+ CHK_I(width,<=,stride);
+ uint32_t *ptr32 = (uint32_t *)ptr;
+ while (height--)
+ {
+ if (block->pixelFormat == PIXEL_FMT_32BIT)
+ {
+ for (i = 0; i < width; i += sizeof(uint32_t))
+ {
+ uint32_t val = (start & 0xFFFF) | (((uint32_t)(start + delta) & 0xFFFF) << 16);
+ *ptr32++ = val;
+ start += delta;
+ delta += step;
+ /* increase step if overflown */
+ if (delta < step) delta = ++step;
+ start += delta;
+ delta += step;
+ /* increase step if overflown */
+ if (delta < step) delta = ++step;
+ }
+#ifdef __WRITE_IN_STRIDE__
+ while (i < stride && (height || ((PAGE_SIZE - 1) & (uint32_t)ptr32)))
+ {
+ *ptr32++ = 0;
+ i += sizeof(uint32_t);
+ }
+#else
+ ptr32 += (stride - i) / sizeof(uint32_t);
+#endif
+ }
+ else
+ {
+ for (i = 0; i < width; i += sizeof(uint16_t))
+ {
+ *ptr++ = start;
+ start += delta;
+ delta += step;
+ /* increase step if overflown */
+ if (delta < step) delta = ++step;
+ }
+#ifdef __WRITE_IN_STRIDE__
+ while (i < stride && (height || ((PAGE_SIZE - 1) & (uint32_t)ptr)))
+ {
+ *ptr++ = 0;
+ i += sizeof(uint16_t);
+ }
+#else
+ ptr += (stride - i) / sizeof(uint16_t);
+#endif
+
+ }
+ }
+ CHK_P((block->pixelFormat == PIXEL_FMT_32BIT ? (void *)ptr32 : (void *)ptr),==,
+ (block->ptr + size));
+ OUT;
+}
+
+/**
+ * This verifies if a range of memory at a given address was
+ * filled up using the start value.
+ *
+ * @author a0194118 (9/6/2009)
+ *
+ * @param start start value
+ * @param block pointer to block info structure
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int check_mem(uint16_t start, MemAllocBlock *block)
+{
+ IN;
+ uint16_t *ptr = (uint16_t *)block->ptr, delta = 1, step = 1;
+ bytes_t height, width, stride, r, i;
+ if (block->pixelFormat == PIXEL_FMT_PAGE)
+ {
+ height = 1;
+ stride = width = block->dim.len;
+ }
+ else
+ {
+ height = block->dim.area.height;
+ width = block->dim.area.width;
+ stride = block->stride;
+ }
+ width *= def_bpp(block->pixelFormat);
+
+ CHK_I(width,<=,stride);
+ uint32_t *ptr32 = (uint32_t *)ptr;
+ for (r = 0; r < height; r++)
+ {
+ if (block->pixelFormat == PIXEL_FMT_32BIT)
+ {
+ for (i = 0; i < width; i += sizeof(uint32_t))
+ {
+ uint32_t val = (start & 0xFFFF) | (((uint32_t)(start + delta) & 0xFFFF) << 16);
+ if (*ptr32++ != val) {
+ DP("assert: val[%u,%u] (=0x%x) != 0x%x", r, i, *--ptr32, val);
+ return R_I(MEMMGR_ERR_GENERIC);
+ }
+ start += delta;
+ delta += step;
+ /* increase step if overflown */
+ if (delta < step) delta = ++step;
+ start += delta;
+ delta += step;
+ /* increase step if overflown */
+ if (delta < step) delta = ++step;
+ }
+#ifdef __WRITE_IN_STRIDE__
+ while (i < stride && ((r < height - 1) || ((PAGE_SIZE - 1) & (uint32_t)ptr32)))
+ {
+ if (*ptr32++) {
+ DP("assert: val[%u,%u] (=0x%x) != 0", r, i, *--ptr32);
+ return R_I(MEMMGR_ERR_GENERIC);
+ }
+ i += sizeof(uint32_t);
+ }
+#else
+ ptr32 += (stride - i) / sizeof(uint32_t);
+#endif
+ }
+ else
+ {
+ for (i = 0; i < width; i += sizeof(uint16_t))
+ {
+ if (*ptr++ != start) {
+ DP("assert: val[%u,%u] (=0x%x) != 0x%x", r, i, *--ptr, start);
+ return R_I(MEMMGR_ERR_GENERIC);
+ }
+ start += delta;
+ delta += step;
+ /* increase step if overflown */
+ if (delta < step) delta = ++step;
+ }
+#ifdef __WRITE_IN_STRIDE__
+ while (i < stride && ((r < height - 1) || ((PAGE_SIZE - 1) & (uint32_t)ptr)))
+ {
+ if (*ptr++) {
+ DP("assert: val[%u,%u] (=0x%x) != 0", r, i, *--ptr);
+ return R_I(MEMMGR_ERR_GENERIC);
+ }
+ i += sizeof(uint16_t);
+ }
+#else
+ ptr += (stride - i) / sizeof(uint16_t);
+#endif
+ }
+ }
+ return R_I(MEMMGR_ERR_NONE);
+}
+
+/**
+ * This method allocates a tiled buffer composed of an arbitrary
+ * set of tiled blocks. If successful, it checks
+ * that the block information was updated with the pointer to
+ * the block. Additionally, it verifies the correct return
+ * values for MemMgr_IsMapped, MemMgr_Is1DBlock,
+ * MemMgr_Is2DBlock, MemMgr_GetStride, TilerMem_GetStride. It
+ * also verifies TilerMem_VirtToPhys using an internally stored
+ * value of the ssptr. If any of these verifications fail, the
+ * buffer is freed. Otherwise, it is filled using the given
+ * start value.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param num_blocks Number of blocks in the buffer
+ * @param blocks Block information
+ * @param val Fill start value
+ *
+ * @return Pointer to the allocated buffer on success, NULL on
+ * failure
+ */
+void *alloc_buf(int num_blocks, MemAllocBlock blocks[], uint16_t val)
+{
+ void *bufPtr = MemMgr_Alloc(blocks, num_blocks);
+ void *ptr = bufPtr;
+ int i;
+
+ for (i = 0; i < num_blocks; i++)
+ {
+ if (bufPtr)
+ {
+ pixel_fmt_t fmt = blocks[i].pixelFormat;
+ bytes_t cstride = (fmt == PIXEL_FMT_PAGE ? PAGE_SIZE :
+ fmt == PIXEL_FMT_8BIT ? TILER_STRIDE_8BIT :
+ fmt == PIXEL_FMT_16BIT ? TILER_STRIDE_16BIT :
+ TILER_STRIDE_32BIT);
+ if (NOT_P(blocks[i].ptr,==,ptr) ||
+ NOT_I(MemMgr_IsMapped(ptr),!=,0) ||
+ NOT_I(MemMgr_Is1DBlock(ptr),==,fmt == PIXEL_FMT_PAGE ? 1 : 0) ||
+ NOT_I(MemMgr_Is2DBlock(ptr),==,fmt == PIXEL_FMT_PAGE ? 0 : 1) ||
+ NOT_I(MemMgr_GetStride(bufPtr),==,blocks[i].stride) ||
+ NOT_P(TilerMem_VirtToPhys(ptr),==,blocks[i].reserved) ||
+ NOT_I(TilerMem_GetStride(TilerMem_VirtToPhys(ptr)),==,cstride) ||
+ NOT_L((PAGE_SIZE - 1) & (long)ptr,==,(PAGE_SIZE - 1) & blocks[i].reserved) ||
+ (fmt == PIXEL_FMT_PAGE || NOT_I(blocks[i].stride,!=,0)))
+ {
+ P(" for block %d", i);
+ MemMgr_Free(bufPtr);
+ return NULL;
+ }
+ fill_mem(val, blocks + i);
+ if (blocks[i].pixelFormat != PIXEL_FMT_PAGE)
+ {
+ ptr += def_stride(blocks[i].dim.area.width *
+ def_bpp(blocks[i].pixelFormat)) * blocks[i].dim.area.height;
+ }
+ else
+ {
+ ptr += def_stride(blocks[i].dim.len);
+ }
+ }
+ else
+ {
+ A_P(blocks[i].ptr,==,ptr);
+ A_I(blocks[i].reserved,==,0);
+ }
+ }
+ return bufPtr;
+}
+
+/**
+ * This method frees a tiled buffer composed of an arbitrary set
+ * of tiled blocks. The given start value is used to verify that
+ * the buffer is still correctly filled. In the event of any
+ * errors, the error value is returned.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param num_blocks Number of blocks in the buffer
+ * @param blocks Block information
+ * @param val Fill start value
+ * @param bufPtr Pointer to the allocated buffer
+ *
+ * @return 0 on success, non-0 error value on failure
+ */
+int free_buf(int num_blocks, MemAllocBlock blocks[], uint16_t val, void *bufPtr)
+{
+ MemAllocBlock blk;
+ void *ptr = bufPtr;
+ int ret = 0, i;
+ for (i = 0; i < num_blocks; i++)
+ {
+ blk = blocks[i];
+ blk.ptr = ptr;
+ ERR_ADD(ret, check_mem(val, &blk));
+ if (blk.pixelFormat != PIXEL_FMT_PAGE)
+ {
+ ptr += def_stride(blk.dim.area.width *
+ def_bpp(blk.pixelFormat)) * blk.dim.area.height;
+ }
+ else
+ {
+ ptr += def_stride(blk.dim.len);
+ }
+ blk.reserved = 0;
+ }
+
+ ERR_ADD(ret, MemMgr_Free(bufPtr));
+ return ret;
+}
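+
+/*
+ * Illustrative sketch (not exercised by the tests as-is): allocating and
+ * freeing a two-block buffer with the helpers above, e.g. an NV12-style
+ * pair of an 8-bit Y plane and a half-size 16-bit UV plane:
+ *
+ * MemAllocBlock blocks[2];
+ * memset(blocks, 0, sizeof(blocks));
+ * blocks[0].pixelFormat = PIXEL_FMT_8BIT;
+ * blocks[0].dim.area.width = 176;
+ * blocks[0].dim.area.height = 144;
+ * blocks[1].pixelFormat = PIXEL_FMT_16BIT;
+ * blocks[1].dim.area.width = 88;
+ * blocks[1].dim.area.height = 72;
+ * uint16_t val = (uint16_t) rand();
+ * void *buf = alloc_buf(2, blocks, val);
+ * if (buf) free_buf(2, blocks, val, buf);
+ */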
+
+#if 0
+struct slot {
+ int op;
+ SSPtr ssptr;
+ void *buffer;
+ void *dataPtr;
+} *slots = NULL;
+
+enum op_enum {
+ op_map_1d,
+ op_alloc_1d,
+ op_alloc_8,
+ op_alloc_16,
+ op_alloc_32,
+ op_alloc_nv12,
+ op_alloc_gen,
+};
+
+static int free_slot(int ix)
+{
+ if (slots[ix].bufPtr)
+ {
+ /* check memory fill */
+ switch (slots[ix].op)
+ {
+ case op_map_1d:
+ res = unmap_1D(slot[ix].dataPtr, slot[ix].length, 0, slot[ix].val, slot[ix].bufPtr);
+ FREE(mem[ix].buffer);
+ break;
+ case op_alloc_1d:
+ res = free_1D(slot[ix].length, 0, slot[ix].val, mem[ix].bufPtr);
+ break;
+ case op_alloc_8:
+ res = free_2D(slot[ix].width, slot[ix].height, PIXEL_FMT_8BIT, 0, slot[ix].val, slot[ix].bufPtr);
+ break;
+ case op_alloc_16:
+ res = free_2D(slot[ix].width, slot[ix].height, PIXEL_FMT_16BIT, 0, slot[ix].val, slot[ix].bufPtr);
+ break;
+ case op_alloc_32:
+ res = free_2D(slot[ix].width, slot[ix].height, PIXEL_FMT_32BIT, 0, slot[ix].val, slot[ix].bufPtr);
+ break;
+ case op_alloc_nv12:
+ res = free_NV12(slot[ix].width, slot[ix].height, slot[ix].val, slot[ix].bufPtr);
+ break;
+ }
+ P("%s[%p]", mem[ix].op ? "free" : "unmap", mem[ix].bufPtr);
+ ZERO(slot[ix]);
+ }
+}
+#endif
+
+#include <tilermgr.h>
+
+char *parse_num(char *p, int *tgt)
+{
+ int len;
+ if (!strncmp(p, "0x", 2)) { /* hex number */
+ if (NOT_I(sscanf(p, "0x%x%n", tgt, &len),==,1))
+ return NULL;
+ return p + len;
+ } else {
+ if (NOT_I(sscanf(p, "%d%n", tgt, &len),==,1))
+ return NULL;
+ return p + len;
+ }
+}
+
+/**
+ * Parametric memmgr test. This is similar to the star test
+ * except the operations are read from the command line:
+ *
+ * [#.]a:len|w*h*bits[,...] allocates a buffer made of the listed
+ * blocks into slot # (default: the first free slot); bits is 8, 16,
+ * 32, or 12 for an NV12 pair, and a plain length gives a 1D block
+ * [#.]A:... allocates a single block directly through TilerMgr
+ * [#.]f frees the memmgr buffer in slot # (default: first used slot)
+ * [#.]F frees a TilerMgr-allocated block
+ *
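+ * For example (illustrative): "tiler_ptest a:176*144*8,0x1000 1.f"
+ * allocates a buffer made of a 176x144 8-bit block and a 4096-byte 1D
+ * block into slot 1, and then frees it.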
+ *
+ * @author a0194118 (11/4/2009)
+ *
+ * @param argc Number of arguments
+ * @param argv Arguments (operations to perform)
+ */
+int param_test(int argc, char **argv)
+{
+ int delta_slots = 16;
+ int ix, i, n, t, type, max_n, num_slots = delta_slots;
+ struct ptr_info *slots;
+ ALLOCN(slots, num_slots);
+ if (NOT_P(slots,!=,NULL)) return 1;
+
+ int res = TilerMgr_Open();
+ for (i = 1; i < argc && !res; i++)
+ {
+ uint16_t val = (uint16_t) rand();
+
+ char *p = argv[i], *q;
+ struct ptr_info buf;
+ ZERO(buf);
+
+ /* read slot */
+ ix = -1;
+ if (isdigit(*p))
+ {
+ res = 1;
+ q = parse_num(p, &ix);
+ if (NOT_P(q,!=,NULL) || NOT_I(ix,>,0)) break;
+ p = q;
+ if (NOT_I(*p++,==,'.')) break;
+ res = 0;
+ ix--;
+ }
+
+ type = *p++;
+ /* get default slot */
+ if (ix < 0)
+ {
+ switch (type)
+ {
+ /* allocation defaults to the 1st free slot */
+ case 'a':
+ case 'A':
+ for (ix = 0; ix < num_slots && slots[ix].type; ix++);
+ break;
+
+ /* frees default to the 1st used block */
+ case 'f':
+ case 'F':
+ for (ix = 0; ix < num_slots && !slots[ix].type; ix++);
+ if (NOT_I(ix,<,num_slots)) res = 1;
+ break;
+ }
+ }
+ if (res) break;
+
+ /* allocate more slots if needed */
+ if (ix >= num_slots)
+ {
+ int more_slots = ROUND_UP_TO(ix + 1, delta_slots);
+ struct ptr_info *new_slots;
+ ALLOCN(new_slots, more_slots);
+ if (NOT_P(new_slots,!=,NULL)) break;
+ memcpy(new_slots, slots, sizeof(*slots) * num_slots);
+ FREE(slots);
+ slots = new_slots;
+ num_slots = more_slots;
+ }
+
+ /* perform operation */
+ res = 1; /* assume failure */
+ switch (type)
+ {
+ case 'a': /* allocate */
+ case 'A': /* tiler-allocate */
+ switch (type)
+ {
+ case 'a': buf.type = ptr_alloced; max_n = TILER_MAX_NUM_BLOCKS; break;
+ case 'A': buf.type = ptr_tiler_alloced; max_n = 1; break;
+ }
+ if (ix < num_slots && NOT_I(slots[ix].type,==,ptr_empty)) break;
+ if (NOT_I(*p++,==,':')) break;
+ for (n = 0; *p && n < max_n; n++) {
+ /* read length or width */
+ p = parse_num(p, (int *) &buf.blocks[n].dim.len);
+ if (NOT_P(p,!=,NULL)) break;
+ if (*p == '*') { /* 2d block */
+ buf.blocks[n].dim.area.width = (uint16_t) buf.blocks[n].dim.len;
+ /* read height */
+ p = parse_num(++p, &t);
+ if (NOT_P(p,!=,NULL) || NOT_I(*p++,==,'*')) break;
+ buf.blocks[n].dim.area.height = (uint16_t) t;
+ /* read bits */
+ p = parse_num(p, &t);
+ if (NOT_P(p,!=,NULL)) break;
+ /* handle nv12 */
+ if (t == 12 && n + 1 < max_n) {
+ buf.blocks[n + 1].dim.area.width = buf.blocks[n].dim.area.width >> 1;
+ buf.blocks[n + 1].dim.area.height = buf.blocks[n].dim.area.height >> 1;
+ buf.blocks[n].fmt = TILFMT_8BIT;
+ t = 16;
+ n++;
+ }
+
+ buf.blocks[n].fmt = (t == 8 ? TILFMT_8BIT :
+ t == 16 ? TILFMT_16BIT :
+ t == 32 ? TILFMT_32BIT : TILFMT_INVALID);
+ if (NOT_I(buf.blocks[n].fmt,!=,TILFMT_INVALID)) break;
+ } else { /* 1d block */
+ buf.blocks[n].fmt = TILFMT_PAGE;
+ }
+ if (*p && NOT_I(*p++,==,',')) break;
+ /* we're OK */
+ res = 0;
+ }
+ if (res || *p) break;
+ /* allocate buffer */
+
+ buf.num_blocks = n;
+ buf.val = val;
+ if (buf.type == ptr_alloced)
+ {
+ dump_slot(&buf, "==(alloc)=>");
+ buf.ptr = (int) alloc_buf(n, (MemAllocBlock *) buf.blocks, val);
+ dump_slot(&buf, "<=(alloc)==");
+ }
+ else
+ {
+ dump_slot(&buf, "==(tiler_alloc)=>");
+ if (buf.blocks[0].fmt == TILFMT_PAGE)
+ {
+ buf.ptr = (int) TilerMgr_PageModeAlloc(buf.blocks[0].dim.len);
+ }
+ else
+ {
+ buf.ptr =(int) TilerMgr_Alloc(buf.blocks[0].fmt,
+ buf.blocks[0].dim.area.width,
+ buf.blocks[0].dim.area.height);
+ }
+ buf.blocks[0].ssptr = (unsigned long) buf.ptr;
+ dump_slot(&buf, "<=(tiler_alloc)==");
+ }
+ if (NOT_I(buf.ptr,!=,0)) res = 1;
+ else memcpy(slots + ix, &buf, sizeof(buf));
+ break;
+
+ case 'f': /* free */
+ case 'F': /* tiler-free */
+ memcpy(&buf, slots + ix, sizeof(buf));
+ switch (type)
+ {
+ case 'f':
+ if (NOT_I(buf.type,==,ptr_alloced)) break;
+ dump_slot(&buf, "==(free)=>");
+ res = free_buf(buf.num_blocks, (MemAllocBlock *) buf.blocks,
+ buf.val, (void *) buf.ptr);
+ P("<=(free)==: %d", res);
+ break;
+ case 'F':
+ if (NOT_I(buf.type,==,ptr_tiler_alloced)) break;
+ dump_slot(&buf, "==(tiler_free)=>");
+ if (buf.blocks[0].fmt == TILFMT_PAGE)
+ {
+ res = TilerMgr_PageModeFree((SSPtr) buf.ptr);
+ }
+ else
+ {
+ res = TilerMgr_Free((SSPtr) buf.ptr);
+ }
+ P("<=(tiler_free)==: %d", res);
+ break;
+ }
+ ZERO(slots[ix]);
+ break;
+ }
+ }
+
+ /* free any memmgr allocated blocks */
+ for (ix = 0; ix < num_slots; ix++)
+ {
+ if (slots[ix].type == ptr_alloced)
+ {
+ dump_slot(slots + ix, "==(free)=>");
+ int res_free = free_buf(slots[ix].num_blocks, (MemAllocBlock *) slots[ix].blocks,
+ slots[ix].val, (void *) slots[ix].ptr);
+ P("<=(free)==: %d", res_free);
+ ERR_ADD(res, res_free);
+ }
+ }
+ ERR_ADD(res, TilerMgr_Close());
+
+ FREE(slots);
+ return res;
+}
+
+/**
+ * Main test function. Checks arguments for test case ranges,
+ * runs tests and prints usage or test list if required.
+ *
+ * @author a0194118 (9/7/2009)
+ *
+ * @param argc Number of arguments
+ * @param argv Arguments
+ *
+ * @return -1 on usage or test list, otherwise # of failed
+ * tests.
+ */
+int main(int argc, char **argv)
+{
+ int res = param_test(argc, argv);
+ P(res ? "FAILURE: %d" : "SUCCESS", res);
+ return res;
+}
+
diff --git a/tiler/tilermem.h b/tiler/tilermem.h
new file mode 100644
index 0000000..2c89d84
--- /dev/null
+++ b/tiler/tilermem.h
@@ -0,0 +1,79 @@
+/*
+ * tilermem.h
+ *
+ * Tiler Memory Interface functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TILERMEM_H_
+#define _TILERMEM_H_
+
+/* retrieve type definitions */
+#include "mem_types.h"
+
+/**
+ * Tiler Memory Allocator is responsible for:
+ * <ol>
+ * <li>Getting the stride information for containers or
+ * buffers
+ * <li>Converting virtual addresses to physical addresses.
+ * </ol>
+ */
+
+/**
+ * Returns the tiler stride corresponding to the system space
+ * address. For 2D buffers it returns the container stride. For
+ * 1D buffers it returns the page size. For non-tiler buffers
+ * it returns 0.
+ *
+ * @author a0194118 (9/1/2009)
+ *
+ * @param ssptr system-space (physical) address of the block
+ *
+ * @return The stride of the block that contains the address.
+ */
+bytes_t TilerMem_GetStride(SSPtr ssptr);
+
+/**
+ * Retrieves the physical system-space address that corresponds
+ * to the virtual address.
+ *
+ * @author a0194118 (9/1/2009)
+ *
+ * @param ptr pointer to a virtual address
+ *
+ * @return The physical system-space address that the virtual
+ * address refers to. If the virtual address is invalid
+ * or unmapped, it returns 0.
+ */
+SSPtr TilerMem_VirtToPhys(void *ptr);
+
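+/*
+ * Illustrative usage (a minimal sketch): given a virtual address inside
+ * a buffer allocated through MemMgr, look up its system-space address
+ * and the stride of the TILER container holding it:
+ *
+ * SSPtr ssptr = TilerMem_VirtToPhys(ptr);
+ * bytes_t stride = TilerMem_GetStride(ssptr);
+ */
+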
+#endif
diff --git a/tiler/tilermem_utils.h b/tiler/tilermem_utils.h
new file mode 100644
index 0000000..1d6210d
--- /dev/null
+++ b/tiler/tilermem_utils.h
@@ -0,0 +1,52 @@
+/*
+ * tilermem_utils.h
+ *
+ * Memory Allocator Interface internal definitions and functions needed for
+ * unit testing.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TILERMEM_UTILS_H_
+#define _TILERMEM_UTILS_H_
+
+#include <tiler.h>
+
+#define TILER_PAGE_WIDTH 64
+#define TILER_PAGE_HEIGHT 64
+
+#define TILER_STRIDE_8BIT (TILER_WIDTH * TILER_PAGE_WIDTH)
+#define TILER_STRIDE_16BIT (TILER_WIDTH * TILER_PAGE_WIDTH * 2)
+#define TILER_STRIDE_32BIT (TILER_WIDTH * TILER_PAGE_WIDTH * 2)
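+
+/* Note: per the definitions above, the 16-bit and 32-bit container views
+ * share the same byte stride; only the 8-bit view uses the narrower one. */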
+
+#define PAGE_SIZE TILER_PAGE
+
+#endif
+
diff --git a/tiler/tilermgr.c b/tiler/tilermgr.c
new file mode 100644
index 0000000..470de5c
--- /dev/null
+++ b/tiler/tilermgr.c
@@ -0,0 +1,203 @@
+/*
+ * tilermgr.c
+ *
+ * TILER library support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h> /* strerror() */
+#include <fcntl.h> /* open() */
+#include <unistd.h> /* close() */
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <tiler.h>
+#include "tilermgr.h"
+#include "mem_types.h"
+
+
+#define TILERMGR_ERROR() \
+    do { \
+        fprintf(stderr, "%s()::%d: errno(%d) - \"%s\"\n", \
+                __FUNCTION__, __LINE__, errno, strerror(errno)); \
+        fflush(stderr); \
+    } while (0)
+
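+/* file descriptor for TILER_DEVICE_PATH; set by TilerMgr_Open() and used by
+ * every other TilerMgr_* call until TilerMgr_Close() */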
+static int fd;
+
+int TilerMgr_Close(void)
+{
+ close(fd);
+ return TILERMGR_ERR_NONE;
+}
+
+int TilerMgr_Open(void)
+{
+ fd = open(TILER_DEVICE_PATH, O_RDWR);
+ if (fd < 0) {
+ TILERMGR_ERROR();
+ return TILERMGR_ERR_GENERIC;
+ }
+
+ return TILERMGR_ERR_NONE;
+}
+
+SSPtr TilerMgr_Alloc(enum pixel_fmt_t pixfmt, pixels_t width, pixels_t height)
+{
+ int ret = -1;
+ struct tiler_block_info block = {0};
+
+ if (pixfmt < PIXEL_FMT_8BIT || pixfmt > PIXEL_FMT_32BIT)
+ return 0x0;
+ if (width <= 0 || width > TILER_WIDTH * 64)
+ return 0x0;
+ if (height <= 0 || height > TILER_HEIGHT * 64)
+ return 0x0;
+
+ block.fmt = pixfmt;
+ block.dim.area.width = width;
+ block.dim.area.height = height;
+
+ ret = ioctl(fd, TILIOC_GBLK, (unsigned long)(&block));
+ if (ret < 0) {
+ TILERMGR_ERROR();
+ return 0x0;
+ }
+ return block.ssptr;
+}
+
+int TilerMgr_Free(SSPtr addr)
+{
+ int ret = -1;
+ struct tiler_block_info block = {0};
+
+ if (addr < TILER_MEM_8BIT || addr >= TILER_MEM_PAGED)
+ return TILERMGR_ERR_GENERIC;
+
+ block.ssptr = addr;
+
+ ret = ioctl(fd, TILIOC_FBLK, (unsigned long)(&block));
+ if (ret < 0) {
+ TILERMGR_ERROR();
+ return TILERMGR_ERR_GENERIC;
+ }
+ return TILERMGR_ERR_NONE;
+}
+
+SSPtr TilerMgr_PageModeAlloc(bytes_t len)
+{
+ int ret = -1;
+ struct tiler_block_info block = {0};
+
+ if(len < 0 || len > TILER_LENGTH)
+ return 0x0;
+
+ block.fmt = TILFMT_PAGE;
+ block.dim.len = len;
+
+ ret = ioctl(fd, TILIOC_GBLK, (unsigned long)(&block));
+ if (ret < 0) {
+ TILERMGR_ERROR();
+ return 0x0;
+ }
+ return block.ssptr;
+}
+
+int TilerMgr_PageModeFree(SSPtr addr)
+{
+ int ret = -1;
+ struct tiler_block_info block = {0};
+
+ if (addr < TILER_MEM_PAGED || addr >= TILER_MEM_END)
+ return TILERMGR_ERR_GENERIC;
+
+ block.ssptr = addr;
+
+ ret = ioctl(fd, TILIOC_FBLK, (unsigned long)(&block));
+ if (ret < 0) {
+ TILERMGR_ERROR();
+ return TILERMGR_ERR_GENERIC;
+ }
+ return TILERMGR_ERR_NONE;
+}
+
+SSPtr TilerMgr_VirtToPhys(void *ptr)
+{
+ int ret = -1;
+ unsigned long tmp = 0x0;
+
+ if(ptr == NULL)
+ return 0x0;
+
+ tmp = (unsigned long)ptr;
+ ret = ioctl(fd, TILIOC_GSSP, tmp);
+
+ return (SSPtr)ret;
+}
+
+SSPtr TilerMgr_Map(void *ptr, bytes_t len)
+{
+ int ret = -1;
+ struct tiler_block_info block = {0};
+
+ if (len < 0 || len > TILER_LENGTH)
+ return 0x0;
+
+ block.fmt = TILFMT_PAGE;
+ block.dim.len = len;
+ block.ptr = ptr;
+
+ ret = ioctl(fd, TILIOC_MBLK, (unsigned long)(&block));
+ if (ret < 0) {
+ TILERMGR_ERROR();
+ return 0x0;
+ }
+ return block.ssptr;
+}
+
+int TilerMgr_Unmap(SSPtr addr)
+{
+ int ret = -1;
+ struct tiler_block_info block = {0};
+
+ if (addr < TILER_MEM_PAGED || addr >= TILER_MEM_END)
+ return TILERMGR_ERR_GENERIC;
+
+ block.ssptr = addr;
+
+ ret = ioctl(fd, TILIOC_UMBLK, (unsigned long)(&block));
+ if (ret < 0) {
+ TILERMGR_ERROR();
+ return TILERMGR_ERR_GENERIC;
+ }
+ return TILERMGR_ERR_NONE;
+}
+
diff --git a/tiler/tilermgr.h b/tiler/tilermgr.h
new file mode 100644
index 0000000..cd76822
--- /dev/null
+++ b/tiler/tilermgr.h
@@ -0,0 +1,54 @@
+/*
+ * tilermgr.h
+ *
+ * TILER library support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TILERMGR_H_
+#define _TILERMGR_H_
+
+#include "mem_types.h"
+
+#define TILERMGR_ERR_NONE (0)
+#define TILERMGR_ERR_GENERIC (-1)
+
+int TilerMgr_Open(void);
+int TilerMgr_Close(void);
+SSPtr TilerMgr_Alloc(enum pixel_fmt_t pixfmt, pixels_t width, pixels_t height);
+int TilerMgr_Free(SSPtr ssptr);
+SSPtr TilerMgr_PageModeAlloc(bytes_t length);
+int TilerMgr_PageModeFree(SSPtr ssptr);
+SSPtr TilerMgr_Map(void *ptr, bytes_t length);
+int TilerMgr_Unmap(SSPtr ssptr);
+SSPtr TilerMgr_VirtToPhys(void *ptr);
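+
+/*
+ * Illustrative call sequence only (the width/height values are arbitrary and
+ * error handling is abbreviated):
+ *
+ *     if (TilerMgr_Open() != TILERMGR_ERR_NONE)
+ *         return TILERMGR_ERR_GENERIC;
+ *     SSPtr ssptr = TilerMgr_Alloc(PIXEL_FMT_8BIT, 176, 144);
+ *     if (ssptr != 0x0)
+ *         TilerMgr_Free(ssptr);
+ *     TilerMgr_Close();
+ */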
+
+#endif
diff --git a/tiler/utils.h b/tiler/utils.h
new file mode 100644
index 0000000..d194a43
--- /dev/null
+++ b/tiler/utils.h
@@ -0,0 +1,63 @@
+/*
+ * utils.h
+ *
+ * Utility definitions for the Memory Interface for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _UTILS_H_
+#define _UTILS_H_
+
+/* ---------- Generic Helper Macros ---------- */
+
+/* zero-initialized allocation macros */
+#define NEW(type) (type*)calloc(1, sizeof(type))
+#define NEWN(type,n) (type*)calloc(n, sizeof(type))
+#define ALLOC(var) var = calloc(1, sizeof(*var))
+#define ALLOCN(var,n) var = calloc(n, sizeof(*var))
+
+
+/* free variable and set it to NULL */
+#define FREE(var) do { free(var); var = NULL; } while(0)
+
+/* clear variable */
+#define ZERO(var) memset(&(var), 0, sizeof(var))
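+/* e.g. (illustrative only):
+ *     struct elem *el = NEW(struct elem);   -- one zero-filled element
+ *     int *arr = NEWN(int, 16);             -- 16 zero-filled ints
+ *     FREE(el); FREE(arr);                  -- free and reset to NULL */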
+
+/* power-of-2 rounding macros (N must be a power of 2) */
+#define ROUND_DOWN_TO2POW(x, N) ((x) & ~((N)-1))
+#define ROUND_UP_TO2POW(x, N) ROUND_DOWN_TO2POW((x) + (N) - 1, N)
+
+/* general rounding macros (any positive N) */
+#define ROUND_DOWN_TO(x, N) ((x) / (N) * (N))
+#define ROUND_UP_TO(x, N) ROUND_DOWN_TO((x) + (N) - 1, N)
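+
+/* Examples (the power-of-2 cases are also exercised by test_math() in
+ * utils_test.c):
+ *     ROUND_UP_TO2POW(4097, 4096)   == 8192
+ *     ROUND_DOWN_TO2POW(4097, 4096) == 4096
+ *     ROUND_UP_TO(10, 3)            == 12
+ *     ROUND_DOWN_TO(10, 3)          == 9
+ */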
+
+#endif
+
diff --git a/tiler/utils_test.c b/tiler/utils_test.c
new file mode 100644
index 0000000..c152125
--- /dev/null
+++ b/tiler/utils_test.c
@@ -0,0 +1,489 @@
+/*
+ * utils_test.c
+ *
+ * Memory Allocator Utility tests.
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define __DEBUG__
+#define __DEBUG_ASSERT__
+#define __DEBUG_ENTRY__
+
+#include <utils.h>
+#include <list_utils.h>
+#include <debug_utils.h>
+#include "testlib.h"
+
+#define TESTS\
+ T(test_new())\
+ T(test_list())\
+ T(test_ezlist())\
+ T(test_dzlist())\
+ T(test_plist())\
+ T(test_mlist())\
+ T(test_math())
+
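+/*
+ * List-verification helpers shared by the tests below; they walk a list via
+ * the cursor `in`:
+ *   F()  - rewind the cursor to the first node after `head`
+ *   N(a) - expect a node whose element (`me`) holds the value a, then advance
+ *   Z(a) - same, for lists whose nodes hold the data directly
+ *   L()  - expect the cursor to be back at `head` (end of list)
+ */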
+#define F() in = head.next;
+#define N(a) res |= NOT_P(in,!=,&head); \
+ res |= NOT_P(in->me,!=,NULL) || NOT_I(in->me->data,==,a); in = in->next;
+#define Z(a) res |= NOT_P(in,!=,&head); \
+ res |= NOT_I(in->data,==,a); in = in->next;
+#define L() res |= NOT_P(in,==,&head);
+
+int all_zero(int *p, int len)
+{
+ IN;
+ int ix = 0;
+ for (ix = 0; ix < len; ix ++)
+ {
+ if (p[ix])
+ {
+ P("[%d]=%d\n", ix, p[ix]);
+ return R_I(1);
+ }
+ }
+ return R_I(0);
+}
+
+int test_new() {
+ IN;
+ int *p;
+ p = NEW(int);
+ int res = NOT_I(all_zero(p, 1),==,0);
+ FREE(p);
+ res |= NOT_I(p,==,NULL);
+ p = NEWN(int, 8000);
+ res |= NOT_I(all_zero(p, 8000),==,0);
+ FREE(p);
+ p = NEWN(int, 1000000);
+ res |= NOT_I(all_zero(p, 1000000),==,0);
+ FREE(p);
+ return R_I(res);
+}
+
+int test_list() {
+ IN;
+
+ struct elem {
+ int data;
+ } *elA, *elB;
+ struct list {
+ struct elem *me;
+ struct list *last, *next;
+ } head, *inA, *inB, *in, *in_safe;
+
+ /* initialization */
+ DLIST_INIT(head);
+ int res = NOT_I(DLIST_IS_EMPTY(head),!=,0);
+
+ /* add element at beginning of list */
+ elA = NEW(struct elem);
+ elA->data = 1;
+ inA = NEW(struct list);
+ DLIST_ADD_AFTER(head, elA, *inA);
+ F()N(1)L();
+
+ /* add element after an element */
+ elB = NEW(struct elem);
+ elB->data = 2;
+ inB = NEW(struct list);
+ DLIST_ADD_AFTER(*inA, elB, *inB);
+ F()N(1)N(2)L();
+
+ /* add element at the end of the list */
+ elB = NEW(struct elem);
+ inB = NEW(struct list);
+ (DLIST_ADD_BEFORE(head, elB, *inB))->data = 3;
+ F()N(1)N(2)N(3)L();
+
+ /* move an element to another position or another list */
+ DLIST_MOVE_AFTER(head, *inB);
+ F()N(3)N(1)N(2)L();
+
+ DLIST_MOVE_BEFORE(head, *inB);
+ F()N(1)N(2)N(3)L();
+
+ /* works even if the position is the same */
+ DLIST_MOVE_BEFORE(head, *inB);
+ F()N(1)N(2)N(3)L();
+
+ res |= NOT_I(DLIST_FIRST(head)->data,==,1);
+ res |= NOT_I(DLIST_LAST(head)->data,==,3);
+
+ DLIST_LOOP(head, in) {
+ P("%d", in->me->data);
+ }
+ P(".");
+
+ /* remove elements */
+ DLIST_SAFE_LOOP(head, in, in_safe) {
+ if (in->me->data > 1)
+ {
+ DLIST_REMOVE(*in);
+ FREE(in->me);
+ FREE(in);
+ }
+ }
+ F()N(1)L();
+
+ /* delete list */
+ DLIST_SAFE_LOOP(head, in, in_safe) {
+ DLIST_REMOVE(*in);
+ FREE(in->me);
+ FREE(in);
+ }
+ F()L();
+
+ return R_I(res);
+}
+
+int test_ezlist() {
+ IN;
+
+ struct elem {
+ int data;
+ struct elem *me, *last, *next;
+ } *elA, *elB, head, *el, *el_safe, *in;
+
+ /* initialization */
+ DLIST_INIT(head);
+ int res = NOT_I(DLIST_IS_EMPTY(head),!=,0);
+
+ /* add element at beginning of list */
+ elA = NEW(struct elem);
+ elA->data = 1;
+ DLIST_ADD_AFTER(head, elA, *elA);
+ F()N(1)L();
+
+ /* add element after an element */
+ elB = NEW(struct elem);
+ elB->data = 2;
+ DLIST_ADD_AFTER(*elA, elB, *elB);
+ F()N(1)N(2)L();
+
+ /* add element at the end of the list */
+ elB = NEW(struct elem);
+ (DLIST_ADD_BEFORE(head, elB, *elB))->data = 3;
+ F()N(1)N(2)N(3)L();
+
+ /* move an element to another position or another list */
+ DLIST_MOVE_AFTER(head, *elB);
+ F()N(3)N(1)N(2)L();
+
+ DLIST_MOVE_BEFORE(head, *elB);
+ F()N(1)N(2)N(3)L();
+
+ /* works even if the position is the same */
+ DLIST_MOVE_BEFORE(head, *elB);
+ F()N(1)N(2)N(3)L();
+
+ res |= NOT_I(DLIST_FIRST(head)->data,==,1);
+ res |= NOT_I(DLIST_LAST(head)->data,==,3);
+
+ DLIST_LOOP(head, el) {
+ P("%d", el->data);
+ }
+ P(".");
+
+ /* remove elements */
+ DLIST_SAFE_RLOOP(head, el, el_safe) {
+ if (el->me->data == 1)
+ {
+ DLIST_REMOVE(*el);
+ FREE(el);
+ }
+ }
+ F()N(2)N(3)L();
+
+ /* delete list */
+ DLIST_SAFE_LOOP(head, el, el_safe) {
+ DLIST_REMOVE(*el);
+ FREE(el);
+ }
+ F()L();
+
+ return R_I(res);
+}
+
+int test_dzlist() {
+ IN;
+
+ struct elem {
+ int data;
+ struct elem *last, *next;
+ } *elA, *elB, head, *el, *el_safe, *in;
+
+ /* initialization */
+ DZLIST_INIT(head);
+ int res = NOT_I(DZLIST_IS_EMPTY(head),!=,0);
+
+ /* add element at beginning of list */
+ elA = NEW(struct elem);
+ elA->data = 1;
+ DZLIST_ADD_AFTER(head, *elA);
+ F()Z(1)L();
+
+ /* add element after an element */
+ elB = NEW(struct elem);
+ elB->data = 2;
+ DZLIST_ADD_AFTER(*elA, *elB);
+ F()Z(1)Z(2)L();
+
+ /* add element at the end of the list */
+ elB = NEW(struct elem);
+ (DZLIST_ADD_BEFORE(head, *elB))->data = 3;
+ F()Z(1)Z(2)Z(3)L();
+
+ /* move an element to another position or another list */
+ DZLIST_MOVE_AFTER(head, *elB);
+ F()Z(3)Z(1)Z(2)L();
+
+ DZLIST_MOVE_BEFORE(head, *elB);
+ F()Z(1)Z(2)Z(3)L();
+
+ /* works even if the position is the same */
+ DZLIST_MOVE_BEFORE(head, *elB);
+ F()Z(1)Z(2)Z(3)L();
+
+ res |= NOT_I(DZLIST_FIRST(head)->data,==,1);
+ res |= NOT_I(DZLIST_LAST(head)->data,==,3);
+
+ DZLIST_LOOP(head, el) {
+ P("%d", el->data);
+ }
+ P(".");
+
+ /* remove elements */
+ DZLIST_SAFE_RLOOP(head, el, el_safe) {
+ if (el->data == 1)
+ {
+ DZLIST_REMOVE(*el);
+ FREE(el);
+ }
+ }
+ F()Z(2)Z(3)L();
+
+ /* delete list */
+ DZLIST_SAFE_LOOP(head, el, el_safe) {
+ DZLIST_REMOVE(*el);
+ FREE(el);
+ }
+ F()L();
+
+ return R_I(res);
+}
+
+int test_plist() {
+ IN;
+
+ struct elem;
+ struct list {
+ struct elem *me;
+ struct list *last, *next;
+ } head, *inA, *inB, *in;
+ struct elem {
+ int data;
+ struct list *list_data;
+ } *elA, *elB, *el, *el_safe;
+
+ /* initialization */
+ DLIST_INIT(head);
+ int res = NOT_I(DLIST_IS_EMPTY(head),!=,0);
+
+ /* add element at beginning of list */
+ elA = NEW(struct elem);
+ elA->data = 1;
+ inA = NEW(struct list);
+ DLIST_PADD_AFTER(head, elA, inA, list_data);
+ F()N(1)L();
+
+ /* add element after an element */
+ elB = NEW(struct elem);
+ elB->data = 2;
+ inB = NEW(struct list);
+ DLIST_PADD_AFTER(*inA, elB, inB, list_data);
+ F()N(1)N(2)L();
+
+ /* add element at the end of the list */
+ elB = NEW(struct elem);
+ inB = NEW(struct list);
+ (DLIST_PADD_BEFORE(head, elB, inB, list_data))->data = 3;
+ F()N(1)N(2)N(3)L();
+
+ /* move an element to another position or another list */
+ DLIST_MOVE_AFTER(head, *inB);
+ F()N(3)N(1)N(2)L();
+
+ DLIST_MOVE_BEFORE(head, *inB);
+ F()N(1)N(2)N(3)L();
+
+ /* works even if the position is the same */
+ DLIST_MOVE_BEFORE(head, *inB);
+ F()N(1)N(2)N(3)L();
+
+ res |= NOT_I(DLIST_FIRST(head)->data,==,1);
+ res |= NOT_I(DLIST_LAST(head)->data,==,3);
+
+ DLIST_LOOP(head, in) {
+ P("%d", in->me->data);
+ }
+ P(".");
+ DLIST_PLOOP(head, el, list_data) {
+ P("%d", el->data);
+ }
+ P(".");
+
+ /* remove elements */
+ DLIST_SAFE_PLOOP(head, el, el_safe, list_data) {
+ if (el->data == 2)
+ {
+ DLIST_REMOVE(*el->list_data);
+ FREE(el->list_data);
+ FREE(el);
+ }
+ }
+ F()N(1)N(3)L();
+
+ /* delete list */
+ DLIST_SAFE_PLOOP(head, el, el_safe, list_data) {
+ DLIST_REMOVE(*el->list_data);
+ FREE(el->list_data);
+ FREE(el);
+ }
+ F()L();
+
+ return R_I(res);
+}
+
+int test_mlist() {
+ IN;
+
+ struct elem {
+ int data;
+ struct list {
+ struct list *last, *next;
+ struct elem *me;
+ } list_data;
+ } *elA, *elB, *el, *el_safe;
+ struct list head, *in;
+
+ /* initialization */
+ DLIST_INIT(head);
+ int res = NOT_I(DLIST_IS_EMPTY(head),!=,0);
+
+ /* add element at beginning of list */
+ elA = NEW(struct elem);
+ elA->data = 1;
+ DLIST_MADD_AFTER(head, elA, list_data);
+ F()N(1)L();
+
+ /* add element after an element */
+ elB = NEW(struct elem);
+ elB->data = 2;
+ DLIST_MADD_AFTER(elA->list_data, elB, list_data);
+ F()N(1)N(2)L();
+
+ /* add element at the end of the list */
+ elB = NEW(struct elem);
+ (DLIST_MADD_BEFORE(head, elB, list_data))->data = 3;
+ F()N(1)N(2)N(3)L();
+
+ /* move an element to another position or another list */
+ DLIST_MOVE_AFTER(head, elB->list_data);
+ F()N(3)N(1)N(2)L();
+
+ DLIST_MOVE_BEFORE(head, elB->list_data);
+ F()N(1)N(2)N(3)L();
+
+ /* works even if the position is the same */
+ DLIST_MOVE_BEFORE(head, elB->list_data);
+ F()N(1)N(2)N(3)L();
+
+ res |= NOT_I(DLIST_FIRST(head)->data,==,1);
+ res |= NOT_I(DLIST_LAST(head)->data,==,3);
+
+ DLIST_LOOP(head, in) {
+ P("%d", in->me->data);
+ }
+ P(".");
+ DLIST_MLOOP(head, el, list_data) {
+ P("%d", el->data);
+ }
+ P(".");
+
+ /* remove elements */
+ DLIST_SAFE_MLOOP(head, el, el_safe, list_data) {
+ if (el->data != 2)
+ {
+ DLIST_REMOVE(el->list_data);
+ FREE(el);
+ }
+ }
+ F()N(2)L();
+
+ /* delete list */
+ DLIST_SAFE_MLOOP(head, el, el_safe, list_data) {
+ DLIST_REMOVE(el->list_data);
+ FREE(el);
+ }
+ F()L();
+
+ return R_I(res);
+}
+
+int test_math()
+{
+ IN;
+ int res = 0;
+ res |= NOT_I(ROUND_UP_TO2POW(0, 4096),==,0);
+ res |= NOT_I(ROUND_UP_TO2POW(1, 4096),==,4096);
+ res |= NOT_I(ROUND_UP_TO2POW(4095, 4096),==,4096);
+ res |= NOT_I(ROUND_UP_TO2POW(4096, 4096),==,4096);
+ res |= NOT_I(ROUND_UP_TO2POW(4097, 4096),==,8192);
+ res |= NOT_I(ROUND_DOWN_TO2POW(0, 4096),==,0);
+ res |= NOT_I(ROUND_DOWN_TO2POW(1, 4096),==,0);
+ res |= NOT_I(ROUND_DOWN_TO2POW(4095, 4096),==,0);
+ res |= NOT_I(ROUND_DOWN_TO2POW(4096, 4096),==,4096);
+ res |= NOT_I(ROUND_DOWN_TO2POW(4097, 4096),==,4096);
+ return R_I(res);
+}
+
+DEFINE_TESTS(TESTS)
+
+int main(int argc, char **argv)
+{
+ return TestLib_Run(argc, argv, nullfn, nullfn, NULL);
+}
+