summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--api/current.txt1
-rw-r--r--api/system-current.txt1
-rw-r--r--core/java/android/hardware/camera2/CameraCharacteristics.java4
-rw-r--r--graphics/java/android/graphics/ImageFormat.java105
-rw-r--r--media/jni/android_media_ImageReader.cpp25
5 files changed, 131 insertions, 5 deletions
diff --git a/api/current.txt b/api/current.txt
index 0c55e56..8fb2f48 100644
--- a/api/current.txt
+++ b/api/current.txt
@@ -11163,6 +11163,7 @@ package android.graphics {
field public static final int NV21 = 17; // 0x11
field public static final int PRIVATE = 34; // 0x22
field public static final int RAW10 = 37; // 0x25
+ field public static final int RAW12 = 38; // 0x26
field public static final int RAW_SENSOR = 32; // 0x20
field public static final int RGB_565 = 4; // 0x4
field public static final int UNKNOWN = 0; // 0x0
diff --git a/api/system-current.txt b/api/system-current.txt
index 16c80ef..845077e 100644
--- a/api/system-current.txt
+++ b/api/system-current.txt
@@ -11449,6 +11449,7 @@ package android.graphics {
field public static final int NV21 = 17; // 0x11
field public static final int PRIVATE = 34; // 0x22
field public static final int RAW10 = 37; // 0x25
+ field public static final int RAW12 = 38; // 0x26
field public static final int RAW_SENSOR = 32; // 0x20
field public static final int RGB_565 = 4; // 0x4
field public static final int UNKNOWN = 0; // 0x0
diff --git a/core/java/android/hardware/camera2/CameraCharacteristics.java b/core/java/android/hardware/camera2/CameraCharacteristics.java
index a0217c2..1503bf5 100644
--- a/core/java/android/hardware/camera2/CameraCharacteristics.java
+++ b/core/java/android/hardware/camera2/CameraCharacteristics.java
@@ -991,7 +991,8 @@ public final class CameraCharacteristics extends CameraMetadata<CameraCharacteri
* <ul>
* <li>Processed (but stalling): any non-RAW format with a stallDurations &gt; 0.
* Typically JPEG format (ImageFormat#JPEG).</li>
- * <li>Raw formats: ImageFormat#RAW_SENSOR, ImageFormat#RAW10 and ImageFormat#RAW_OPAQUE.</li>
+ * <li>Raw formats: ImageFormat#RAW_SENSOR, ImageFormat#RAW10, ImageFormat#RAW12,
+ * and ImageFormat#RAW_OPAQUE.</li>
* <li>Processed (but not-stalling): any non-RAW format without a stall duration.
* Typically ImageFormat#YUV_420_888, ImageFormat#NV21, ImageFormat#YV12.</li>
* </ul>
@@ -1023,6 +1024,7 @@ public final class CameraCharacteristics extends CameraMetadata<CameraCharacteri
* <ul>
* <li>ImageFormat#RAW_SENSOR</li>
* <li>ImageFormat#RAW10</li>
+ * <li>ImageFormat#RAW12</li>
* <li>Opaque <code>RAW</code></li>
* </ul>
* <p>LEGACY mode devices ({@link CameraCharacteristics#INFO_SUPPORTED_HARDWARE_LEVEL android.info.supportedHardwareLevel} <code>==</code> LEGACY)
diff --git a/graphics/java/android/graphics/ImageFormat.java b/graphics/java/android/graphics/ImageFormat.java
index ada8c12..c63c8ba 100644
--- a/graphics/java/android/graphics/ImageFormat.java
+++ b/graphics/java/android/graphics/ImageFormat.java
@@ -356,6 +356,108 @@ public class ImageFormat {
public static final int RAW10 = 0x25;
/**
+ * <p>
+ * Android 12-bit raw format
+ * </p>
+ * <p>
+ * This is a single-plane, 12-bit per pixel, densely packed (in each row),
+ * unprocessed format, usually representing raw Bayer-pattern images coming
+ * from an image sensor.
+ * </p>
+ * <p>
+ * In an image buffer with this format, starting from the first pixel of each
+ * row, each two consecutive pixels are packed into 3 bytes (24 bits). The first
+ * and second bytes contain the top 8 bits of the first and second pixels,
+ * respectively. The third byte contains the 4 least significant bits of the two
+ * pixels; the exact layout data for each two consecutive pixels is illustrated
+ * below (Pi[j] stands for the jth bit of the ith pixel):
+ * </p>
+ * <table>
+ * <thead>
+ * <tr>
+ * <th align="center"></th>
+ * <th align="center">bit 7</th>
+ * <th align="center">bit 6</th>
+ * <th align="center">bit 5</th>
+ * <th align="center">bit 4</th>
+ * <th align="center">bit 3</th>
+ * <th align="center">bit 2</th>
+ * <th align="center">bit 1</th>
+ * <th align="center">bit 0</th>
+ * </tr>
+ * </thead> <tbody>
+ * <tr>
+ * <td align="center">Byte 0:</td>
+ * <td align="center">P0[11]</td>
+ * <td align="center">P0[10]</td>
+ * <td align="center">P0[ 9]</td>
+ * <td align="center">P0[ 8]</td>
+ * <td align="center">P0[ 7]</td>
+ * <td align="center">P0[ 6]</td>
+ * <td align="center">P0[ 5]</td>
+ * <td align="center">P0[ 4]</td>
+ * </tr>
+ * <tr>
+ * <td align="center">Byte 1:</td>
+ * <td align="center">P1[11]</td>
+ * <td align="center">P1[10]</td>
+ * <td align="center">P1[ 9]</td>
+ * <td align="center">P1[ 8]</td>
+ * <td align="center">P1[ 7]</td>
+ * <td align="center">P1[ 6]</td>
+ * <td align="center">P1[ 5]</td>
+ * <td align="center">P1[ 4]</td>
+ * </tr>
+ * <tr>
+ * <td align="center">Byte 2:</td>
+ * <td align="center">P1[ 3]</td>
+ * <td align="center">P1[ 2]</td>
+ * <td align="center">P1[ 1]</td>
+ * <td align="center">P1[ 0]</td>
+ * <td align="center">P0[ 3]</td>
+ * <td align="center">P0[ 2]</td>
+ * <td align="center">P0[ 1]</td>
+ * <td align="center">P0[ 0]</td>
+ * </tr>
+ * </tbody>
+ * </table>
+ * <p>
+ * This format assumes
+ * <ul>
+ * <li>a width multiple of 4 pixels</li>
+ * <li>an even height</li>
+ * </ul>
+ * </p>
+ *
+ * <pre>size = row stride * height</pre> where the row stride is in <em>bytes</em>,
+ * not pixels.
+ *
+ * <p>
+ * Since this is a densely packed format, the pixel stride is always 0. The
+ * application must use the pixel data layout defined in above table to
+ * access each row data. When row stride is equal to {@code width * 12 / 8}, there
+ * will be no padding bytes at the end of each row, the entire image data is
+ * densely packed. When stride is larger than {@code width * 12 / 8}, padding
+ * bytes will be present at the end of each row.
+ * </p>
+ * <p>
+ * For example, the {@link android.media.Image} object can provide data in
+ * this format from a {@link android.hardware.camera2.CameraDevice} (if
+ * supported) through a {@link android.media.ImageReader} object. The
+ * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
+ * single plane containing the pixel data. The pixel stride is always 0 in
+ * {@link android.media.Image.Plane#getPixelStride()}, and the
+ * {@link android.media.Image.Plane#getRowStride()} describes the vertical
+ * neighboring pixel distance (in bytes) between adjacent rows.
+ * </p>
+ *
+ * @see android.media.Image
+ * @see android.media.ImageReader
+ * @see android.hardware.camera2.CameraDevice
+ */
+ public static final int RAW12 = 0x26;
+
+ /**
* Android dense depth image format.
*
* Each pixel is 16 bits, representing a depth ranging measurement from
@@ -445,6 +547,8 @@ public class ImageFormat {
return 16;
case RAW10:
return 10;
+ case RAW12:
+ return 12;
}
return -1;
}
@@ -472,6 +576,7 @@ public class ImageFormat {
case YUV_420_888:
case RAW_SENSOR:
case RAW10:
+ case RAW12:
case DEPTH16:
case DEPTH_POINT_CLOUD:
case PRIVATE:
diff --git a/media/jni/android_media_ImageReader.cpp b/media/jni/android_media_ImageReader.cpp
index 9fc7e8e..708c083 100644
--- a/media/jni/android_media_ImageReader.cpp
+++ b/media/jni/android_media_ImageReader.cpp
@@ -431,6 +431,19 @@ static void Image_getLockedBufferInfo(JNIEnv* env, CpuConsumer::LockedBuffer* bu
pData = buffer->data;
dataSize = buffer->stride * buffer->height;
break;
+ case HAL_PIXEL_FORMAT_RAW12:
+ // Single plane 12bpp bayer data.
+ ALOG_ASSERT(idx == 0, "Wrong index: %d", idx);
+ LOG_ALWAYS_FATAL_IF(buffer->width % 4,
+ "Width is not multiple of 4 %d", buffer->width);
+ LOG_ALWAYS_FATAL_IF(buffer->height % 2,
+ "Height is not even %d", buffer->height);
+ LOG_ALWAYS_FATAL_IF(buffer->stride < (buffer->width * 12 / 8),
+ "stride (%d) should be at least %d",
+ buffer->stride, buffer->width * 12 / 8);
+ pData = buffer->data;
+ dataSize = buffer->stride * buffer->height;
+ break;
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_RGBX_8888:
// Single plane, 32bpp.
@@ -492,8 +505,10 @@ static jint Image_imageGetPixelStride(JNIEnv* env, CpuConsumer::LockedBuffer* bu
break;
case HAL_PIXEL_FORMAT_BLOB:
case HAL_PIXEL_FORMAT_RAW10:
- // Blob is used for JPEG data, RAW10 is used for 10-bit raw data, they are
- // single plane, row and pixel strides are 0.
+ case HAL_PIXEL_FORMAT_RAW12:
+ // Blob is used for JPEG data, RAW10 and RAW12 are used for 10-bit and 12-bit raw data,
+ // those are single plane data with pixel stride 0 since they don't really have a
+ // well defined pixel stride
ALOG_ASSERT(idx == 0, "Wrong index: %d", idx);
pixelStride = 0;
break;
@@ -549,12 +564,14 @@ static jint Image_imageGetRowStride(JNIEnv* env, CpuConsumer::LockedBuffer* buff
rowStride = (idx == 0) ? buffer->stride : ALIGN(buffer->stride / 2, 16);
break;
case HAL_PIXEL_FORMAT_BLOB:
- // Blob is used for JPEG data, RAW10 is used for 10-bit raw data, they are
- // single plane, row and pixel strides are 0.
+ // Blob is used for JPEG data. It is single plane and has 0 row stride and
+ // 0 pixel stride
ALOG_ASSERT(idx == 0, "Wrong index: %d", idx);
rowStride = 0;
break;
case HAL_PIXEL_FORMAT_RAW10:
+ case HAL_PIXEL_FORMAT_RAW12:
+ // RAW10 and RAW12 are used for 10-bit and 12-bit raw data; they are single plane
ALOG_ASSERT(idx == 0, "Wrong index: %d", idx);
rowStride = buffer->stride;
break;