author     Vladimir Chtchetkine <vchtchetkine@google.com>  2011-09-04 09:51:40 -0700
committer  Vladimir Chtchetkine <vchtchetkine@google.com>  2011-09-12 12:12:57 -0700
commit     cf1c2c70dd99e7d78816ba9a558f9ed8c016862b (patch)
tree       0e6893eedea447e20ecdb7ad66f9f05cda65d7af
parent     c646f5e40ddda3d49b581ac0c78cf748b5dee74c (diff)
Implements camera service in emulator
This is a fully functional camera service implementation that works (tested) on both Linux and Windows. Fixed little/big endian bugs in the converter code. Moved preview frames to use RGB32 instead of RGB565: RGB32 conversions are simpler and faster in the guest. Made "payload size send" a separate routine.

Change-Id: I96954f4c2cb4e4ef4dd6a20e41897d79c5037bae
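As a rough illustration (not code from this patch) of why producing RGB32 in the converters is simpler than RGB565: an RGB32 pixel is written with plain byte stores, while RGB565 needs each channel truncated and packed into a 16-bit word. The helper names below are hypothetical, and endianness details (which the converter code in this patch handles explicitly) are ignored:

    /* Hypothetical sketch: emitting one pixel in each format. */
    static void put_rgb32(uint8_t* p, uint8_t r, uint8_t g, uint8_t b)
    {
        p[0] = r; p[1] = g; p[2] = b; p[3] = 0xff;  /* plain byte stores */
    }
    static void put_rgb565(uint8_t* p, uint8_t r, uint8_t g, uint8_t b)
    {
        /* truncate each channel, then shift and pack into 16 bits */
        *(uint16_t*)p = (uint16_t)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
    }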
-rw-r--r--  Makefile.android                              1
-rw-r--r--  android/camera/camera-capture-linux.c       532
-rwxr-xr-x  android/camera/camera-capture-windows.c     398
-rw-r--r--  android/camera/camera-capture.h              58
-rwxr-xr-x  android/camera/camera-common.h              114
-rwxr-xr-x  android/camera/camera-format-converters.c   958
-rwxr-xr-x  android/camera/camera-format-converters.h    52
-rw-r--r--  android/camera/camera-service.c            1215
8 files changed, 2867 insertions(+), 461 deletions(-)
diff --git a/Makefile.android b/Makefile.android
index 95287fd..6888173 100644
--- a/Makefile.android
+++ b/Makefile.android
@@ -58,6 +58,7 @@ ifeq ($(HOST_OS),windows)
MY_CFLAGS += -D_WIN32
# we need Win32 features that are available since Windows 2000 Professional/Server (NT 5.0)
MY_CFLAGS += -DWINVER=0x501
+ MY_LDLIBS += -lvfw32
endif
ifeq ($(HOST_ARCH),ppc)
diff --git a/android/camera/camera-capture-linux.c b/android/camera/camera-capture-linux.c
index 8944270..5c7e242 100644
--- a/android/camera/camera-capture-linux.c
+++ b/android/camera/camera-capture-linux.c
@@ -23,11 +23,6 @@
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
-#include <linux/videodev2.h>
-#include "qemu-common.h"
-#include "android/utils/debug.h"
-#include "android/utils/misc.h"
-#include "android/utils/system.h"
#include "android/camera/camera-capture.h"
#include "android/camera/camera-format-converters.h"
@@ -47,6 +42,21 @@
#define CLEAR(x) memset (&(x), 0, sizeof(x))
+/* Pixel format descriptor.
+ * Instances of this descriptor are created during camera device enumeration,
+ * and an instance of this structure, describing the pixel format chosen for
+ * camera emulation, is saved by the camera factory service to represent the
+ * properties of the emulated camera.
+ */
+typedef struct QemuPixelFormat {
+ /* Pixel format in V4L2_PIX_FMT_XXX form. */
+ uint32_t format;
+ /* Frame dimensions supported by this format. */
+ CameraFrameDim* dims;
+ /* Number of frame dimensions supported by this format. */
+ int dim_num;
+} QemuPixelFormat;
+
/* Describes a framebuffer. */
typedef struct CameraFrameBuffer {
/* Framebuffer data. */
@@ -77,8 +87,6 @@ struct LinuxCameraDevice {
char* device_name;
/* Input channel. (default is 0) */
int input_channel;
- /* Requested pixel format. */
- uint32_t req_pixel_format;
/*
* Set by the framework after initializing camera connection.
@@ -88,7 +96,7 @@ struct LinuxCameraDevice {
int handle;
/* Device capabilities. */
struct v4l2_capability caps;
- /* Actual pixel format reported by the device. */
+ /* Actual pixel format reported by the device when capturing is started. */
struct v4l2_pix_format actual_pixel_format;
/* Defines type of the I/O to use to retrieve frames from the device. */
CameraIoType io_type;
@@ -98,6 +106,37 @@ struct LinuxCameraDevice {
int framebuffer_num;
};
+/* Preferred pixel formats arranged from the most to the least desired.
+ *
+ * More than anything else, this array is defined by the existence of format
+ * conversion between the camera supported formats, and formats that are
+ * supported by the camera framework in the guest system. Currently, the guest
+ * supports only the YV12 pixel format for data, and RGB32 for preview. So,
+ * this array should contain only those formats for which converters are
+ * implemented. Generally speaking, the order in which entries are arranged in
+ * this array matters only as far as conversion speed is concerned: formats
+ * with the fastest converters should be put closer to the top of the array,
+ * while slower ones should be put closer to the bottom. But as far as
+ * functionality is concerned, the order doesn't matter, and any format can be
+ * placed anywhere in this array, as long as a conversion for it exists.
+ */
+static const uint32_t _preferred_formats[] =
+{
+ /* Native format for the emulated camera: no conversion at all. */
+ V4L2_PIX_FMT_YVU420,
+ /* Continue with YCbCr: less math than with RGB */
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_YUYV,
+ /* End with RGB. */
+ V4L2_PIX_FMT_RGB32,
+ V4L2_PIX_FMT_RGB24,
+ V4L2_PIX_FMT_RGB565,
+};
+/* Number of entries in _preferred_formats array. */
+static const int _preferred_format_num =
+ sizeof(_preferred_formats)/sizeof(*_preferred_formats);
+
/*******************************************************************************
* Helper routines
******************************************************************************/
@@ -112,6 +151,41 @@ _xioctl(int fd, int request, void *arg) {
return r;
}
+/* Frees resources allocated for a QemuPixelFormat instance, excluding the
+ * instance itself.
+ */
+static void _qemu_pixel_format_free(QemuPixelFormat* fmt)
+{
+ if (fmt != NULL) {
+ if (fmt->dims != NULL)
+ free(fmt->dims);
+ }
+}
+
+/* Returns the index of the given pixel format in an array of pixel format
+ * descriptors.
+ * This routine is used to choose a pixel format for a camera device. The idea
+ * is that when the camera service enumerates all pixel formats for all cameras
+ * connected to the host, we need to choose just one, which would be most
+ * appropriate for camera emulation. To do that, the camera service will run
+ * the formats contained in the _preferred_formats array against the enumerated
+ * pixel formats to pick the first one that matches.
+ * Param:
+ * fmt - Pixel format, for which to obtain the index.
+ * formats - Array containing list of pixel formats, supported by the camera
+ * device.
+ * size - Number of elements in the 'formats' array.
+ * Return:
+ * Index of the matched entry in the array, or -1 if no entry has been found.
+ */
+static int
+_get_format_index(uint32_t fmt, QemuPixelFormat* formats, int size)
+{
+ int f;
+ for (f = 0; f < size && formats[f].format != fmt; f++);
+ return f < size ? f : -1;
+}
+
/*******************************************************************************
* CameraFrameBuffer routines
******************************************************************************/
@@ -154,7 +228,7 @@ _free_framebuffers(CameraFrameBuffer* fb, int num, CameraIoType io_type)
break;
default:
- E("Invalid I/O type %d", io_type);
+ E("%s: Invalid I/O type %d", __FUNCTION__, io_type);
break;
}
}
@@ -203,7 +277,7 @@ _camera_device_free(LinuxCameraDevice* lcd)
}
AFREE(lcd);
} else {
- W("%s: No descriptor", __FUNCTION__);
+ E("%s: No descriptor", __FUNCTION__);
}
}
@@ -226,7 +300,7 @@ _camera_device_mmap_framebuffer(LinuxCameraDevice* cd)
* than requested. */
if(_xioctl(cd->handle, VIDIOC_REQBUFS, &req)) {
if (EINVAL == errno) {
- D("%s: %s does not support memory mapping",
+ D("%s: Device '%s' does not support memory mapping",
__FUNCTION__, cd->device_name);
return 1;
} else {
@@ -303,7 +377,7 @@ _camera_device_user_framebuffer(LinuxCameraDevice* cd)
* than requested. */
if(_xioctl(cd->handle, VIDIOC_REQBUFS, &req)) {
if (EINVAL == errno) {
- D("%s: %s does not support user pointers",
+ D("%s: Device '%s' does not support user pointers",
__FUNCTION__, cd->device_name);
return 1;
} else {
@@ -337,7 +411,7 @@ _camera_device_user_framebuffer(LinuxCameraDevice* cd)
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_USERPTR;
- buf.m.userptr = cd->framebuffers[cd->framebuffer_num].data;
+ buf.m.userptr = (unsigned long)cd->framebuffers[cd->framebuffer_num].data;
buf.length = cd->framebuffers[cd->framebuffer_num].size;
if (_xioctl(cd->handle, VIDIOC_QBUF, &buf) < 0) {
E("%s: VIDIOC_QBUF has failed: %s", __FUNCTION__, strerror(errno));
@@ -379,6 +453,12 @@ _camera_device_direct_framebuffer(LinuxCameraDevice* cd)
return 0;
}
+/* Opens camera device.
+ * Param:
+ * cd - Camera device descriptor to open the camera for.
+ * Return:
+ * 0 on success, != 0 on failure.
+ */
static int
_camera_device_open(LinuxCameraDevice* cd)
{
@@ -391,14 +471,14 @@ _camera_device_open(LinuxCameraDevice* cd)
}
if (!S_ISCHR(st.st_mode)) {
- E("%s: %s is not a device", __FUNCTION__, cd->device_name);
+ E("%s: '%s' is not a device", __FUNCTION__, cd->device_name);
return -1;
}
/* Open handle to the device, and query device capabilities. */
cd->handle = open(cd->device_name, O_RDWR | O_NONBLOCK, 0);
if (cd->handle < 0) {
- E("%s: Cannot open camera device '%s': %s\n",
+ E("%s: Cannot open camera device '%s': %s",
__FUNCTION__, cd->device_name, strerror(errno));
return -1;
}
@@ -410,7 +490,7 @@ _camera_device_open(LinuxCameraDevice* cd)
cd->handle = -1;
return -1;
} else {
- E("%s: Unable to query camera '%s' capabilities",
+ E("%s: Unable to query capabilities for camera device '%s'",
__FUNCTION__, cd->device_name);
close(cd->handle);
cd->handle = -1;
@@ -430,25 +510,270 @@ _camera_device_open(LinuxCameraDevice* cd)
return 0;
}
+/* Enumerates frame sizes for the given pixel format.
+ * Param:
+ * cd - Opened camera device descriptor.
+ * fmt - Pixel format to enum frame sizes for.
+ * sizes - Upon success contains an array of supported frame sizes. The size of
+ * the array is defined by the value, returned from this routine. The caller
+ * is responsible for freeing memory allocated for this array.
+ * Return:
+ * On success returns number of entries in the 'sizes' array. On failure returns
+ * a negative value.
+ */
+static int
+_camera_device_enum_format_sizes(LinuxCameraDevice* cd,
+ uint32_t fmt,
+ CameraFrameDim** sizes)
+{
+ int n;
+ int sizes_num = 0;
+ int out_num = 0;
+ struct v4l2_frmsizeenum size_enum;
+ CameraFrameDim* arr;
+
+ /* Calculate number of supported sizes for the given format. */
+ for (n = 0; ; n++) {
+ size_enum.index = n;
+ size_enum.pixel_format = fmt;
+ if(_xioctl(cd->handle, VIDIOC_ENUM_FRAMESIZES, &size_enum)) {
+ break;
+ }
+ if (size_enum.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
+ /* Size is in the simple width, height form. */
+ sizes_num++;
+ } else if (size_enum.type == V4L2_FRMSIZE_TYPE_STEPWISE) {
+ /* Sizes are represented as min/max width and height with a step for
+ * each dimension. Since at the end we want to list each supported
+ * size in the array (that's the only format supported by the guest
+ * camera framework), we need to calculate how many array entries
+ * this will generate. */
+ const uint32_t dif_widths =
+ (size_enum.stepwise.max_width - size_enum.stepwise.min_width) /
+ size_enum.stepwise.step_width + 1;
+ const uint32_t dif_heights =
+ (size_enum.stepwise.max_height - size_enum.stepwise.min_height) /
+ size_enum.stepwise.step_height + 1;
+ sizes_num += dif_widths * dif_heights;
+ } else if (size_enum.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) {
+ /* Special stepwise case, when steps are set to 1. We still need to
+ * flatten this for the guest, but the array may be too big.
+ * Fortunately, we don't need to be fancy, so three sizes would be
+ * sufficient here: min, max, and one in the middle. */
+ sizes_num += 3;
+ }
+
+ }
+ if (sizes_num == 0) {
+ return 0;
+ }
+
+ /* Allocate, and initialize the array of supported entries. */
+ *sizes = (CameraFrameDim*)malloc(sizes_num * sizeof(CameraFrameDim));
+ if (*sizes == NULL) {
+ E("%s: Memory allocation failure", __FUNCTION__);
+ return -1;
+ }
+ arr = *sizes;
+ for (n = 0; out_num < sizes_num; n++) {
+ size_enum.index = n;
+ size_enum.pixel_format = fmt;
+ if(_xioctl(cd->handle, VIDIOC_ENUM_FRAMESIZES, &size_enum)) {
+ /* Errors are not welcome here anymore. */
+ E("%s: Unexpected failure while getting pixel dimensions: %s",
+ strerror(errno));
+ free(arr);
+ return -1;
+ }
+
+ if (size_enum.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
+ arr[out_num].width = size_enum.discrete.width;
+ arr[out_num].height = size_enum.discrete.height;
+ out_num++;
+ } else if (size_enum.type == V4L2_FRMSIZE_TYPE_STEPWISE) {
+ uint32_t w;
+ for (w = size_enum.stepwise.min_width;
+ w <= size_enum.stepwise.max_width;
+ w += size_enum.stepwise.step_width) {
+ uint32_t h;
+ for (h = size_enum.stepwise.min_height;
+ h <= size_enum.stepwise.max_height;
+ h += size_enum.stepwise.step_height) {
+ arr[out_num].width = w;
+ arr[out_num].height = h;
+ out_num++;
+ }
+ }
+ } else if (size_enum.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) {
+ /* min */
+ arr[out_num].width = size_enum.stepwise.min_width;
+ arr[out_num].height = size_enum.stepwise.min_height;
+ out_num++;
+ /* one in the middle */
+ arr[out_num].width =
+ (size_enum.stepwise.min_width + size_enum.stepwise.max_width) / 2;
+ arr[out_num].height =
+ (size_enum.stepwise.min_height + size_enum.stepwise.max_height) / 2;
+ out_num++;
+ /* max */
+ arr[out_num].width = size_enum.stepwise.max_width;
+ arr[out_num].height = size_enum.stepwise.max_height;
+ out_num++;
+ }
+ }
+
+ return out_num;
+}
+
+/* Enumerates pixel formats, supported by the device.
+ * Note that this routine will enumerate only raw (uncompressed) formats.
+ * Param:
+ * cd - Opened camera device descriptor.
+ * fmts - Upon success contains an array of supported pixel formats. The size of
+ * the array is defined by the value, returned from this routine. The caller
+ * is responsible for freeing memory allocated for this array.
+ * Return:
+ * On success returns number of entries in the 'fmts' array. On failure returns
+ * a negative value.
+ */
+static int
+_camera_device_enum_pixel_formats(LinuxCameraDevice* cd, QemuPixelFormat** fmts)
+{
+ int n;
+ int fmt_num = 0;
+ int out_num = 0;
+ struct v4l2_fmtdesc fmt_enum;
+ QemuPixelFormat* arr;
+
+ /* Calculate number of supported formats. */
+ for (n = 0; ; n++) {
+ fmt_enum.index = n;
+ fmt_enum.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if(_xioctl(cd->handle, VIDIOC_ENUM_FMT, &fmt_enum)) {
+ break;
+ }
+ /* Skip the compressed ones. */
+ if ((fmt_enum.flags & V4L2_FMT_FLAG_COMPRESSED) == 0) {
+ fmt_num++;
+ }
+ }
+ if (fmt_num == 0) {
+ return 0;
+ }
+
+ /* Allocate, and initialize array for enumerated formats. */
+ *fmts = (QemuPixelFormat*)malloc(fmt_num * sizeof(QemuPixelFormat));
+ if (*fmts == NULL) {
+ E("%s: Memory allocation failure", __FUNCTION__);
+ return -1;
+ }
+ arr = *fmts;
+ memset(arr, 0, fmt_num * sizeof(QemuPixelFormat));
+ for (n = 0; out_num < fmt_num; n++) {
+ fmt_enum.index = n;
+ fmt_enum.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if(_xioctl(cd->handle, VIDIOC_ENUM_FMT, &fmt_enum)) {
+ int nn;
+ /* Errors are not welcome here anymore. */
+ E("%s: Unexpected failure while getting pixel format: %s",
+ strerror(errno));
+ for (nn = 0; nn < out_num; nn++) {
+ _qemu_pixel_format_free(arr + nn);
+ }
+ free(arr);
+ return -1;
+ }
+ /* Skip the compressed ones. */
+ if ((fmt_enum.flags & V4L2_FMT_FLAG_COMPRESSED) == 0) {
+ arr[out_num].format = fmt_enum.pixelformat;
+ /* Enumerate frame dimensions supported for this format. */
+ arr[out_num].dim_num =
+ _camera_device_enum_format_sizes(cd, fmt_enum.pixelformat,
+ &arr[out_num].dims);
+ if (arr[out_num].dim_num > 0) {
+ out_num++;
+ } else if (arr[out_num].dim_num < 0) {
+ int nn;
+ E("Unable to enumerate supported dimensions for pixel format %d",
+ fmt_enum.pixelformat);
+ for (nn = 0; nn < out_num; nn++) {
+ _qemu_pixel_format_free(arr + nn);
+ }
+ free(arr);
+ return -1;
+ }
+ }
+ }
+
+ return out_num;
+}
+
+/* Collects information about an opened camera device.
+ * The information collected in this routine contains list of pixel formats,
+ * supported by the device, and list of frame dimensions supported by the camera
+ * for each pixel format.
+ * Param:
+ * cd - Opened camera device descriptor.
+ * cis - Upon success contains information collected from the camera device.
+ * Return:
+ * 0 on success, != 0 on failure.
+ */
+static int
+_camera_device_get_info(LinuxCameraDevice* cd, CameraInfo* cis)
+{
+ int f;
+ int chosen = -1;
+ QemuPixelFormat* formats = NULL;
+ int num_pix_fmts = _camera_device_enum_pixel_formats(cd, &formats);
+ if (num_pix_fmts <= 0) {
+ return num_pix_fmts;
+ }
+
+ /* Let's see if the camera supports any of the preferred formats. */
+ for (f = 0; f < _preferred_format_num; f++) {
+ chosen = _get_format_index(_preferred_formats[f], formats, num_pix_fmts);
+ if (chosen >= 0) {
+ break;
+ }
+ }
+ if (chosen < 0) {
+ /* The camera doesn't support any of the preferred formats, so it doesn't
+ * matter which one we choose. Let's pick the first one. */
+ chosen = 0;
+ }
+
+ cis->device_name = ASTRDUP(cd->device_name);
+ cis->inp_channel = cd->input_channel;
+ cis->pixel_format = formats[chosen].format;
+ cis->frame_sizes_num = formats[chosen].dim_num;
+ /* Swap instead of copy. */
+ cis->frame_sizes = formats[chosen].dims;
+ formats[chosen].dims = NULL;
+ cis->in_use = 0;
+
+ for (f = 0; f < num_pix_fmts; f++) {
+ _qemu_pixel_format_free(formats + f);
+ }
+ free(formats);
+
+ return 0;
+}
+
/*******************************************************************************
* CameraDevice API
******************************************************************************/
CameraDevice*
-camera_device_open(const char* name,
- int inp_channel,
- uint32_t pixel_format)
+camera_device_open(const char* name, int inp_channel)
{
struct v4l2_cropcap cropcap;
struct v4l2_crop crop;
- struct v4l2_format fmt;
LinuxCameraDevice* cd;
/* Allocate and initialize the descriptor. */
cd = _camera_device_alloc();
cd->device_name = name != NULL ? ASTRDUP(name) : ASTRDUP("/dev/video0");
cd->input_channel = inp_channel;
- cd->req_pixel_format = pixel_format;
/* Open the device. */
if (_camera_device_open(cd)) {
@@ -463,45 +788,19 @@ camera_device_open(const char* name,
crop.c = cropcap.defrect; /* reset to default */
_xioctl (cd->handle, VIDIOC_S_CROP, &crop);
- /* Image settings. */
- CLEAR(fmt);
- fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- fmt.fmt.pix.width = 0;
- fmt.fmt.pix.height = 0;
- fmt.fmt.pix.pixelformat = 0;
- if (_xioctl(cd->handle, VIDIOC_G_FMT, &fmt) < 0) {
- E("%s: Unable to obtain pixel format", __FUNCTION__);
- _camera_device_free(cd);
- return NULL;
- }
- if (_xioctl(cd->handle, VIDIOC_S_FMT, &fmt) < 0) {
- char fmt_str[5];
- memcpy(fmt_str, &cd->req_pixel_format, 4);
- fmt_str[4] = '\0';
- E("%s: Camera '%s' does not support requested pixel format '%s'",
- __FUNCTION__, cd->device_name, fmt_str);
- _camera_device_free(cd);
- return NULL;
- }
- /* VIDIOC_S_FMT has changed some properties of the structure, adjusting them
- * to the actual values, supported by the device. */
- memcpy(&cd->actual_pixel_format, &fmt.fmt.pix,
- sizeof(cd->actual_pixel_format));
- {
- char fmt_str[5];
- memcpy(fmt_str, &cd->req_pixel_format, 4);
- fmt_str[4] = '\0';
- D("%s: Camera '%s' uses pixel format '%s'",
- __FUNCTION__, cd->device_name, fmt_str);
- }
-
return &cd->header;
}
int
-camera_device_start_capturing(CameraDevice* ccd)
+camera_device_start_capturing(CameraDevice* ccd,
+ uint32_t pixel_format,
+ int frame_width,
+ int frame_height)
{
+ struct v4l2_format fmt;
LinuxCameraDevice* cd;
+ char fmt_str[5];
+ int r;
/* Sanity checks. */
if (ccd == NULL || ccd->opaque == NULL) {
@@ -510,13 +809,37 @@ camera_device_start_capturing(CameraDevice* ccd)
}
cd = (LinuxCameraDevice*)ccd->opaque;
+ /* Try to set pixel format with the given dimensions. */
+ CLEAR(fmt);
+ fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ fmt.fmt.pix.width = frame_width;
+ fmt.fmt.pix.height = frame_height;
+ fmt.fmt.pix.pixelformat = pixel_format;
+ if (_xioctl(cd->handle, VIDIOC_S_FMT, &fmt) < 0) {
+ memcpy(fmt_str, &pixel_format, 4);
+ fmt_str[4] = '\0';
+ E("%s: Camera '%s' does not support pixel format '%s' with dimensions %dx%d",
+ __FUNCTION__, cd->device_name, fmt_str, frame_width, frame_height);
+ return -1;
+ }
+ /* VIDIOC_S_FMT may have changed some properties of the structure. Make sure
+ * that the dimensions didn't change. */
+ if (fmt.fmt.pix.width != frame_width || fmt.fmt.pix.height != frame_height) {
+ memcpy(fmt_str, &pixel_format, 4);
+ fmt_str[4] = '\0';
+ E("%s: Dimensions %dx%d are wrong for pixel format '%s'",
+ __FUNCTION__, frame_width, frame_height, fmt_str);
+ return -1;
+ }
+ memcpy(&cd->actual_pixel_format, &fmt.fmt.pix, sizeof(struct v4l2_pix_format));
+
/*
* Let's initialize frame buffers, and see what kind of I/O we're going to
* use to retrieve frames.
*/
/* First, let's see if we can do mapped I/O (as the most performant option). */
- int r = _camera_device_mmap_framebuffer(cd);
+ r = _camera_device_mmap_framebuffer(cd);
if (r < 0) {
/* Some critical error has occurred. Bail out. */
return -1;
@@ -530,7 +853,7 @@ camera_device_start_capturing(CameraDevice* ccd)
} else if (r > 0) {
/* The only thing left for us is direct reading from the device. */
if (!(cd->caps.capabilities & V4L2_CAP_READWRITE)) {
- E("%s: Device '%s' doesn't support direct read",
+ E("%s: Don't know how to access frames on device '%s'",
__FUNCTION__, cd->device_name);
return -1;
}
@@ -547,12 +870,11 @@ camera_device_start_capturing(CameraDevice* ccd)
enum v4l2_buf_type type;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (_xioctl (cd->handle, VIDIOC_STREAMON, &type) < 0) {
- E("%s: VIDIOC_STREAMON has failed: %s",
- __FUNCTION__, strerror(errno));
+ E("%s: VIDIOC_STREAMON on camera '%s' has failed: %s",
+ __FUNCTION__, cd->device_name, strerror(errno));
return -1;
}
}
-
return 0;
}
@@ -578,8 +900,8 @@ camera_device_stop_capturing(CameraDevice* ccd)
case CAMERA_IO_USERPTR:
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (_xioctl(cd->handle, VIDIOC_STREAMOFF, &type) < 0) {
- E("%s: VIDIOC_STREAMOFF has failed: %s",
- __FUNCTION__, strerror(errno));
+ E("%s: VIDIOC_STREAMOFF on camera '%s' has failed: %s",
+ __FUNCTION__, cd->device_name, strerror(errno));
return -1;
}
break;
@@ -598,7 +920,9 @@ camera_device_stop_capturing(CameraDevice* ccd)
}
int
-camera_device_read_frame(CameraDevice* ccd, uint8_t* buff)
+camera_device_read_frame(CameraDevice* ccd,
+ ClientFrameBuffer* framebuffers,
+ int fbs_num)
{
LinuxCameraDevice* cd;
@@ -612,6 +936,8 @@ camera_device_read_frame(CameraDevice* ccd, uint8_t* buff)
if (cd->io_type == CAMERA_IO_DIRECT) {
/* Read directly from the device. */
size_t total_read_bytes = 0;
+ /* There is one framebuffer allocated for direct read. */
+ void* buff = cd->framebuffers[0].data;
do {
int read_bytes =
read(cd->handle, buff + total_read_bytes,
@@ -622,44 +948,55 @@ camera_device_read_frame(CameraDevice* ccd, uint8_t* buff)
case EAGAIN:
continue;
default:
- E("%s: Unable to read from the device: %s",
- __FUNCTION__, strerror(errno));
+ E("%s: Unable to read from the camera device '%s': %s",
+ __FUNCTION__, cd->device_name, strerror(errno));
return -1;
}
}
total_read_bytes += read_bytes;
} while (total_read_bytes < cd->actual_pixel_format.sizeimage);
- return 0;
+ /* Convert the read frame into the caller's framebuffers. */
+ return convert_frame(buff, cd->actual_pixel_format.pixelformat,
+ cd->actual_pixel_format.sizeimage,
+ cd->actual_pixel_format.width,
+ cd->actual_pixel_format.height,
+ framebuffers, fbs_num);
} else {
/* Dequeue next buffer from the device. */
struct v4l2_buffer buf;
+ int res;
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = cd->io_type == CAMERA_IO_MEMMAP ? V4L2_MEMORY_MMAP :
V4L2_MEMORY_USERPTR;
- if (_xioctl(cd->handle, VIDIOC_DQBUF, &buf) < 0) {
- switch (errno) {
- case EAGAIN:
- return 1;
-
- case EIO:
- /* Could ignore EIO, see spec. */
- /* fall through */
- default:
- E("%s: VIDIOC_DQBUF has failed: %s",
- __FUNCTION__, strerror(errno));
- return 1;
+ for (;;) {
+ const int res = _xioctl(cd->handle, VIDIOC_DQBUF, &buf);
+ if (res >= 0) {
+ break;
+ } else if (errno == EAGAIN) {
+ return 1; // Tells the caller to repeat.
+ } else if (errno != EINTR && errno != EIO) {
+ E("%s: VIDIOC_DQBUF on camera '%s' has failed: %s",
+ __FUNCTION__, cd->device_name, strerror(errno));
+ return -1;
}
}
- /* Copy frame to the buffer. */
- memcpy(buff, cd->framebuffers[buf.index].data,
- cd->framebuffers[buf.index].size);
- /* Requeue the buffer with the device. */
+
+ /* Convert frame to the receiving buffers. */
+ res = convert_frame(cd->framebuffers[buf.index].data,
+ cd->actual_pixel_format.pixelformat,
+ cd->actual_pixel_format.sizeimage,
+ cd->actual_pixel_format.width,
+ cd->actual_pixel_format.height,
+ framebuffers, fbs_num);
+
+ /* Requeue the buffer back to the device. */
if (_xioctl(cd->handle, VIDIOC_QBUF, &buf) < 0) {
- D("%s: VIDIOC_QBUF has failed: %s",
- __FUNCTION__, strerror(errno));
+ W("%s: VIDIOC_QBUF on camera '%s' has failed: %s",
+ __FUNCTION__, cd->device_name, strerror(errno));
}
- return 0;
+
+ return res;
}
}
@@ -676,3 +1013,30 @@ camera_device_close(CameraDevice* ccd)
E("%s: Invalid camera device descriptor", __FUNCTION__);
}
}
+
+int
+enumerate_camera_devices(CameraInfo* cis, int max)
+{
+ char dev_name[24];
+ int found = 0;
+ int n;
+
+ for (n = 0; n < max; n++) {
+ CameraDevice* cd;
+
+ sprintf(dev_name, "/dev/video%d", n);
+ cd = camera_device_open(dev_name, 0);
+ if (cd != NULL) {
+ LinuxCameraDevice* lcd = (LinuxCameraDevice*)cd->opaque;
+ if (!_camera_device_get_info(lcd, cis + found)) {
+ cis[found].in_use = 0;
+ found++;
+ }
+ camera_device_close(cd);
+ } else {
+ break;
+ }
+ }
+
+ return found;
+}
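To make the stepwise enumeration above concrete: for a hypothetical device reporting widths 320..640 in steps of 160 and heights 240..480 in steps of 120, the same arithmetic used by _camera_device_enum_format_sizes yields a flattened list of 9 discrete dimensions:

    /* Hypothetical stepwise range, flattened as in the code above. */
    const uint32_t dif_widths  = (640 - 320) / 160 + 1;   /* 3 widths  */
    const uint32_t dif_heights = (480 - 240) / 120 + 1;   /* 3 heights */
    /* 3 * 3 = 9 entries: 320x240, 320x360, 320x480, 480x240, ..., 640x480 */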
diff --git a/android/camera/camera-capture-windows.c b/android/camera/camera-capture-windows.c
index ac571ce..e5120ab 100755
--- a/android/camera/camera-capture-windows.c
+++ b/android/camera/camera-capture-windows.c
@@ -18,17 +18,7 @@
* Contains code capturing video frames from a camera device on Windows.
* This code uses capXxx API, available via capCreateCaptureWindow.
*/
-/*
-#include <stddef.h>
-#include <windows.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <tchar.h>
-*/
-#include "qemu-common.h"
-#include "android/utils/debug.h"
-#include "android/utils/misc.h"
-#include "android/utils/system.h"
+
#include <vfw.h>
#include "android/camera/camera-capture.h"
#include "android/camera/camera-format-converters.h"
@@ -59,8 +49,6 @@ struct WndCameraDevice {
char* window_name;
/* Input channel (video driver index). (default is 0) */
int input_channel;
- /* Requested pixel format. */
- uint32_t req_pixel_format;
/*
* Set when framework gets initialized.
@@ -71,15 +59,27 @@ struct WndCameraDevice {
/* DC for frame bitmap manipulation. Null indicates that frames are not
* being capturing. */
HDC dc;
- /* Bitmap info for the frames obtained from the video capture driver.
- * This information will be used when we get bitmap bits via
- * GetDIBits API. */
+ /* Bitmap info for the frames obtained from the video capture driver. */
BITMAPINFO* frame_bitmap;
+ /* Bitmap info to use for GetDIBits calls. We can't really use the bitmap
+ * info obtained from the video capture driver, because of two issues. First,
+ * the driver may return an incompatible 'biCompression' value. For instance,
+ * sometimes it returns a 'fourcc' pixel format value instead of BI_XXX,
+ * which causes GetDIBits to fail. Second, the bitmap that represents a frame
+ * actually obtained from the device does not necessarily match the bitmap
+ * info that the capture driver has returned. Sometimes the captured bitmap
+ * is a 32-bit RGB, while the bit count reported by the driver is 16. So, to
+ * address these issues we need to have another bitmap info that can be used
+ * in GetDIBits calls. */
+ BITMAPINFO* gdi_bitmap;
/* Framebuffer large enough to fit the frame. */
uint8_t* framebuffer;
- /* Converter used to convert camera frames to the format
- * expected by the client. */
- FormatConverter converter;
+ /* Framebuffer size. */
+ size_t framebuffer_size;
+ /* Framebuffer's pixel format. */
+ uint32_t pixel_format;
+ /* If != 0, frame bitmap is "top-down". If 0, frame bitmap is "bottom-up". */
+ int is_top_down;
};
/*******************************************************************************
@@ -114,9 +114,7 @@ _camera_device_free(WndCameraDevice* cd)
{
if (cd != NULL) {
if (cd->cap_window != NULL) {
- /* Since connecting to the driver is part of the successful
- * camera initialization, we're safe to assume that driver
- * is connected to the capture window. */
+ /* Disconnect from the driver. */
capDriverDisconnect(cd->cap_window);
if (cd->dc != NULL) {
@@ -129,6 +127,9 @@ _camera_device_free(WndCameraDevice* cd)
DestroyWindow(cd->cap_window);
cd->cap_window = NULL;
}
+ if (cd->gdi_bitmap != NULL) {
+ free(cd->gdi_bitmap);
+ }
if (cd->frame_bitmap != NULL) {
free(cd->frame_bitmap);
}
@@ -144,44 +145,14 @@ _camera_device_free(WndCameraDevice* cd)
}
}
-static uint32_t
-_camera_device_convertable_format(WndCameraDevice* cd)
-{
- if (cd != NULL) {
- switch (cd->header.pixel_format) {
- case BI_RGB:
- switch (cd->frame_bitmap->bmiHeader.biBitCount) {
- case 24:
- return V4L2_PIX_FMT_RGB24;
- default:
- E("%s: Camera API uses unsupported RGB format RGB%d",
- __FUNCTION__, cd->frame_bitmap->bmiHeader.biBitCount * 3);
- return 0;
- }
- break;
- default:
- E("%s: Camera API uses unsupported format %d",
- __FUNCTION__, cd->header.pixel_format);
- break;
- }
- } else {
- E("%s: No descriptor", __FUNCTION__);
- }
-
- return 0;
-}
-
/*******************************************************************************
* CameraDevice API
******************************************************************************/
CameraDevice*
-camera_device_open(const char* name,
- int inp_channel,
- uint32_t pixel_format)
+camera_device_open(const char* name, int inp_channel)
{
WndCameraDevice* wcd;
- size_t format_info_size;
/* Allocate descriptor and initialize windows-specific fields. */
wcd = _camera_device_alloc();
@@ -197,7 +168,6 @@ camera_device_open(const char* name,
return NULL;
}
wcd->input_channel = inp_channel;
- wcd->req_pixel_format = pixel_format;
/* Create capture window that is a child of HWND_MESSAGE window.
* We make it invisible, so it doesn't mess with the UI. Also
@@ -207,23 +177,44 @@ camera_device_open(const char* name,
wcd->cap_window = capCreateCaptureWindow(wcd->window_name, WS_CHILD, 0, 0,
0, 0, HWND_MESSAGE, 1);
if (wcd->cap_window == NULL) {
- E("%s: Unable to create video capturing window: %d",
- __FUNCTION__, GetLastError());
+ E("%s: Unable to create video capturing window '%s': %d",
+ __FUNCTION__, wcd->window_name, GetLastError());
_camera_device_free(wcd);
return NULL;
}
+ return &wcd->header;
+}
+
+int
+camera_device_start_capturing(CameraDevice* cd,
+ uint32_t pixel_format,
+ int frame_width,
+ int frame_height)
+{
+ WndCameraDevice* wcd;
+ HBITMAP bm_handle;
+ BITMAP bitmap;
+ size_t format_info_size;
+
+ if (cd == NULL || cd->opaque == NULL) {
+ E("%s: Invalid camera device descriptor", __FUNCTION__);
+ return -1;
+ }
+ wcd = (WndCameraDevice*)cd->opaque;
+
+ /* wcd->dc is an indicator of capturing: !NULL - capturing, NULL - not */
+ if (wcd->dc != NULL) {
+ W("%s: Capturing is already on on device '%s'",
+ __FUNCTION__, wcd->window_name);
+ return 0;
+ }
+
/* Connect capture window to the video capture driver. */
if (!capDriverConnect(wcd->cap_window, wcd->input_channel)) {
- /* Unable to connect to a driver. Need to cleanup everything
- * now since we're not going to receive camera_cleanup() call
- * after unsuccessful camera initialization. */
E("%s: Unable to connect to the video capturing driver #%d: %d",
__FUNCTION__, wcd->input_channel, GetLastError());
- DestroyWindow(wcd->cap_window);
- wcd->cap_window = NULL;
- _camera_device_free(wcd);
- return NULL;
+ return -1;
}
/* Get frame information from the driver. */
@@ -231,70 +222,46 @@ camera_device_open(const char* name,
if (format_info_size == 0) {
E("%s: Unable to get video format size: %d",
__FUNCTION__, GetLastError());
- return NULL;
+ return -1;
}
wcd->frame_bitmap = (BITMAPINFO*)malloc(format_info_size);
if (wcd->frame_bitmap == NULL) {
E("%s: Unable to allocate frame bitmap info buffer", __FUNCTION__);
- _camera_device_free(wcd);
- return NULL;
+ return -1;
}
if (!capGetVideoFormat(wcd->cap_window, wcd->frame_bitmap,
- format_info_size)) {
+ format_info_size)) {
E("%s: Unable to obtain video format: %d", __FUNCTION__, GetLastError());
- _camera_device_free(wcd);
- return NULL;
+ return -1;
}
- /* Initialize the common header. */
- wcd->header.width = wcd->frame_bitmap->bmiHeader.biWidth;
- wcd->header.height = wcd->frame_bitmap->bmiHeader.biHeight;
- wcd->header.bpp = wcd->frame_bitmap->bmiHeader.biBitCount;
- wcd->header.pixel_format = wcd->frame_bitmap->bmiHeader.biCompression;
- wcd->header.bpl = (wcd->header.width * wcd->header.bpp) / 8;
- if ((wcd->header.width * wcd->header.bpp) % 8) {
- // TODO: Is this correct to assume that new line in framebuffer is aligned
- // to a byte, or is it alogned to a multiple of bytes occupied by a pixel?
- wcd->header.bpl++;
- }
- wcd->header.framebuffer_size = wcd->header.bpl * wcd->header.height;
-
- /* Lets see if we have a convertor for the format. */
- wcd->converter = get_format_converted(_camera_device_convertable_format(wcd),
- wcd->req_pixel_format);
- if (wcd->converter == NULL) {
- E("%s: No converter available", __FUNCTION__);
- _camera_device_free(wcd);
- return NULL;
+ if (wcd->frame_bitmap->bmiHeader.biCompression > BI_PNG) {
+ D("%s: Video capturing driver has reported pixel format %.4s",
+ __FUNCTION__, (const char*)&wcd->frame_bitmap->bmiHeader.biCompression);
}
- /* Allocate framebuffer. */
- wcd->framebuffer = (uint8_t*)malloc(wcd->header.framebuffer_size);
- if (wcd->framebuffer == NULL) {
- E("%s: Unable to allocate framebuffer", __FUNCTION__);
- _camera_device_free(wcd);
- return NULL;
+ /* Most of the time frame bitmaps come in "bottom-up" form, where its origin
+ * is the lower-left corner. However, it could be in the normal "top-down"
+ * form with the origin in the upper-left corner. So, we must adjust the
+ * biHeight field, since the way "top-down" form is reported here is by
+ * setting biHeight to a negative value. */
+ if (wcd->frame_bitmap->bmiHeader.biHeight < 0) {
+ wcd->frame_bitmap->bmiHeader.biHeight =
+ -wcd->frame_bitmap->bmiHeader.biHeight;
+ wcd->is_top_down = 1;
+ } else {
+ wcd->is_top_down = 0;
}
- return &wcd->header;
-}
-
-int
-camera_device_start_capturing(CameraDevice* cd)
-{
- WndCameraDevice* wcd;
- if (cd == NULL || cd->opaque == NULL) {
- E("%s: Invalid camera device descriptor", __FUNCTION__);
+ /* Make sure that frame dimensions match. */
+ if (frame_width != wcd->frame_bitmap->bmiHeader.biWidth ||
+ frame_height != wcd->frame_bitmap->bmiHeader.biHeight) {
+ E("%s: Requested dimensions %dx%d do not match the actual %dx%d",
+ __FUNCTION__, frame_width, frame_height,
+ wcd->frame_bitmap->bmiHeader.biWidth,
+ wcd->frame_bitmap->bmiHeader.biHeight);
return -1;
}
- wcd = (WndCameraDevice*)cd->opaque;
-
- /* wcd->dc is an indicator of capturin: !NULL - capturing, NULL - not */
- if (wcd->dc != NULL) {
- /* It's already capturing. */
- W("%s: Capturing is already on %s", __FUNCTION__, wcd->window_name);
- return 0;
- }
/* Get DC for the capturing window that will be used when we deal with
* bitmaps obtained from the camera device during frame capturing. */
@@ -305,6 +272,105 @@ camera_device_start_capturing(CameraDevice* cd)
return -1;
}
+ /*
+ * At this point we need to grab a frame to properly setup framebuffer, and
+ * calculate pixel format. The problem is that bitmap information obtained
+ * from the driver doesn't necessarily match the actual bitmap we're going to
+ * obtain via capGrabFrame / capEditCopy / GetClipboardData
+ */
+
+ /* Grab a frame, and post it to the clipboard. Not very effective, but this
+ * is how capXxx API is operating. */
+ if (!capGrabFrameNoStop(wcd->cap_window) ||
+ !capEditCopy(wcd->cap_window) ||
+ !OpenClipboard(wcd->cap_window)) {
+ E("%s: Device '%s' is unable to save frame to the clipboard: %d",
+ __FUNCTION__, wcd->window_name, GetLastError());
+ return -1;
+ }
+
+ /* Get bitmap handle saved into clipboard. Note that bitmap is still
+ * owned by the clipboard here! */
+ bm_handle = (HBITMAP)GetClipboardData(CF_BITMAP);
+ if (bm_handle == NULL) {
+ E("%s: Device '%s' is unable to obtain frame from the clipboard: %d",
+ __FUNCTION__, wcd->window_name, GetLastError());
+ CloseClipboard();
+ return -1;
+ }
+
+ /* Get bitmap object that is initialized with the actual bitmap info. */
+ if (!GetObject(bm_handle, sizeof(BITMAP), &bitmap)) {
+ E("%s: Device '%s' is unable to obtain frame's bitmap: %d",
+ __FUNCTION__, wcd->window_name, GetLastError());
+ CloseClipboard();
+ return -1;
+ }
+
+ /* Now we have all we need in 'bitmap', so the clipboard can be closed. */
+ CloseClipboard();
+
+ /* Make sure that dimensions match. Otherwise, fail. */
+ if (wcd->frame_bitmap->bmiHeader.biWidth != bitmap.bmWidth ||
+ wcd->frame_bitmap->bmiHeader.biHeight != bitmap.bmHeight ) {
+ E("%s: Requested dimensions %dx%d do not match the actual %dx%d",
+ __FUNCTION__, frame_width, frame_height,
+ wcd->frame_bitmap->bmiHeader.biWidth,
+ wcd->frame_bitmap->bmiHeader.biHeight);
+ return -1;
+ }
+
+ /* Create bitmap info that will be used with GetDIBits. */
+ wcd->gdi_bitmap = (BITMAPINFO*)malloc(wcd->frame_bitmap->bmiHeader.biSize);
+ if (wcd->gdi_bitmap == NULL) {
+ E("%s: Unable to allocate gdi bitmap info", __FUNCTION__);
+ return -1;
+ }
+ memcpy(wcd->gdi_bitmap, wcd->frame_bitmap,
+ wcd->frame_bitmap->bmiHeader.biSize);
+ wcd->gdi_bitmap->bmiHeader.biCompression = BI_RGB;
+ wcd->gdi_bitmap->bmiHeader.biBitCount = bitmap.bmBitsPixel;
+ wcd->gdi_bitmap->bmiHeader.biSizeImage = bitmap.bmWidthBytes * bitmap.bmHeight;
+ /* Adjust the GDI bitmap's biHeight for proper frame direction ("top-down" or
+ * "bottom-up"). We do this trick in order to simplify pixel format conversion
+ * routines, where we always assume "top-down" frames. The trick here is to
+ * have negative biHeight in 'gdi_bitmap' if the driver provides "bottom-up"
+ * frames, and positive biHeight in 'gdi_bitmap' if the driver provides
+ * "top-down" frames. This way GetDIBits will always return "top-down" frames. */
+ if (wcd->is_top_down) {
+ wcd->gdi_bitmap->bmiHeader.biHeight =
+ wcd->frame_bitmap->bmiHeader.biHeight;
+ } else {
+ wcd->gdi_bitmap->bmiHeader.biHeight =
+ -wcd->frame_bitmap->bmiHeader.biHeight;
+ }
+
+ /* Allocate framebuffer. */
+ wcd->framebuffer = (uint8_t*)malloc(wcd->gdi_bitmap->bmiHeader.biSizeImage);
+ if (wcd->framebuffer == NULL) {
+ E("%s: Unable to allocate %d bytes for framebuffer",
+ __FUNCTION__, wcd->gdi_bitmap->bmiHeader.biSizeImage);
+ return -1;
+ }
+
+ /* Let's see what pixel format we will use. */
+ if (wcd->gdi_bitmap->bmiHeader.biBitCount == 16) {
+ wcd->pixel_format = V4L2_PIX_FMT_RGB565;
+ } else if (wcd->gdi_bitmap->bmiHeader.biBitCount == 24) {
+ wcd->pixel_format = V4L2_PIX_FMT_RGB24;
+ } else if (wcd->gdi_bitmap->bmiHeader.biBitCount == 32) {
+ wcd->pixel_format = V4L2_PIX_FMT_RGB32;
+ } else {
+ E("%s: Unsupported number of bits per pixel %d",
+ __FUNCTION__, wcd->gdi_bitmap->bmiHeader.biBitCount);
+ return -1;
+ }
+
+ D("%s: Capturing device '%s': %d bytes in %.4s [%dx%d] frame",
+ __FUNCTION__, wcd->window_name, wcd->gdi_bitmap->bmiHeader.biBitCount,
+ (const char*)&wcd->pixel_format, wcd->frame_bitmap->bmiHeader.biWidth,
+ wcd->frame_bitmap->bmiHeader.biHeight);
+
return 0;
}
@@ -317,24 +383,29 @@ camera_device_stop_capturing(CameraDevice* cd)
return -1;
}
wcd = (WndCameraDevice*)cd->opaque;
+
+ /* wcd->dc is the indicator of capture. */
if (wcd->dc == NULL) {
- W("%s: Windows %s is not captuing video", __FUNCTION__, wcd->window_name);
+ W("%s: Device '%s' is not capturing video",
+ __FUNCTION__, wcd->window_name);
return 0;
}
ReleaseDC(wcd->cap_window, wcd->dc);
wcd->dc = NULL;
+ /* Disconnect from the driver. */
+ capDriverDisconnect(wcd->cap_window);
+
return 0;
}
int
-camera_device_read_frame(CameraDevice* cd, uint8_t* buffer)
+camera_device_read_frame(CameraDevice* cd,
+ ClientFrameBuffer* framebuffers,
+ int fbs_num)
{
WndCameraDevice* wcd;
- /* Bitmap handle taken from the clipboard. */
HBITMAP bm_handle;
- /* Pitmap placed to the clipboard. */
- BITMAP bitmap;
/* Sanity checks. */
if (cd == NULL || cd->opaque == NULL) {
@@ -343,7 +414,7 @@ camera_device_read_frame(CameraDevice* cd, uint8_t* buffer)
}
wcd = (WndCameraDevice*)cd->opaque;
if (wcd->dc == NULL) {
- W("%s: Windows %s is not captuing video",
+ W("%s: Device '%s' is not captuing video",
__FUNCTION__, wcd->window_name);
return -1;
}
@@ -353,7 +424,7 @@ camera_device_read_frame(CameraDevice* cd, uint8_t* buffer)
if (!capGrabFrameNoStop(wcd->cap_window) ||
!capEditCopy(wcd->cap_window) ||
!OpenClipboard(wcd->cap_window)) {
- E("%s: %s: Unable to save frame to the clipboard: %d",
+ E("%s: Device '%s' is unable to save frame to the clipboard: %d",
__FUNCTION__, wcd->window_name, GetLastError());
return -1;
}
@@ -361,39 +432,39 @@ camera_device_read_frame(CameraDevice* cd, uint8_t* buffer)
/* Get bitmap handle saved into clipboard. Note that bitmap is still
* owned by the clipboard here! */
bm_handle = (HBITMAP)GetClipboardData(CF_BITMAP);
- CloseClipboard();
if (bm_handle == NULL) {
- E("%s: %s: Unable to obtain frame from the clipboard: %d",
+ E("%s: Device '%s' is unable to obtain frame from the clipboard: %d",
__FUNCTION__, wcd->window_name, GetLastError());
+ CloseClipboard();
return -1;
}
- /* Get bitmap information */
- if (!GetObject(bm_handle, sizeof(BITMAP), &bitmap)) {
- E("%s: %s Unable to obtain frame's bitmap: %d",
- __FUNCTION__, wcd->window_name, GetLastError());
- return -1;
+ /* Get bitmap buffer. */
+ if (wcd->gdi_bitmap->bmiHeader.biHeight > 0) {
+ wcd->gdi_bitmap->bmiHeader.biHeight = -wcd->gdi_bitmap->bmiHeader.biHeight;
}
- /* Save bitmap bits to the framebuffer. */
if (!GetDIBits(wcd->dc, bm_handle, 0, wcd->frame_bitmap->bmiHeader.biHeight,
- wcd->framebuffer, wcd->frame_bitmap, DIB_RGB_COLORS)) {
- E("%s: %s: Unable to transfer frame to the framebuffer: %d",
+ wcd->framebuffer, wcd->gdi_bitmap, DIB_RGB_COLORS)) {
+ E("%s: Device '%s' is unable to transfer frame to the framebuffer: %d",
__FUNCTION__, wcd->window_name, GetLastError());
+ CloseClipboard();
return -1;
}
- /* Lets see if conversion is required. */
- if (_camera_device_convertable_format(wcd) == wcd->req_pixel_format) {
- /* Formats match. Just copy framebuffer over. */
- memcpy(buffer, wcd->framebuffer, wcd->header.framebuffer_size);
- } else {
- /* Formats do not match. Use the converter. */
- wcd->converter(wcd->framebuffer, wcd->header.width, wcd->header.height,
- buffer);
+ if (wcd->gdi_bitmap->bmiHeader.biHeight < 0) {
+ wcd->gdi_bitmap->bmiHeader.biHeight = -wcd->gdi_bitmap->bmiHeader.biHeight;
}
- return 0;
+ CloseClipboard();
+
+ /* Convert framebuffer. */
+ return convert_frame(wcd->framebuffer,
+ wcd->pixel_format,
+ wcd->gdi_bitmap->bmiHeader.biSizeImage,
+ wcd->frame_bitmap->bmiHeader.biWidth,
+ wcd->frame_bitmap->bmiHeader.biHeight,
+ framebuffers, fbs_num);
}
void
@@ -407,3 +478,54 @@ camera_device_close(CameraDevice* cd)
_camera_device_free(wcd);
}
}
+
+int
+enumerate_camera_devices(CameraInfo* cis, int max)
+{
+ int inp_channel, found = 0;
+
+ for (inp_channel = 0; inp_channel < 10 && found < max; inp_channel++) {
+ char name[256];
+ CameraDevice* cd;
+
+ snprintf(name, sizeof(name), "%s%d", _default_window_name, found);
+ cd = camera_device_open(name, inp_channel);
+ if (cd != NULL) {
+ WndCameraDevice* wcd = (WndCameraDevice*)cd->opaque;
+
+ /* Unfortunately, on Windows we have to start capturing in order to get the
+ * actual frame properties. Note that on Windows camera_device_start_capturing
+ * will ignore the pixel format parameter, since it will be determined during
+ * the course of the routine. Also note that on Windows frames are typically
+ * 640x480. */
+ if (!camera_device_start_capturing(cd, V4L2_PIX_FMT_RGB32, 640, 480)) {
+ camera_device_stop_capturing(cd);
+ /* capXxx API supports only single frame size (always observed 640x480,
+ * but the actual numbers may vary). */
+ cis[found].frame_sizes = (CameraFrameDim*)malloc(sizeof(CameraFrameDim));
+ if (cis[found].frame_sizes != NULL) {
+ cis[found].device_name = ASTRDUP(name);
+ cis[found].inp_channel = inp_channel;
+ cis[found].frame_sizes->width = wcd->frame_bitmap->bmiHeader.biWidth;
+ cis[found].frame_sizes->height = wcd->frame_bitmap->bmiHeader.biHeight;
+ cis[found].frame_sizes_num = 1;
+ cis[found].pixel_format = wcd->pixel_format;
+ cis[found].in_use = 0;
+ found++;
+ } else {
+ E("%s: Unable to allocate dimensions", __FUNCTION__);
+ }
+ } else {
+ /* No more cameras. */
+ camera_device_close(cd);
+ break;
+ }
+ camera_device_close(cd);
+ } else {
+ /* No more cameras. */
+ break;
+ }
+ }
+
+ return found;
+}
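On both platforms the enumerator hands ownership of each initialized CameraInfo entry to the caller. A minimal cleanup sketch (the array size and loop are hypothetical; the fields match camera-common.h below):

    CameraInfo cis[8];
    int n;
    const int found = enumerate_camera_devices(cis, 8);
    for (n = 0; n < found; n++) {
        free(cis[n].device_name);   /* allocated with ASTRDUP */
        free(cis[n].frame_sizes);   /* allocated with malloc */
    }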
diff --git a/android/camera/camera-capture.h b/android/camera/camera-capture.h
index f9ac2c9..3025a23 100644
--- a/android/camera/camera-capture.h
+++ b/android/camera/camera-capture.h
@@ -33,25 +33,27 @@
* inp_channel - On Linux defines input channel to use when communicating with
* the camera driver. On Windows contains an index (up to 10) of the driver
* to use to communicate with the camera device.
- * pixel_format - Defines pixel format in which the client of the camera API
- * expects the frames. Note that is this format doesn't match pixel formats
- * supported by the camera device, the camera API will provide a conversion.
- * If such conversion is not available, this routine will fail.
* Return:
* Initialized camera device descriptor on success, or NULL on failure.
*/
-extern CameraDevice* camera_device_open(const char* name,
- int inp_channel,
- uint32_t pixel_format);
+extern CameraDevice* camera_device_open(const char* name, int inp_channel);
/* Starts capturing frames from the camera device.
* Param:
* cd - Camera descriptor representing a camera device opened in
* camera_device_open routine.
+ * pixel_format - Defines pixel format for the captured frames. Must be one of
+ * the formats, supported by the camera device.
+ * width, height - Frame dimensions for the captured video frame. Must match
+ * dimensions supported by the camera for the pixel format defined by the
+ * 'pixel_format' parameter.
* Return:
* 0 on success, or non-zero value on failure.
*/
-extern int camera_device_start_capturing(CameraDevice* cd);
+extern int camera_device_start_capturing(CameraDevice* cd,
+ uint32_t pixel_format,
+ int frame_width,
+ int frame_height);
/* Stops capturing frames from the camera device.
* Param:
@@ -66,15 +68,21 @@ extern int camera_device_stop_capturing(CameraDevice* cd);
* Param:
* cd - Camera descriptor representing a camera device opened in
* camera_device_open routine.
- * buffer - Address of the buffer where to read the frame. Note that the buffer
- * must be large enough to contain the entire frame. Also note that due to
- * possible format conversion, required buffer size may differ from the
- * framebuffer size as reported by framebuffer_size int the CameraDevice
- * structure.
+ * framebuffers - Array of framebuffers into which to read the frame. The size
+ * of this array is defined by the 'fbs_num' parameter. Note that the caller
+ * must make sure that the buffers are large enough to contain the entire
+ * frame captured from the device.
+ * fbs_num - Number of entries in the 'framebuffers' array.
* Return:
- * 0 on success, or non-zero value on failure.
+ * 0 on success, or non-zero value on failure. There is a special value 1
+ * returned from this routine which indicates that frames were not available
+ * in the device. This value is returned by the Linux implementation when the
+ * frame ioctl has returned the EAGAIN error. The client should respond to
+ * this value by repeating the read, rather than reporting an error.
*/
-extern int camera_device_read_frame(CameraDevice* cd, uint8_t* buffer);
+extern int camera_device_read_frame(CameraDevice* cd,
+ ClientFrameBuffer* framebuffers,
+ int fbs_num);
/* Closes camera device, opened in camera_device_open routine.
* Param:
@@ -83,4 +91,24 @@ extern int camera_device_read_frame(CameraDevice* cd, uint8_t* buffer);
*/
extern void camera_device_close(CameraDevice* cd);
+/* Enumerates camera devices connected to the host, and collects information
+ * about each device.
+ * Apparently, the camera framework in the guest will only accept the YV12
+ * (V4L2_PIX_FMT_YVU420) pixel format. So, we don't really need to report all
+ * the pixel formats supported by the camera device back to the guest. We can
+ * simply pick any format that is supported by the device, and collect the
+ * frame dimensions available for it. The only thing we do is specifically
+ * check whether the camera supports YV12, and choose it if so, in order to
+ * spare some CPU cycles on the conversion.
+ * Param:
+ * cis - An allocated array where to store information about found camera
+ * devices. For each found camera device an entry will be initialized in the
+ * array. It's the responsibility of the caller to free the memory allocated
+ * for the entries.
+ * max - Maximum number of entries that can fit into the array.
+ * Return:
+ * Number of entries added to the 'cis' array on success, or < 0 on failure.
+ */
+extern int enumerate_camera_devices(CameraInfo* cis, int max);
+
#endif /* ANDROID_CAMERA_CAMERA_CAPTURE_H */
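Taken together, the declarations above imply the following lifecycle for a client of the capture API. This is a minimal sketch, not code from the patch; the device name, pixel format, dimensions, and buffer sizing are assumptions, and the retry on the special return value 1 follows the camera_device_read_frame contract documented above:

    CameraDevice* cd = camera_device_open("/dev/video0", 0);
    if (cd != NULL &&
        camera_device_start_capturing(cd, V4L2_PIX_FMT_YUYV, 640, 480) == 0) {
        ClientFrameBuffer fb;
        fb.pixel_format = V4L2_PIX_FMT_YVU420;            /* YV12 for the guest */
        fb.framebuffer  = malloc((640 * 480 * 12) / 8);   /* 12 bits per pixel */
        int res;
        do {
            res = camera_device_read_frame(cd, &fb, 1);
        } while (res == 1);       /* 1 == frame not yet available; repeat */
        /* res == 0: frame converted into fb; res < 0: failure. */
        free(fb.framebuffer);
        camera_device_stop_capturing(cd);
    }
    if (cd != NULL) {
        camera_device_close(cd);
    }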
diff --git a/android/camera/camera-common.h b/android/camera/camera-common.h
index 2101dfc..0cf8cea 100755
--- a/android/camera/camera-common.h
+++ b/android/camera/camera-common.h
@@ -22,35 +22,119 @@
* camera emulation.
*/
+#include "qemu-common.h"
+#include "android/utils/debug.h"
+#include "android/utils/misc.h"
+#include "android/utils/system.h"
#ifdef _WIN32
/* Include declarations that are missing in non-Linux headers. */
#include "android/camera/camera-win.h"
#elif _DARWIN_C_SOURCE
/* Include declarations that are missing in non-Linux headers. */
#include "android/camera/camera-win.h"
+#else
+#include <linux/videodev2.h>
#endif /* _WIN32 */
+
+/*
+ * These are missing in the current linux/videodev2.h
+ */
+
+#ifndef V4L2_PIX_FMT_YVYU
+#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U')
+#endif /* V4L2_PIX_FMT_YVYU */
+#ifndef V4L2_PIX_FMT_VYUY
+#define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y')
+#endif /* V4L2_PIX_FMT_VYUY */
+#ifndef V4L2_PIX_FMT_YUY2
+#define V4L2_PIX_FMT_YUY2 v4l2_fourcc('Y', 'U', 'Y', '2')
+#endif /* V4L2_PIX_FMT_YUY2 */
+#ifndef V4L2_PIX_FMT_YUNV
+#define V4L2_PIX_FMT_YUNV v4l2_fourcc('Y', 'U', 'N', 'V')
+#endif /* V4L2_PIX_FMT_YUNV */
+#ifndef V4L2_PIX_FMT_V422
+#define V4L2_PIX_FMT_V422 v4l2_fourcc('V', '4', '2', '2')
+#endif /* V4L2_PIX_FMT_V422 */
+#ifndef V4L2_PIX_FMT_YYVU
+#define V4L2_PIX_FMT_YYVU v4l2_fourcc('Y', 'Y', 'V', 'U')
+#endif /* V4L2_PIX_FMT_YYVU */
+
+
+/* Describes a framebuffer used by the client of the camera capturing API.
+ * This descriptor is used in the camera_device_read_frame call.
+ */
+typedef struct ClientFrameBuffer {
+ /* Pixel format used in the client framebuffer. */
+ uint32_t pixel_format;
+ /* Address of the client framebuffer. */
+ void* framebuffer;
+} ClientFrameBuffer;
+
+/* Describes frame dimensions.
+ */
+typedef struct CameraFrameDim {
+ /* Frame width. */
+ int width;
+ /* Frame height. */
+ int height;
+} CameraFrameDim;
+
+/* Camera information descriptor, containing properties of a camera connected
+ * to the host.
+ *
+ * Instances of this structure are created during camera device enumerations,
+ * and are considered to be constant everywhere else. The only exception to this
+ * rule is changing the 'in_use' flag during creation / destruction of a service
+ * representing that camera.
+ */
+typedef struct CameraInfo {
+ /* Device name for the camera. */
+ char* device_name;
+ /* Input channel for the camera. */
+ int inp_channel;
+ /* Pixel format chosen for the camera. */
+ uint32_t pixel_format;
+ /* Array of frame sizes supported for the pixel format chosen for the camera.
+ * The size of the array is defined by the frame_sizes_num field of this
+ * structure. */
+ CameraFrameDim* frame_sizes;
+ /* Number of frame sizes supported for the pixel format chosen
+ * for the camera. */
+ int frame_sizes_num;
+ /* In use status. When there is a camera service created for this camera,
+ * 'in_use' is set to 1. Otherwise this flag is set to 0. */
+ int in_use;
+} CameraInfo;
+
+/* Allocates CameraInfo instance. */
+static __inline__ CameraInfo* _camera_info_alloc(void)
+{
+ CameraInfo* ci;
+ ANEW0(ci);
+ return ci;
+}
+
+/* Frees all resources allocated for CameraInfo instance (including the
+ * instance itself).
+ */
+static __inline__ void _camera_info_free(CameraInfo* ci)
+{
+ if (ci != NULL) {
+ if (ci->device_name != NULL)
+ free(ci->device_name);
+ if (ci->frame_sizes != NULL)
+ free(ci->frame_sizes);
+ AFREE(ci);
+ }
+}
/* Describes a connected camera device.
* This is a platform-independent camera device descriptor that is used in
- * the camera API. This descriptor also contains some essential camera
- * properties, so the client of this API can use them to properly prepare to
- * handle frame capturing.
+ * the camera API.
*/
typedef struct CameraDevice {
/* Opaque pointer used by the camera capturing API. */
void* opaque;
- /* Frame's width in number of pixels. */
- int width;
- /* Frame's height in number of pixels. */
- int height;
- /* Number of bytes encoding each pixel. */
- uint32_t bpp;
- /* Number of bytes encoding each line. */
- uint32_t bpl;
- /* Pixel format of the frame captured from the camera device. */
- uint32_t pixel_format;
- /* Total size in bytes of the framebuffer. */
- size_t framebuffer_size;
} CameraDevice;
#endif /* ANDROID_CAMERA_CAMERA_COMMON_H_ */
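Because camera_device_read_frame takes an array of ClientFrameBuffer entries, a single captured frame can be converted into several client buffers in one call, e.g. a YV12 buffer for the data stream and an RGB32 buffer for preview. A hedged sketch, assuming 'cd', 'width', and 'height' are already set up:

    ClientFrameBuffer fbs[2];
    fbs[0].pixel_format = V4L2_PIX_FMT_YVU420;             /* YV12 data frame */
    fbs[0].framebuffer  = malloc((width * height * 12) / 8);
    fbs[1].pixel_format = V4L2_PIX_FMT_RGB32;              /* RGB32 preview */
    fbs[1].framebuffer  = malloc(width * height * 4);
    camera_device_read_frame(cd, fbs, 2);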
diff --git a/android/camera/camera-format-converters.c b/android/camera/camera-format-converters.c
index 139e5ab..d675e93 100755
--- a/android/camera/camera-format-converters.c
+++ b/android/camera/camera-format-converters.c
@@ -23,106 +23,916 @@
#include <linux/videodev2.h>
#endif
#include "android/camera/camera-format-converters.h"
-
-/* Describes a convertor for one pixel format to another. */
-typedef struct FormatConverterEntry {
- /* Pixel format to convert from. */
- uint32_t from_format;
- /* Pixel format to convert to. */
- uint32_t to_format;
- /* Address of the conversion routine. */
- FormatConverter converter;
-} FormatConverterEntry;
-
-/* Converts frame from RGB24 (8 bits per color) to NV12 (YUV420)
+
+#define D(...) VERBOSE_PRINT(camera,__VA_ARGS__)
+#define W(...) VERBOSE_PRINT(camera,__VA_ARGS__)
+#define E(...) VERBOSE_PRINT(camera,__VA_ARGS__)
+
+ /*
+ * NOTE: RGB and big/little endian considerations. Wherever in this code RGB
+ * pixels are represented as WORD, or DWORD, the color order inside the
+ * WORD / DWORD matches the one that would occur if that WORD / DWORD had
+ * been read from the typecasted framebuffer:
+ *
+ * const uint32_t rgb = *(const uint32_t*)framebuffer;
+ *
+ * So, if this code runs on a little endian CPU, the red color in 'rgb' would
+ * be masked as 0x000000ff, and the blue color would be masked as 0x00ff0000,
+ * while if the code runs on a big endian CPU, the red color in 'rgb' would be
+ * masked as 0xff000000, and the blue color would be masked as 0x0000ff00.
+ */
+
+/* Prototype of a routine that converts frame from one pixel format to another.
* Param:
- * rgb - RGB frame to convert.
- * width, height - Width, and height of the RGB frame.
- * yuv - Buffer to receive the converted frame. Note that this buffer must
+ * from - Frame to convert.
+ * width, height - Width, and height of the frame to convert.
+ * to - Buffer to receive the converted frame. Note that this buffer must
* be big enough to contain all the converted pixels!
*/
-static void
-_RGB8_to_YUV420(const uint8_t* rgb,
- int width,
- int height,
- uint8_t* yuv)
-{
- const uint8_t* rgb_current = rgb;
- int x, y, yi = 0;
- const int num_pixels = width * height;
- int ui = num_pixels;
- int vi = num_pixels + num_pixels / 4;
+typedef void (*FormatConverter)(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to);
+
+/*
+ * RGB565 color masks
+ */
+
+#ifndef HOST_WORDS_BIGENDIAN
+static const uint16_t kRed5 = 0x001f;
+static const uint16_t kGreen6 = 0x07e0;
+static const uint16_t kBlue5 = 0xf800;
+#else // !HOST_WORDS_BIGENDIAN
+static const uint16_t kRed5 = 0xf800;
+static const uint16_t kGreen6 = 0x07e0;
+static const uint16_t kBlue5 = 0x001f;
+#endif // !HOST_WORDS_BIGENDIAN
+
+/*
+ * RGB32 color masks
+ */
+
+#ifndef HOST_WORDS_BIGENDIAN
+static const uint32_t kRed8 = 0x000000ff;
+static const uint32_t kGreen8 = 0x0000ff00;
+static const uint32_t kBlue8 = 0x00ff0000;
+#else // !HOST_WORDS_BIGENDIAN
+static const uint32_t kRed8 = 0x00ff0000;
+static const uint32_t kGreen8 = 0x0000ff00;
+static const uint32_t kBlue8 = 0x000000ff;
+#endif // !HOST_WORDS_BIGENDIAN
+
+/*
+ * Extracting, and saving color bytes from / to WORD / DWORD RGB.
+ */
+
+#ifndef HOST_WORDS_BIGENDIAN
+/* Extract red, green, and blue bytes from RGB565 word. */
+#define R16(rgb) (uint8_t)(rgb & kRed5)
+#define G16(rgb) (uint8_t)((rgb & kGreen6) >> 5)
+#define B16(rgb) (uint8_t)((rgb & kBlue5) >> 11)
+/* Make 8 bits red, green, and blue, extracted from RGB565 word. */
+#define R16_32(rgb) (uint8_t)(((rgb & kRed5) << 3) | ((rgb & kRed5) >> 2))
+#define G16_32(rgb) (uint8_t)(((rgb & kGreen6) >> 3) | ((rgb & kGreen6) >> 9))
+#define B16_32(rgb) (uint8_t)(((rgb & kBlue5) >> 8) | ((rgb & kBlue5) >> 14))
+/* Extract red, green, and blue bytes from RGB32 dword. */
+#define R32(rgb) (uint8_t)(rgb & kRed8)
+#define G32(rgb) (uint8_t)(((rgb & kGreen8) >> 8) & 0xff)
+#define B32(rgb) (uint8_t)(((rgb & kBlue8) >> 16) & 0xff)
+/* Build RGB565 word from red, green, and blue bytes. */
+#define RGB565(r, g, b) (uint16_t)(((((uint16_t)(b) << 6) | g) << 5) | r)
+/* Build RGB32 dword from red, green, and blue bytes. */
+#define RGB32(r, g, b) (uint32_t)(((((uint32_t)(b) << 8) | g) << 8) | r)
+#else // !HOST_WORDS_BIGENDIAN
+/* Extract red, green, and blue bytes from RGB565 word. */
+#define R16(rgb) (uint8_t)((rgb & kRed5) >> 11)
+#define G16(rgb) (uint8_t)((rgb & kGreen6) >> 5)
+#define B16(rgb) (uint8_t)(rgb & kBlue5)
+/* Make 8 bits red, green, and blue, extracted from RGB565 word. */
+#define R16_32(rgb) (uint8_t)(((rgb & kRed5) >> 8) | ((rgb & kRed5) >> 14))
+#define G16_32(rgb) (uint8_t)(((rgb & kGreen6) >> 3) | ((rgb & kGreen6) >> 9))
+#define B16_32(rgb) (uint8_t)(((rgb & kBlue5) << 3) | ((rgb & kBlue5) >> 2))
+/* Extract red, green, and blue bytes from RGB32 dword. */
+#define R32(rgb) (uint8_t)((rgb & kRed8) >> 16)
+#define G32(rgb) (uint8_t)((rgb & kGreen8) >> 8)
+#define B32(rgb) (uint8_t)(rgb & kBlue8)
+/* Build RGB565 word from red, green, and blue bytes. */
+#define RGB565(r, g, b) (uint16_t)(((((uint16_t)(r) << 6) | g) << 5) | b)
+/* Build RGB32 dword from red, green, and blue bytes. */
+#define RGB32(r, g, b) (uint32_t)(((((uint32_t)(r) << 8) | g) << 8) | b)
+#endif // !HOST_WORDS_BIGENDIAN
+
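As a sanity check on the extraction and packing macros above, here is a minimal
sketch (illustrative only, not part of the patch; assumes <stdint.h> and
<assert.h>): a pixel built with RGB565() reads back the same channel values
through R16()/G16()/B16() on either endianness.

    static void check_rgb565_macros(void)
    {
        /* 5-bit red, 6-bit green, and 5-bit blue sample values. */
        const uint8_t r = 0x1f, g = 0x2a, b = 0x15;
        const uint16_t pix = RGB565(r, g, b);
        assert(R16(pix) == r);
        assert(G16(pix) == g);
        assert(B16(pix) == b);
    }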
+/* A union that simplifies breaking a 32-bit RGB value into separate R, G, and
+ * B colors.
+ */
+typedef union RGB32_t {
+ uint32_t color;
+ struct {
+#ifndef HOST_WORDS_BIGENDIAN
+ uint8_t r; uint8_t g; uint8_t b; uint8_t a;
+#else // !HOST_WORDS_BIGENDIAN
+ uint8_t a; uint8_t b; uint8_t g; uint8_t r;
+#endif // HOST_WORDS_BIGENDIAN
+ };
+} RGB32_t;
+
+/* Clips a value to the unsigned 0-255 range, treating negative values as zero.
+ */
+static __inline__ int
+clamp(int x)
+{
+ if (x > 255) return 255;
+ if (x < 0) return 0;
+ return x;
+}
+
+/********************************************************************************
+ * Basics of RGB -> YUV conversion
+ *******************************************************************************/
+
+/*
+ * RGB -> YUV conversion macros
+ */
+#define RGB2Y(r, g, b) (uint8_t)(((66 * (r) + 129 * (g) + 25 * (b) + 128) >> 8) + 16)
+#define RGB2U(r, g, b) (uint8_t)(((-38 * (r) - 74 * (g) + 112 * (b) + 128) >> 8) + 128)
+#define RGB2V(r, g, b) (uint8_t)(((112 * (r) - 94 * (g) - 18 * (b) + 128) >> 8) + 128)
+/* Converts R8 G8 B8 color to YUV. */
+static __inline__ void
+R8G8B8ToYUV(uint8_t r, uint8_t g, uint8_t b, uint8_t* y, uint8_t* u, uint8_t* v)
+{
+ *y = RGB2Y((int)r, (int)g, (int)b);
+ *u = RGB2U((int)r, (int)g, (int)b);
+ *v = RGB2V((int)r, (int)g, (int)b);
+}
+
+/* Converts RGB565 color to YUV. */
+static __inline__ void
+RGB565ToYUV(uint16_t rgb, uint8_t* y, uint8_t* u, uint8_t* v)
+{
+ R8G8B8ToYUV(R16_32(rgb), G16_32(rgb), B16_32(rgb), y, u, v);
+}
+
+/* Converts RGB32 color to YUV. */
+static __inline__ void
+RGB32ToYUV(uint32_t rgb, uint8_t* y, uint8_t* u, uint8_t* v)
+{
+ RGB32_t rgb_c;
+ rgb_c.color = rgb;
+ R8G8B8ToYUV(rgb_c.r, rgb_c.g, rgb_c.b, y, u, v);
+}
+
+/********************************************************************************
+ * Basics of YUV -> RGB conversion.
+ * Note that due to the fact that guest uses RGB only on preview window, and the
+ * RGB format that is used is RGB565, we can limit YUV -> RGB conversions to
+ * RGB565 only.
+ *******************************************************************************/
+
+/*
+ * YUV -> RGB conversion macros
+ */
+
+/* "Optimized" macros that take specialy prepared Y, U, and V values:
+ * C = Y - 16
+ * D = U - 128
+ * E = V - 128
+ */
+#define YUV2RO(C, D, E) clamp((298 * (C) + 409 * (E) + 128) >> 8)
+#define YUV2GO(C, D, E) clamp((298 * (C) - 100 * (D) - 208 * (E) + 128) >> 8)
+#define YUV2BO(C, D, E) clamp((298 * (C) + 516 * (D) + 128) >> 8)
+
+/*
+ * Main macros that take the original Y, U, and V values
+ */
+#define YUV2R(y, u, v) clamp((298 * ((y)-16) + 409 * ((v)-128) + 128) >> 8)
+#define YUV2G(y, u, v) clamp((298 * ((y)-16) - 100 * ((u)-128) - 208 * ((v)-128) + 128) >> 8)
+#define YUV2B(y, u, v) clamp((298 * ((y)-16) + 516 * ((u)-128) + 128) >> 8)
+
+
+/* Converts YUV color to RGB565. */
+static __inline__ uint16_t
+YUVToRGB565(int y, int u, int v)
+{
+ /* Calculate C, D, and E values for the optimized macro. */
+ y -= 16; u -= 128; v -= 128;
+ const uint16_t r = YUV2RO(y,u,v) >> 3;
+ const uint16_t g = YUV2GO(y,u,v) >> 2;
+ const uint16_t b = YUV2BO(y,u,v) >> 3;
+ return RGB565(r, g, b);
+}
+
+/* Converts YUV color to RGB32. */
+static __inline__ uint32_t
+YUVToRGB32(int y, int u, int v)
+{
+ /* Calculate C, D, and E values for the optimized macro. */
+ y -= 16; u -= 128; v -= 128;
+    RGB32_t rgb;
+    rgb.r = YUV2RO(y,u,v) & 0xff;
+    rgb.g = YUV2GO(y,u,v) & 0xff;
+    rgb.b = YUV2BO(y,u,v) & 0xff;
+    /* The alpha byte would otherwise be left uninitialized: zero it so the
+     * returned DWORD is deterministic. */
+    rgb.a = 0;
+ return rgb.color;
+}
+
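A quick numeric check of the coefficients above (illustrative only, not part of
the patch; assumes <assert.h>): pure white, R = G = B = 255, lands on the top of
the studio-swing range, Y = 235 with neutral chroma U = V = 128, and the
YUV2R/G/B macros map it back to 255.

    static void check_yuv_round_trip(void)
    {
        uint8_t y, u, v;
        R8G8B8ToYUV(255, 255, 255, &y, &u, &v);
        assert(y == 235 && u == 128 && v == 128);
        assert(YUV2R(y, u, v) == 255);
        assert(YUV2G(y, u, v) == 255);
        assert(YUV2B(y, u, v) == 255);
    }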
+/********************************************************************************
+ * YUV -> RGB565 converters.
+ *******************************************************************************/
+
+/* Common converter for a variety of YUV 4:2:2 formats to RGB565.
+ * The meaning of the parameters is straightforward here, except for 'next_Y'.
+ * In all YUV 4:2:2 formats, every two pixels are encoded in four subsequent
+ * bytes that contain two Y values (one for each pixel), plus one U and one V
+ * value that are shared between the two pixels. The only difference between
+ * the formats is how the Y, U, and V values are arranged inside the quad.
+ * The arrangement doesn't affect how Us and Vs are advanced: subsequent Us and
+ * Vs are always four bytes apart. With Ys, however, things are a bit messier
+ * inside the pair. The only certainty is that Ys for subsequent pairs are also
+ * always four bytes apart, so the 'next_Y' parameter controls the distance
+ * between the two Ys inside a pixel pair. */
+static void _YUY422_to_RGB565(const uint8_t* from_Y,
+ const uint8_t* from_U,
+ const uint8_t* from_V,
+ int next_Y,
+ int width,
+ int height,
+ uint16_t* rgb)
+{
+ int x, y;
for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) {
- const uint32_t b = rgb_current[0];
- const uint32_t g = rgb_current[1];
- const uint32_t r = rgb_current[2];
- rgb_current += 3;
- yuv[yi++] = (uint8_t)((66*r + 129*g + 25*b + 128) >> 8) + 16;
- if((x % 2 ) == 0 && (y % 2) == 0) {
- yuv[ui++] = (uint8_t)((-38*r - 74*g + 112*b + 128) >> 8 ) + 128;
- yuv[vi++] = (uint8_t)((112*r - 94*g - 18*b + 128) >> 8 ) + 128;
- }
+ for (x = 0; x < width; x += 2, from_Y += 4, from_U += 4, from_V += 4) {
+ const uint8_t u = *from_U, v = *from_V;
+ *rgb = YUVToRGB565(*from_Y, u, v); rgb++;
+ *rgb = YUVToRGB565(from_Y[next_Y], u, v); rgb++;
}
}
}
-/* Converts frame from RGB24 (8 bits per color) to NV21 (YVU420)
- * Param:
- * rgb - RGB frame to convert.
- * width, height - Width, and height of the RGB frame.
- * yuv - Buffer to receive the converted frame. Note that this buffer must
- * be big enough to contain all the converted pixels!
- */
-static void
-_RGB8_to_YVU420(const uint8_t* rgb,
- int width,
- int height,
- uint8_t* yuv)
-{
- const uint8_t* rgb_current = rgb;
- int x, y, yi = 0;
- const int num_pixels = width * height;
- int vi = num_pixels;
- int ui = num_pixels + num_pixels / 4;
+/* Converts YUYV frame into RGB565 frame. */
+static void _YUYV_to_RGB565(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_RGB565(from, from + 1, from + 3, 2, width, height, (uint16_t*)to);
+}
+
+/* Converts YVYU frame into RGB565 frame. */
+static void _YVYU_to_RGB565(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_RGB565(from, from + 3, from + 1, 2, width, height, (uint16_t*)to);
+}
+
+/* Converts UYVY frame into RGB565 frame. */
+static void _UYVY_to_RGB565(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_RGB565(from + 1, from, from + 2, 2, width, height, (uint16_t*)to);
+}
+
+/* Converts VYUY frame into RGB565 frame. */
+static void _VYUY_to_RGB565(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_RGB565(from + 1, from + 2, from, 2, width, height, (uint16_t*)to);
+}
+
+/* Converts YYUV frame into RGB565 frame. */
+static void _YYUV_to_RGB565(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_RGB565(from, from + 2, from + 3, 1, width, height, (uint16_t*)to);
+}
+
+/* Converts YYVU frame into RGB565 frame. */
+static void _YYVU_to_RGB565(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_RGB565(from, from + 3, from + 2, 1, width, height, (uint16_t*)to);
+}
+
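The six wrappers above differ only in the byte offsets they feed to the common
routine. As a reference, a small table (illustrative only, not part of the
patch) listing the {Y, U, V, next_Y} offsets each wrapper uses within a
four-byte pixel pair:

    static const struct {
        const char* fourcc;
        int y, u, v, next_y;
    } kYUV422Layout[] = {
        { "YUYV", 0, 1, 3, 2 },
        { "YVYU", 0, 3, 1, 2 },
        { "UYVY", 1, 0, 2, 2 },
        { "VYUY", 1, 2, 0, 2 },
        { "YYUV", 0, 2, 3, 1 },
        { "YYVU", 0, 3, 2, 1 },
    };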
+/********************************************************************************
+ * YUV -> RGB32 converters.
+ *******************************************************************************/
+
+/* Common converter for a variety of YUV 4:2:2 formats to RGB32.
+ * See _YUY422_to_RGB565 comments for explanations. */
+static void _YUY422_to_RGB32(const uint8_t* from_Y,
+ const uint8_t* from_U,
+ const uint8_t* from_V,
+ int next_Y,
+ int width,
+ int height,
+ uint32_t* rgb)
+{
+ int x, y;
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += 2, from_Y += 4, from_U += 4, from_V += 4) {
+ const uint8_t u = *from_U, v = *from_V;
+ *rgb = YUVToRGB32(*from_Y, u, v); rgb++;
+ *rgb = YUVToRGB32(from_Y[next_Y], u, v); rgb++;
+ }
+ }
+}
+
+/* Converts YUYV frame into RGB32 frame. */
+static void _YUYV_to_RGB32(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_RGB32(from, from + 1, from + 3, 2, width, height, (uint32_t*)to);
+}
+
+/* Converts YVYU frame into RGB32 frame. */
+static void _YVYU_to_RGB32(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_RGB32(from, from + 3, from + 1, 2, width, height, (uint32_t*)to);
+}
+
+/* Converts UYVY frame into RGB32 frame. */
+static void _UYVY_to_RGB32(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_RGB32(from + 1, from, from + 2, 2, width, height, (uint32_t*)to);
+}
+
+/* Converts VYUY frame into RGB32 frame. */
+static void _VYUY_to_RGB32(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_RGB32(from + 1, from + 2, from, 2, width, height, (uint32_t*)to);
+}
+
+/* Converts YYUV frame into RGB32 frame. */
+static void _YYUV_to_RGB32(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_RGB32(from, from + 2, from + 3, 1, width, height, (uint32_t*)to);
+}
+
+/* Converts YYVU frame into RGB32 frame. */
+static void _YYVU_to_RGB32(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_RGB32(from, from + 3, from + 2, 1, width, height, (uint32_t*)to);
+}
+
+/********************************************************************************
+ * YUV -> YV12 converters.
+ *******************************************************************************/
+
+/* Common converter for a variety of YUV 4:2:2 formats to YV12.
+ * See comments to _YUY422_to_RGB565 for more information. */
+static void _YUY422_to_YV12(const uint8_t* from_Y,
+ const uint8_t* from_U,
+ const uint8_t* from_V,
+ int next_Y,
+ int width,
+ int height,
+ uint8_t* yv12)
+{
+ const int total_pix = width * height;
+ uint8_t* to_Y = yv12;
+ uint8_t* to_U = yv12 + total_pix;
+ uint8_t* to_V = to_U + total_pix / 4;
+ uint8_t* c_U = to_U;
+ uint8_t* c_V = to_V;
+ int x, y;
for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) {
- const uint32_t b = rgb_current[0];
- const uint32_t g = rgb_current[1];
- const uint32_t r = rgb_current[2];
- rgb_current += 3;
- yuv[yi++] = (uint8_t)((66*r + 129*g + 25*b + 128) >> 8) + 16;
- if((x % 2 ) == 0 && (y % 2) == 0) {
- yuv[ui++] = (uint8_t)((-38*r - 74*g + 112*b + 128) >> 8 ) + 128;
- yuv[vi++] = (uint8_t)((112*r - 94*g - 18*b + 128) >> 8 ) + 128;
- }
+ for (x = 0; x < width; x += 2, to_Y += 2, c_U++, c_V++,
+ from_Y += 4, from_U += 4, from_V += 4) {
+ *to_Y = *from_Y; to_Y[1] = from_Y[next_Y];
+ *c_U = *from_U; *c_V = *from_V;
+ }
+ if (y & 0x1) {
+ /* Finished two pixel lines: move to the next U/V line */
+ to_U = c_U; to_V = c_V;
+ } else {
+ /* Reset U/V pointers to the beginning of the line */
+ c_U = to_U; c_V = to_V;
}
}
}
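The planar layout the converter above writes can be summarized with a small
sketch (illustrative only, not part of the patch): a frame of even width and
height occupies width * height * 3 / 2 bytes, a full-resolution Y plane
followed by two quarter-resolution chroma planes.

    static size_t yv12_frame_size(int width, int height)
    {
        const size_t total_pix = (size_t)width * height;
        /* Y plane, then two (width/2) x (height/2) chroma planes. */
        return total_pix + total_pix / 4 + total_pix / 4;
    }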
+/* Converts YUYV frame into YV12 frame. */
+static void _YUYV_to_YV12(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_YV12(from, from + 1, from + 3, 2, width, height, to);
+}
+
+/* Converts YVYU frame into YV12 frame. */
+static void _YVYU_to_YV12(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_YV12(from, from + 3, from + 1, 2, width, height, to);
+}
+
+/* Converts UYVY frame into YV12 frame. */
+static void _UYVY_to_YV12(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_YV12(from + 1, from, from + 2, 2, width, height, to);
+}
+
+/* Converts VYUY frame into YV12 frame. */
+static void _VYUY_to_YV12(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_YV12(from + 1, from + 2, from, 2, width, height, to);
+}
+
+/* Converts YYUV frame into YV12 frame. */
+static void _YYUV_to_YV12(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_YV12(from, from + 2, from + 3, 1, width, height, to);
+}
+
+/* Converts YYVU frame into YV12 frame. */
+static void _YYVU_to_YV12(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ _YUY422_to_YV12(from, from + 3, from + 2, 1, width, height, to);
+}
+
+/********************************************************************************
+ * RGB -> YV12 converters.
+ *******************************************************************************/
+
+/* Converts RGB565 frame into YV12 frame. */
+static void _RGB565_to_YV12(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ const int total_pix = width * height;
+ const uint16_t* rgb = (const uint16_t*)from;
+ uint8_t* to_Y = to;
+ uint8_t* to_Cb = to + total_pix;
+ uint8_t* to_Cr = to_Cb + total_pix / 4;
+ uint8_t* Cb = to_Cb;
+ uint8_t* Cr = to_Cr;
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += 2, to_Cb++, to_Cr++) {
+ RGB565ToYUV(*rgb, to_Y, to_Cb, to_Cr); rgb++; to_Y++;
+ RGB565ToYUV(*rgb, to_Y, to_Cb, to_Cr); rgb++; to_Y++;
+ }
+ if (y & 0x1) {
+ to_Cb = Cb; to_Cr = Cr;
+ } else {
+ Cb = to_Cb; Cr = to_Cr;
+ }
+ }
+}
+
+/* Converts RGB24 frame into YV12 frame. */
+static void _RGB24_to_YV12(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ const int total_pix = width * height;
+ /* Bytes per line: each line must be WORD aligned. */
+ const int bpl = (width * 3 + 1) & ~1;
+ const uint8_t* line_start = from;
+ uint8_t* to_Y = to;
+ uint8_t* to_Cb = to + total_pix;
+ uint8_t* to_Cr = to_Cb + total_pix / 4;
+ uint8_t* Cb = to_Cb;
+ uint8_t* Cr = to_Cr;
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ from = line_start;
+ for (x = 0; x < width; x += 2, from += 6, to_Cb++, to_Cr++) {
+ R8G8B8ToYUV(from[0], from[1], from[2], to_Y, to_Cb, to_Cr); to_Y++;
+ R8G8B8ToYUV(from[3], from[4], from[5], to_Y, to_Cb, to_Cr); to_Y++;
+ }
+ if (y & 0x1) {
+ to_Cb = Cb; to_Cr = Cr;
+ } else {
+ Cb = to_Cb; Cr = to_Cr;
+ }
+ /* Advance to next line, keeping proper alignment. */
+ line_start += bpl;
+ }
+}
+
+/* Converts RGB32 frame into YV12 frame. */
+static void _RGB32_to_YV12(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ const int total_pix = width * height;
+ uint8_t* to_Y = to;
+ uint8_t* to_Cb = to + total_pix;
+ uint8_t* to_Cr = to_Cb + total_pix / 4;
+ uint8_t* Cb = to_Cb;
+ uint8_t* Cr = to_Cr;
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += 2, from += 8, to_Cb++, to_Cr++) {
+ R8G8B8ToYUV(from[0], from[1], from[2], to_Y, to_Cb, to_Cr); to_Y++;
+ R8G8B8ToYUV(from[4], from[5], from[6], to_Y, to_Cb, to_Cr); to_Y++;
+ }
+ if (y & 0x1) {
+ to_Cb = Cb; to_Cr = Cr;
+ } else {
+ Cb = to_Cb; Cr = to_Cr;
+ }
+ }
+}
+
+/********************************************************************************
+ * BGR -> YV12 converters.
+ *******************************************************************************/
+
+/* Converts BGR24 frame into YV12 frame. */
+static void _BGR24_to_YV12(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ const int total_pix = width * height;
+ /* Bytes per line: each line must be WORD aligned. */
+ const int bpl = (width * 3 + 1) & ~1;
+ const uint8_t* line_start = from;
+ uint8_t* to_Y = to;
+ uint8_t* to_Cb = to + total_pix;
+ uint8_t* to_Cr = to_Cb + total_pix / 4;
+ uint8_t* Cb = to_Cb;
+ uint8_t* Cr = to_Cr;
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ from = line_start;
+ for (x = 0; x < width; x += 2, from += 6, to_Cb++, to_Cr++) {
+ R8G8B8ToYUV(from[2], from[1], from[0], to_Y, to_Cb, to_Cr); to_Y++;
+ R8G8B8ToYUV(from[5], from[4], from[3], to_Y, to_Cb, to_Cr); to_Y++;
+ }
+ if (y & 0x1) {
+ to_Cb = Cb; to_Cr = Cr;
+ } else {
+ Cb = to_Cb; Cr = to_Cr;
+ }
+ /* Advance to next line, keeping proper alignment. */
+ line_start += bpl;
+ }
+}
+
+/* Converts BGR32 frame into YV12 frame. */
+static void _BGR32_to_YV12(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ const int total_pix = width * height;
+ uint8_t* to_Y = to;
+ uint8_t* to_Cb = to + total_pix;
+ uint8_t* to_Cr = to_Cb + total_pix / 4;
+ uint8_t* Cb = to_Cb;
+ uint8_t* Cr = to_Cr;
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += 2, from += 8, to_Cb++, to_Cr++) {
+ R8G8B8ToYUV(from[2], from[1], from[0], to_Y, to_Cb, to_Cr); to_Y++;
+ R8G8B8ToYUV(from[6], from[5], from[4], to_Y, to_Cb, to_Cr); to_Y++;
+ }
+ if (y & 0x1) {
+ to_Cb = Cb; to_Cr = Cr;
+ } else {
+ Cb = to_Cb; Cr = to_Cr;
+ }
+ }
+}
+
+/********************************************************************************
+ * RGB -> RGB565 converters.
+ *******************************************************************************/
+
+/* Converts RGB24 frame into RGB565 frame. */
+static void _RGB24_to_RGB565(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ /* Bytes per line: each line must be WORD aligned. */
+ const int bpl = (width * 3 + 1) & ~1;
+ const uint8_t* line_start = from;
+ uint16_t* rgb = (uint16_t*)to;
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ from = line_start;
+ for (x = 0; x < width; x++, rgb++) {
+ const uint16_t r = *from >> 3; from++;
+ const uint16_t g = *from >> 2; from++;
+ const uint16_t b = *from >> 3; from++;
+ *rgb = b | (g << 5) | (r << 11);
+ }
+ /* Advance to next line, keeping proper alignment. */
+ line_start += bpl;
+ }
+}
+
+/* Converts RGB32 frame into RGB565 frame. */
+static void _RGB32_to_RGB565(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ uint16_t* rgb = (uint16_t*)to;
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++, rgb++, from++) {
+ const uint16_t r = *from >> 3; from++;
+ const uint16_t g = *from >> 2; from++;
+ const uint16_t b = *from >> 3; from++;
+ *rgb = b | (g << 5) | (r << 11);
+ }
+ }
+}
+
+/********************************************************************************
+ * BGR -> RGB565 converters.
+ *******************************************************************************/
+
+/* Converts BGR24 frame into RGB565 frame. */
+static void _BGR24_to_RGB565(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ /* Bytes per line: each line must be WORD aligned. */
+ const int bpl = (width * 3 + 1) & ~1;
+ const uint8_t* line_start = from;
+ uint16_t* rgb = (uint16_t*)to;
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ from = line_start;
+ for (x = 0; x < width; x++, rgb++) {
+ const uint16_t b = *from >> 3; from++;
+ const uint16_t g = *from >> 2; from++;
+ const uint16_t r = *from >> 3; from++;
+ *rgb = b | (g << 5) | (r << 11);
+ }
+ /* Advance to next line, keeping proper alignment. */
+ line_start += bpl;
+ }
+}
+
+/* Converts BGR32 frame into RGB565 frame. */
+static void _BGR32_to_RGB565(const uint8_t* from,
+ int width,
+ int height,
+ uint8_t* to)
+{
+ uint16_t* rgb = (uint16_t*)to;
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++, rgb++, from++) {
+ const uint16_t b = *from >> 3; from++;
+ const uint16_t g = *from >> 2; from++;
+ const uint16_t r = *from >> 3; from++;
+ *rgb = b | (g << 5) | (r << 11);
+ }
+ }
+}
+
+/* Describes a converter for one pixel format to another. */
+typedef struct FormatConverterEntry {
+ /* Pixel format to convert from. */
+ uint32_t from_format;
+ /* Pixel format to convert to. */
+ uint32_t to_format;
+ /* Address of the conversion routine. */
+ FormatConverter converter;
+} FormatConverterEntry;
+
+
/* Lists currently implemented converters. */
static const FormatConverterEntry _converters[] = {
- /* RGB24 -> NV12 */
- { V4L2_PIX_FMT_RGB24, V4L2_PIX_FMT_NV12, _RGB8_to_YUV420 },
- /* RGB24 -> YUV420 */
- { V4L2_PIX_FMT_RGB24, V4L2_PIX_FMT_YUV420, _RGB8_to_YUV420 },
- /* RGB24 -> NV21 */
- { V4L2_PIX_FMT_RGB24, V4L2_PIX_FMT_NV21, _RGB8_to_YVU420 },
+ /*
+ * YUV 4:2:2 variety -> YV12
+ */
+
+ /* YUYV -> YV12 */
+ { V4L2_PIX_FMT_YUYV, V4L2_PIX_FMT_YVU420, _YUYV_to_YV12 },
+ /* UYVY -> YV12 */
+ { V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YVU420, _UYVY_to_YV12 },
+ /* YVYU -> YV12 */
+ { V4L2_PIX_FMT_YVYU, V4L2_PIX_FMT_YVU420, _YVYU_to_YV12 },
+ /* VYUY -> YV12 */
+ { V4L2_PIX_FMT_VYUY, V4L2_PIX_FMT_YVU420, _VYUY_to_YV12 },
+ /* YYUV -> YV12 */
+ { V4L2_PIX_FMT_YYUV, V4L2_PIX_FMT_YVU420, _YYUV_to_YV12 },
+ /* YUY2 -> YV12 This format is the same as YYUV */
+ { V4L2_PIX_FMT_YUY2, V4L2_PIX_FMT_YVU420, _YYUV_to_YV12 },
+ /* YUNV -> YV12 This format is the same as YYUV */
+ { V4L2_PIX_FMT_YUNV, V4L2_PIX_FMT_YVU420, _YYUV_to_YV12 },
+ /* V422 -> YV12 This format is the same as YYUV */
+ { V4L2_PIX_FMT_V422, V4L2_PIX_FMT_YVU420, _YYUV_to_YV12 },
+ /* YYVU -> YV12 */
+ { V4L2_PIX_FMT_YYVU, V4L2_PIX_FMT_YVU420, _YYVU_to_YV12 },
+
+ /*
+ * YUV 4:2:2 variety -> RGB565
+ */
+
+ /* YUYV -> RGB565 */
+ { V4L2_PIX_FMT_YUYV, V4L2_PIX_FMT_RGB565, _YUYV_to_RGB565 },
+ /* UYVY -> RGB565 */
+ { V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_RGB565, _UYVY_to_RGB565 },
+ /* YVYU -> RGB565 */
+ { V4L2_PIX_FMT_YVYU, V4L2_PIX_FMT_RGB565, _YVYU_to_RGB565 },
+ /* VYUY -> RGB565 */
+ { V4L2_PIX_FMT_VYUY, V4L2_PIX_FMT_RGB565, _VYUY_to_RGB565 },
+ /* YYUV -> RGB565 */
+ { V4L2_PIX_FMT_YYUV, V4L2_PIX_FMT_RGB565, _YYUV_to_RGB565 },
+ /* YUY2 -> RGB565 This format is the same as YYUV */
+ { V4L2_PIX_FMT_YUY2, V4L2_PIX_FMT_RGB565, _YUYV_to_RGB565 },
+ /* YUNV -> RGB565 This format is the same as YYUV */
+ { V4L2_PIX_FMT_YUNV, V4L2_PIX_FMT_RGB565, _YUYV_to_RGB565 },
+ /* V422 -> RGB565 This format is the same as YYUV */
+ { V4L2_PIX_FMT_V422, V4L2_PIX_FMT_RGB565, _YUYV_to_RGB565 },
+ /* YYVU -> RGB565 This format is the same as YYUV */
+ { V4L2_PIX_FMT_YYVU, V4L2_PIX_FMT_RGB565, _YYVU_to_RGB565 },
+
+ /*
+ * YUV 4:2:2 variety -> RGB32
+ */
+
+ /* YUYV -> RGB565 */
+ { V4L2_PIX_FMT_YUYV, V4L2_PIX_FMT_RGB32, _YUYV_to_RGB32 },
+ /* UYVY -> RGB565 */
+ { V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_RGB32, _UYVY_to_RGB32 },
+ /* YVYU -> RGB565 */
+ { V4L2_PIX_FMT_YVYU, V4L2_PIX_FMT_RGB32, _YVYU_to_RGB32 },
+ /* VYUY -> RGB565 */
+ { V4L2_PIX_FMT_VYUY, V4L2_PIX_FMT_RGB32, _VYUY_to_RGB32 },
+ /* YYUV -> RGB565 */
+ { V4L2_PIX_FMT_YYUV, V4L2_PIX_FMT_RGB32, _YYUV_to_RGB32 },
+ /* YUY2 -> RGB565 This format is the same as YYUV */
+ { V4L2_PIX_FMT_YUY2, V4L2_PIX_FMT_RGB32, _YUYV_to_RGB32 },
+ /* YUNV -> RGB565 This format is the same as YYUV */
+ { V4L2_PIX_FMT_YUNV, V4L2_PIX_FMT_RGB32, _YUYV_to_RGB32 },
+ /* V422 -> RGB565 This format is the same as YYUV */
+ { V4L2_PIX_FMT_V422, V4L2_PIX_FMT_RGB32, _YUYV_to_RGB32 },
+ /* YYVU -> RGB565 This format is the same as YYUV */
+ { V4L2_PIX_FMT_YYVU, V4L2_PIX_FMT_RGB32, _YYVU_to_RGB32 },
+
+ /*
+ * RGB variety -> YV12
+ */
+
+ /* RGB565 -> YV12 */
+ { V4L2_PIX_FMT_RGB565, V4L2_PIX_FMT_YVU420, _RGB565_to_YV12 },
+ /* RGB24 -> YV12 */
+ { V4L2_PIX_FMT_RGB24, V4L2_PIX_FMT_YVU420, _RGB24_to_YV12 },
+ /* RGB32 -> YV12 */
+ { V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_YVU420, _RGB32_to_YV12 },
+
+ /*
+ * BGR variety -> YV12
+ */
+
+ /* BGR24 -> YV12 */
+ { V4L2_PIX_FMT_BGR24, V4L2_PIX_FMT_YVU420, _BGR24_to_YV12 },
+ /* BGR32 -> YV12 */
+ { V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_YVU420, _BGR32_to_YV12 },
+
+ /*
+ * RGB variety -> RGB565
+ */
+
+ /* RGB24 -> RGB565 */
+ { V4L2_PIX_FMT_RGB24, V4L2_PIX_FMT_RGB565, _RGB24_to_RGB565 },
+ /* RGB32 -> RGB565 */
+ { V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_RGB565, _RGB32_to_RGB565 },
+
+ /*
+ * BGR variety -> RGB565
+ */
+
+ /* BGR24 -> RGB565 */
+ { V4L2_PIX_FMT_BGR24, V4L2_PIX_FMT_RGB565, _BGR24_to_RGB565 },
+ /* BGR32 -> RGB565 */
+ { V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_RGB565, _BGR32_to_RGB565 },
};
-FormatConverter
-get_format_converted(uint32_t from, uint32_t to)
+/* Gets an address of a routine that provides frame conversion for the
+ * given pixel formats.
+ * Param:
+ * from - Pixel format to convert from.
+ * to - Pixel format to convert to.
+ * Return:
+ *  Address of an appropriate conversion routine, or NULL if no conversion
+ *  routine exists for the given pair of pixel formats.
+ */
+static FormatConverter
+_get_format_converter(uint32_t from, uint32_t to)
{
const int num_converters = sizeof(_converters) / sizeof(*_converters);
int n;
for (n = 0; n < num_converters; n++) {
if (_converters[n].from_format == from &&
- _converters[n].to_format == to) {
+ _converters[n].to_format == to) {
return _converters[n].converter;
}
}
-
+
+ E("%s: No converter found from %.4s to %.4s pixel formats",
+ __FUNCTION__, (const char*)&from, (const char*)&to);
return NULL;
}
+
+/********************************************************************************
+ * Public API
+ *******************************************************************************/
+
+int
+has_converter(uint32_t from, uint32_t to)
+{
+ return (from == to) ? 1 :
+ (_get_format_converter(from, to) != NULL);
+}
+
+int
+convert_frame(const void* frame,
+ uint32_t pixel_format,
+ size_t framebuffer_size,
+ int width,
+ int height,
+ ClientFrameBuffer* framebuffers,
+ int fbs_num)
+{
+ int n;
+
+ for (n = 0; n < fbs_num; n++) {
+ if (framebuffers[n].pixel_format == pixel_format) {
+ /* Same pixel format. No conversion needed. */
+ memcpy(framebuffers[n].framebuffer, frame, framebuffer_size);
+ } else {
+ /* Get the converter. Note that the client must have ensured the
+ * existence of the converter when it was starting the camera. */
+ FormatConverter convert =
+ _get_format_converter(pixel_format, framebuffers[n].pixel_format);
+ if (convert != NULL) {
+ convert(frame, width, height, framebuffers[n].framebuffer);
+ } else {
+ E("%s No converter from %.4s to %.4s for framebuffer # %d ",
+ __FUNCTION__, (const char*)&pixel_format,
+ (const char*)&framebuffers[n].pixel_format, n);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/android/camera/camera-format-converters.h b/android/camera/camera-format-converters.h
index 797ad48..6f1b492 100755
--- a/android/camera/camera-format-converters.h
+++ b/android/camera/camera-format-converters.h
@@ -20,32 +20,48 @@
/*
* Contains declaration of the API that allows converting frames from one
* pixel format to another.
+ *
+ * For the emulator, we really need to convert into only two formats: YV12,
+ * which is used by the camera framework for video, and RGB32, which is used
+ * for the preview window.
*/
#include "camera-common.h"
-/* Prototype of a routine that converts frame from one pixel format to another.
- * Param:
- * from - Frame to convert.
- * width, height - Width, and height of the frame to convert.
- * to - Buffer to receive the converted frame. Note that this buffer must
- * be big enough to contain all the converted pixels!
- */
-typedef void (*FormatConverter)(const uint8_t* from,
- int width,
- int height,
- uint8_t* to);
-
-
-/* Gets an address of a routine that provides frame conversion for the
- * given pixel format.
+/* Checks if conversion between two pixel formats is available.
* Param:
* from - Pixel format to convert from.
* to - Pixel format to convert to.
* Return:
- * Address of an appropriate conversion routine, or NULL if no conversion
- * routine exsits for the given pair of pixel formats.
+ * boolean: 1 if converter is available, or 0 if no conversion exists.
*/
-extern FormatConverter get_format_converted(uint32_t from, uint32_t to);
+extern int has_converter(uint32_t from, uint32_t to);
+
+/* Converts a frame into multiple framebuffers.
+ * When the camera service replies to a framebuffer request from the client, it
+ * usually sends two framebuffers in the reply: one for video, and another for
+ * the preview window. Since these two framebuffers have different pixel formats
+ * (most of the time), we need to do two conversions for each frame received
+ * from the camera. This is the main intention behind this routine: to have one
+ * call produce as many conversions as needed.
+ * Param:
+ *  frame - Frame to convert.
+ *  pixel_format - Pixel format of the frame to convert.
+ *  framebuffer_size, width, height - Byte size, width, and height of the frame
+ *      to convert.
+ *  framebuffers - Array of framebuffers to convert the frame into. The size of
+ *      this array is defined by the 'fbs_num' parameter. Note that the caller
+ *      must make sure that the buffers are large enough to contain the entire
+ *      frame captured from the device.
+ *  fbs_num - Number of entries in the 'framebuffers' array.
+ * Return:
+ *  0 on success, or a non-zero value on failure.
+ */
+extern int convert_frame(const void* frame,
+ uint32_t pixel_format,
+ size_t framebuffer_size,
+ int width,
+ int height,
+ ClientFrameBuffer* framebuffers,
+ int fbs_num);
#endif /* ANDROID_CAMERA_CAMERA_FORMAT_CONVERTERS_H */
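A sketch of how a caller might fan one captured frame out to both guest buffers
(illustrative only, not part of the patch; 'frame', 'frame_size', 'width',
'height', 'video_buf', and 'preview_buf' are assumed to exist and to be
correctly sized):

    ClientFrameBuffer fbs[2];
    fbs[0].pixel_format = V4L2_PIX_FMT_YVU420;  /* YV12 for video */
    fbs[0].framebuffer  = video_buf;            /* width * height * 3 / 2 bytes */
    fbs[1].pixel_format = V4L2_PIX_FMT_RGB32;   /* preview window */
    fbs[1].framebuffer  = preview_buf;          /* width * height * 4 bytes */
    if (has_converter(V4L2_PIX_FMT_YUYV, fbs[0].pixel_format) &&
        has_converter(V4L2_PIX_FMT_YUYV, fbs[1].pixel_format)) {
        convert_frame(frame, V4L2_PIX_FMT_YUYV, frame_size,
                      width, height, fbs, 2);
    }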
diff --git a/android/camera/camera-service.c b/android/camera/camera-service.c
index 077809d..c3b309f 100644
--- a/android/camera/camera-service.c
+++ b/android/camera/camera-service.c
@@ -32,13 +32,30 @@
#define E(...) VERBOSE_PRINT(camera,__VA_ARGS__)
#define D_ACTIVE VERBOSE_CHECK(camera)
-#define SERVICE_NAME "camera"
+/* the T(...) macro is used to dump traffic */
+#define T_ACTIVE 0
+
+#if T_ACTIVE
+#define T(...) VERBOSE_PRINT(camera,__VA_ARGS__)
+#else
+#define T(...) ((void)0)
+#endif
+
+/* Defines name of the camera service. */
+#define SERVICE_NAME "camera"
+
+/* Maximum number of supported emulated cameras. */
+#define MAX_CAMERA 8
/* Camera service descriptor. */
typedef struct CameraServiceDesc CameraServiceDesc;
struct CameraServiceDesc {
+ /* Information about camera devices connected to the host.
+ * Note that once initialized, entries in this array are considered to be
+ * constant. */
+ CameraInfo camera_info[MAX_CAMERA];
/* Number of camera devices connected to the host. */
- int camera_count;
+ int camera_count;
};
/* One and only one camera service. */
@@ -51,16 +68,512 @@ static CameraServiceDesc _camera_service_desc;
/* Initializes camera service descriptor.
*/
static void
-_csDesc_init(CameraServiceDesc* csd)
+_camera_service_init(CameraServiceDesc* csd)
+{
+ /* Enumerate camera devices connected to the host. */
+ csd->camera_count = enumerate_camera_devices(csd->camera_info, MAX_CAMERA);
+ if (csd->camera_count >= 0) {
+ D("%s: Enumerated %d cameras connected to the host",
+ __FUNCTION__, csd->camera_count);
+ } else {
+ E("%s: Unable to enumerate camera devices", __FUNCTION__);
+ csd->camera_count = 0;
+ return;
+ }
+}
+
+/* Gets camera information for the given camera device name.
+ * Param:
+ * cs - Initialized camera service descriptor.
+ * name - Camera device name to look up the information for.
+ * Return:
+ * Camera information pointer on success, or NULL if no camera information has
+ * been found for the given device name. Note that camera information returned
+ * from this routine is constant.
+ */
+static CameraInfo*
+_camera_service_get_camera_info(CameraServiceDesc* cs, const char* name)
{
- csd->camera_count = 0;
+ int n;
+ for (n = 0; n < cs->camera_count; n++) {
+ if (!strcmp(cs->camera_info[n].device_name, name)) {
+ return &cs->camera_info[n];
+ }
+ }
+ return NULL;
+}
+
+/********************************************************************************
+ * Helper routines
+ *******************************************************************************/
+
+/* A strict 'int' version of 'strtol'.
+ * This routine is implemented on top of the standard 'strtol' for 32/64 bit
+ * portability.
+ */
+static int
+strtoi(const char *nptr, char **endptr, int base)
+{
+ long val;
+
+ errno = 0;
+ val = strtol(nptr, endptr, base);
+ if (errno) {
+ return (val == LONG_MAX) ? INT_MAX : INT_MIN;
+ } else {
+ if (val == (int)val) {
+ return (int)val;
+ } else {
+ errno = ERANGE;
+ return val > 0 ? INT_MAX : INT_MIN;
+ }
+ }
+}
+
+/* Gets a parameter value out of the parameter string.
+ * All parameters that are passed to the camera service are formatted as such:
+ * "<name1>=<value1> <name2>=<value2> ... <nameN>=<valueN>"
+ * I.e.:
+ * - Every parameter must have a name, and a value.
+ * - Name and value must be separated with '='.
+ * - No spaces are allowed around '=' separating name and value.
+ * - Parameters must be separated with a single ' ' character.
+ * - No '=' character is allowed in name and in value.
+ * Param:
+ * params - String, containing the parameters.
+ * name - Parameter name.
+ * value - Upon success contains value for the given parameter.
+ * val_size - Size of the 'value' string buffer.
+ * Return:
+ *  0 on success, -1 if the requested parameter is not found, or a (positive)
+ *  number of bytes required to make a copy of the parameter's value if the
+ *  'value' string buffer was too small to contain it.
+ */
+static int
+_get_param_value(const char* params, const char* name, char* value, int val_size)
+{
+ const char* val_end;
+ int len = strlen(name);
+ const char* par_end = params + strlen(params);
+ const char* par_start = strstr(params, name);
+
+ /* Search for 'name=' */
+ while (par_start != NULL) {
+ /* Make sure that we're within the parameters buffer. */
+ if ((par_end - par_start) < len) {
+ par_start = NULL;
+ break;
+ }
+ /* Make sure that par_start starts at the beginning of <name>, and only
+ * then check for '=' value separator. */
+ if ((par_start == params || (*(par_start - 1) == ' ')) &&
+ par_start[len] == '=') {
+ break;
+ }
+ /* False positive. Move on... */
+ par_start = strstr(par_start + 1, name);
+ }
+ if (par_start == NULL) {
+ return -1;
+ }
+
+ /* Advance past 'name=', and calculate value's string length. */
+ par_start += len + 1;
+ val_end = strchr(par_start, ' ');
+ if (val_end == NULL) {
+ val_end = par_start + strlen(par_start);
+ }
+ len = val_end - par_start;
+
+ /* Check if fits... */
+ if ((len + 1) <= val_size) {
+ memcpy(value, par_start, len);
+ value[len] = '\0';
+ return 0;
+ } else {
+ return len + 1;
+ }
+}
+
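For example (illustrative only, not part of the patch), pulling the device name
out of a typical client parameter string:

    char value[64];
    const char* params = "name=/dev/video0 inp_channel=0";
    if (_get_param_value(params, "name", value, sizeof(value)) == 0) {
        /* 'value' now contains "/dev/video0". */
    }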
+/* Gets a parameter value out of the parameter string.
+ * This routine is similar to _get_param_value, except it will always allocate
+ * a string buffer for the value.
+ * Param:
+ * params - String, containing the parameters.
+ * name - Parameter name.
+ *  value - Upon success contains an allocated string containing the value for
+ *      the given parameter. The caller is responsible for freeing the buffer
+ *      returned in this parameter on success.
+ * Return:
+ * 0 on success, -1 if requested parameter is not found, or -2 on
+ * memory failure.
+ */
+static int
+_get_param_value_alloc(const char* params, const char* name, char** value)
+{
+ char tmp;
+ int res;
+
+ /* Calculate size of string buffer required for the value. */
+ const int val_size = _get_param_value(params, name, &tmp, 0);
+ if (val_size < 0) {
+ *value = NULL;
+ return val_size;
+ }
+
+ /* Allocate string buffer, and retrieve the value. */
+ *value = (char*)malloc(val_size);
+ if (*value == NULL) {
+ E("%s: Unable to allocated %d bytes for string buffer.",
+ __FUNCTION__, val_size);
+ return -2;
+ }
+ res = _get_param_value(params, name, *value, val_size);
+ if (res) {
+ E("%s: Unable to retrieve value into allocated buffer.", __FUNCTION__);
+ free(*value);
+ *value = NULL;
+ }
+
+ return res;
+}
+
+/* Gets an integer parameter value out of the parameter string.
+ * Param:
+ * params - String, containing the parameters. See comments to _get_param_value
+ * routine on the parameters format.
+ * name - Parameter name. Parameter value must be a decimal number.
+ * value - Upon success contains integer value for the given parameter.
+ * Return:
+ * 0 on success, or -1 if requested parameter is not found, or -2 if parameter's
+ * format was bad (i.e. value was not a decimal number).
+ */
+static int
+_get_param_value_int(const char* params, const char* name, int* value)
+{
+ char val_str[64]; // Should be enough for all numeric values.
+ if (!_get_param_value(params, name, val_str, sizeof(val_str))) {
+ errno = 0;
+ *value = strtoi(val_str, (char**)NULL, 10);
+ if (errno) {
+ E("%s: Value '%s' of the parameter '%s' in '%s' is not a decimal number.",
+ __FUNCTION__, val_str, name, params);
+ return -2;
+ } else {
+ return 0;
+ }
+ } else {
+ return -1;
+ }
+}
+
+/* Extracts query name, and (optionally) query parameters from the query string.
+ * Param:
+ * query - Query string. Query string in the camera service are formatted as such:
+ * "<query name>[ <parameters>]",
+ * where parameters are optional, and if present, must be separated from the
+ * query name with a single ' '. See comments to _get_param_value routine
+ * for the format of the parameters string.
+ * query_name - Upon success contains query name extracted from the query
+ * string.
+ * query_name_size - Buffer size for 'query_name' string.
+ * query_param - Upon success contains a pointer to the beginning of the query
+ * parameters. If query has no parameters, NULL will be passed back with
+ * this parameter. This parameter is optional and can be NULL.
+ * Return:
+ * 0 on success, or number of bytes required for query name if 'query_name'
+ * string buffer was too small to contain it.
+ */
+static int
+_parse_query(const char* query,
+ char* query_name,
+ int query_name_size,
+ const char** query_param)
+{
+ /* Extract query name. */
+ const char* qend = strchr(query, ' ');
+ if (qend == NULL) {
+ qend = query + strlen(query);
+ }
+ if ((qend - query) >= query_name_size) {
+ return qend - query + 1;
+ }
+ memcpy(query_name, query, qend - query);
+ query_name[qend - query] = '\0';
+
+ /* Calculate query parameters pointer (if needed) */
+ if (query_param != NULL) {
+ if (*qend == ' ') {
+ qend++;
+ }
+ *query_param = (*qend == '\0') ? NULL : qend;
+ }
+
+ return 0;
+}
+
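For example (illustrative only, not part of the patch; 842094169 is the decimal
value of the YV12 fourcc, used here as a sample pixel format), splitting a
'start' query into its name and parameter list:

    char query_name[64];
    const char* query_param = NULL;
    if (_parse_query("start dim=640x480 pix=842094169", query_name,
                     sizeof(query_name), &query_param) == 0) {
        /* query_name == "start",
         * query_param points at "dim=640x480 pix=842094169". */
    }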
+/* Appends one string to another, growing the destination string buffer if
+ * needed.
+ * Param:
+ *  str_buf - Contains a pointer to the destination string buffer. The content
+ *      of this parameter can be NULL. Note that the content of this parameter
+ *      will change if the string buffer gets reallocated.
+ *  str_buf_size - Contains the current size of the buffer addressed by the
+ *      'str_buf' parameter. Note that the content of this parameter will
+ *      change if the string buffer gets reallocated.
+ * str - String to append.
+ * Return:
+ * 0 on success, or -1 on failure (memory allocation).
+ */
+static int
+_append_string(char** str_buf, size_t* str_buf_size, const char* str)
+{
+ const size_t offset = (*str_buf != NULL) ? strlen(*str_buf) : 0;
+ const size_t append_bytes = strlen(str) + 1;
+
+ /* Make sure these two match. */
+ if (*str_buf == NULL) {
+ *str_buf_size = 0;
+ }
+
+ if ((offset + append_bytes) > *str_buf_size) {
+        /* Reallocate the string so it can fit what's being appended to it.
+         * Note that we reallocate a bit more than is needed in order to
+         * minimize the number of memory allocation calls in case there are
+         * more "appends" coming. */
+ const size_t required_mem = offset + append_bytes + 256;
+ char* new_buf = (char*)realloc(*str_buf, required_mem);
+ if (new_buf == NULL) {
+ E("%s: Unable to allocate %d bytes for a string",
+ __FUNCTION__, required_mem);
+ return -1;
+ }
+ *str_buf = new_buf;
+ *str_buf_size = required_mem;
+ }
+ memcpy(*str_buf + offset, str, append_bytes);
+
+ return 0;
+}
+
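A short usage sketch (illustrative only, not part of the patch); the buffer
starts out NULL and must be freed by the caller:

    char* str = NULL;
    size_t str_size = 0;
    if (!_append_string(&str, &str_size, "name=/dev/video0 ") &&
        !_append_string(&str, &str_size, "channel=0")) {
        /* str == "name=/dev/video0 channel=0" */
    }
    free(str);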
+/* Represents camera information as a string formatted as follows:
+ *  'name=<devname> channel=<num> pix=<format> framedims=<width1xheight1,width2xheight2,widthNxheightN>\n'
+ * Param:
+ * ci - Camera information descriptor to convert into a string.
+ * str - Pointer to the string buffer where to save the converted camera
+ * information descriptor. On entry, content of this parameter can be NULL.
+ * Note that string buffer addressed with this parameter may be reallocated
+ * in this routine, so (if not NULL) it must contain a buffer allocated with
+ * malloc. The caller is responsible for freeing string buffer returned in
+ * this parameter.
+ * str_size - Contains byte size of the buffer addressed by 'str' parameter.
+ * Return:
+ * 0 on success, or != 0 on failure.
+ */
+static int
+_camera_info_to_string(const CameraInfo* ci, char** str, size_t* str_size) {
+ int res;
+ int n;
+ char tmp[128];
+
+ /* Append device name. */
+ snprintf(tmp, sizeof(tmp), "name=%s ", ci->device_name);
+ res = _append_string(str, str_size, tmp);
+ if (res) {
+ return res;
+ }
+ /* Append input channel. */
+ snprintf(tmp, sizeof(tmp), "channel=%d ", ci->inp_channel);
+ res = _append_string(str, str_size, tmp);
+ if (res) {
+ return res;
+ }
+ /* Append pixel format. */
+ snprintf(tmp, sizeof(tmp), "pix=%d ", ci->pixel_format);
+ res = _append_string(str, str_size, tmp);
+ if (res) {
+ return res;
+ }
+ /* Append supported frame sizes. */
+ snprintf(tmp, sizeof(tmp), "framedims=%dx%d",
+ ci->frame_sizes[0].width, ci->frame_sizes[0].height);
+ res = _append_string(str, str_size, tmp);
+ if (res) {
+ return res;
+ }
+ for (n = 1; n < ci->frame_sizes_num; n++) {
+ snprintf(tmp, sizeof(tmp), ",%dx%d",
+ ci->frame_sizes[n].width, ci->frame_sizes[n].height);
+ res = _append_string(str, str_size, tmp);
+ if (res) {
+ return res;
+ }
+ }
+
+ /* Stringified camera properties should end with EOL. */
+ return _append_string(str, str_size, "\n");
+}
+
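A hypothetical webcam (illustrative only, not part of the patch) would be
rendered by this routine as a line such as:

    name=/dev/video0 channel=0 pix=1448695129 framedims=640x480,352x288\n

where 1448695129 is the decimal value of the V4L2 YUYV fourcc.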
+/********************************************************************************
+ * Helpers for handling camera client queries
+ *******************************************************************************/
+
+/* Formats payload size according to the protocol, and sends it to the client.
+ * To simplify endianness handling we convert the payload size to an eight
+ * character string, representing the payload size value in hexadecimal format.
+ * Param:
+ * qc - Qemu client to send the payload size to.
+ * payload_size - Payload size to report to the client.
+ */
+static void
+_qemu_client_reply_payload(QemudClient* qc, size_t payload_size)
+{
+ char payload_size_str[9];
+    snprintf(payload_size_str, sizeof(payload_size_str), "%08x",
+             (unsigned int)payload_size);
+ qemud_client_send(qc, (const uint8_t*)payload_size_str, 8);
+}
+
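For example (illustrative only, not part of the patch; 'qc' and 'payload' are
assumed to exist), an 11-byte payload is announced as the eight-character hex
string "0000000b" before the payload itself is sent:

    _qemu_client_reply_payload(qc, 11);   /* client receives "0000000b" */
    qemud_client_send(qc, payload, 11);   /* followed by the 11 payload bytes */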
+/*
+ * Prefixes for replies to camera client queries.
+ */
+
+/* Success, no data to send in reply. */
+#define OK_REPLY "ok"
+/* Failure, no data to send in reply. */
+#define KO_REPLY "ko"
+/* Success, there are data to send in reply. */
+#define OK_REPLY_DATA OK_REPLY ":"
+/* Failure, there are data to send in reply. */
+#define KO_REPLY_DATA KO_REPLY ":"
+
+/* Builds and sends a reply to a query.
+ * All replies to a query in camera service have a prefix indicating whether the
+ * query has succeeded ("ok"), or failed ("ko"). The prefix can be followed by
+ * extra data, containing response to the query. In case there are extra data,
+ * they are separated from the prefix with a ':' character.
+ * Param:
+ * qc - Qemu client to send the reply to.
+ * ok_ko - An "ok", or "ko" selector, where 0 is for "ko", and !0 is for "ok".
+ * extra - Optional extra query data. Can be NULL.
+ * extra_size - Extra data size.
+ */
+static void
+_qemu_client_query_reply(QemudClient* qc,
+ int ok_ko,
+ const void* extra,
+ size_t extra_size)
+{
+ const char* ok_ko_str;
+ size_t payload_size;
+
+ /* Make sure extra_size is 0 if extra is NULL. */
+ if (extra == NULL && extra_size != 0) {
+ W("%s: 'extra' = NULL, while 'extra_size' = %d",
+ __FUNCTION__, extra, extra_size);
+ extra_size = 0;
+ }
+
+ /* Calculate total payload size, and select appropriate 'ok'/'ko' prefix */
+ if (extra_size) {
+ /* 'extra' size + 2 'ok'/'ko' bytes + 1 ':' separator byte. */
+ payload_size = extra_size + 3;
+ ok_ko_str = ok_ko ? OK_REPLY_DATA : KO_REPLY_DATA;
+ } else {
+ /* No extra data: just zero-terminated 'ok'/'ko'. */
+ payload_size = 3;
+ ok_ko_str = ok_ko ? OK_REPLY : KO_REPLY;
+ }
+
+ /* Send payload size first. */
+ _qemu_client_reply_payload(qc, payload_size);
+ /* Send 'ok[:]'/'ko[:]' next. Note that if there is no extra data, we still
+ * need to send a zero-terminator for 'ok'/'ko' string instead of the ':'
+ * separator. So, one way or another, the prefix is always 3 bytes. */
+ qemud_client_send(qc, (const uint8_t*)ok_ko_str, 3);
+ /* Send extra data (if present). */
+ if (extra != NULL) {
+ qemud_client_send(qc, (const uint8_t*)extra, extra_size);
+ }
+}
+
+/* Replies query success ("OK") back to the client.
+ * Param:
+ * qc - Qemu client to send the reply to.
+ * ok_str - An optional string containing query results. Can be NULL.
+ */
+static void
+_qemu_client_reply_ok(QemudClient* qc, const char* ok_str)
+{
+ _qemu_client_query_reply(qc, 1, ok_str,
+ (ok_str != NULL) ? (strlen(ok_str) + 1) : 0);
+}
+
+/* Replies query failure ("KO") back to the client.
+ * Param:
+ * qc - Qemu client to send the reply to.
+ * ko_str - An optional string containing reason for failure. Can be NULL.
+ */
+static void
+_qemu_client_reply_ko(QemudClient* qc, const char* ko_str)
+{
+ _qemu_client_query_reply(qc, 0, ko_str,
+ (ko_str != NULL) ? (strlen(ko_str) + 1) : 0);
}
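Putting the reply helpers together, the wire format looks as follows
(illustrative only, not part of the patch; each reply is preceded by its
eight-character hex payload size):

    _qemu_client_reply_ok(qc, NULL);    /* "00000003" "ok\0"        */
    _qemu_client_reply_ok(qc, "1");     /* "00000005" "ok:" "1\0"   */
    _qemu_client_reply_ko(qc, "bad");   /* "00000007" "ko:" "bad\0" */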
/********************************************************************************
* Camera Factory API
*******************************************************************************/
+/* Handles the 'list' query received from the factory client.
+ * The response to this query is a string representing each connected camera in
+ * the format produced by _camera_info_to_string:
+ *  'name=<devname> channel=<num> pix=<format> framedims=<width1xheight1,...>\n'
+ * The strings representing each camera are separated with an EOL symbol.
+ * Param:
+ *  csd, client - Factory service, and client.
+ * Return:
+ * 0 on success, or != 0 on failure.
+ */
+static int
+_factory_client_list_cameras(CameraServiceDesc* csd, QemudClient* client)
+{
+ int n;
+ size_t reply_size = 0;
+ char* reply = NULL;
+
+    /* Let's see whether anything was found... */
+ if (csd->camera_count == 0) {
+ /* No cameras connected to the host. Reply with "\n" */
+ _qemu_client_reply_ok(client, "\n");
+ return 0;
+ }
+
+ /* "Stringify" each camera information into the reply string. */
+ for (n = 0; n < csd->camera_count; n++) {
+ const int res =
+ _camera_info_to_string(csd->camera_info + n, &reply, &reply_size);
+ if (res) {
+ if (reply != NULL) {
+ free(reply);
+ }
+ _qemu_client_reply_ko(client, "Memory allocation error");
+ return res;
+ }
+ }
+
+ D("%s Replied: %s", __FUNCTION__, reply);
+ _qemu_client_reply_ok(client, reply);
+ free(reply);
+
+ return 0;
+}
+
/* Handles a message received from the emulated camera factory client.
+ * Queries received here are represented as strings:
+ * 'list' - Queries list of cameras connected to the host.
+ * Param:
+ * opaque - Camera service descriptor.
+ * msg, msglen - Message received from the camera factory client.
+ * client - Camera factory client pipe.
*/
static void
_factory_client_recv(void* opaque,
@@ -68,14 +581,45 @@ _factory_client_recv(void* opaque,
int msglen,
QemudClient* client)
{
- // TODO: implement.
+ /*
+ * Emulated camera factory client queries.
+ */
+
+ /* List cameras connected to the host. */
+ static const char _query_list[] = "list";
+
+ CameraServiceDesc* csd = (CameraServiceDesc*)opaque;
+ char query_name[64];
+ const char* query_param = NULL;
+
+ /* Parse the query, extracting query name and parameters. */
+ if (_parse_query((const char*)msg, query_name, sizeof(query_name),
+ &query_param)) {
+ E("%s: Invalid format in query '%s'", __FUNCTION__, (const char*)msg);
+ _qemu_client_reply_ko(client, "Invalid query format");
+ return;
+ }
+
+ D("%s Camera factory query '%s'", __FUNCTION__, query_name);
+
+ /* Dispatch the query to an appropriate handler. */
+ if (!strcmp(query_name, _query_list)) {
+ /* This is a "list" query. */
+ _factory_client_list_cameras(csd, client);
+ } else {
+ E("%s: Unknown camera factory query name in '%s'",
+ __FUNCTION__, (const char*)msg);
+ _qemu_client_reply_ko(client, "Unknown query name");
+ }
}
-/* Emulated camera factory client has been disconnected from the service.
- */
+/* Emulated camera factory client has been disconnected from the service. */
static void
_factory_client_close(void* opaque)
{
+    /* There is nothing to clean up here: the factory service is just an alias
+     * for the "root" camera service, and doesn't require anything more than
+     * the camera service descriptor already provides. */
}
/********************************************************************************
@@ -91,151 +635,537 @@ struct CameraClient
* On Linux this is the name of the camera device.
* On Windows this is the name of capturing window.
*/
- char* name;
-
+ char* device_name;
/* Input channel to use to connect to the camera. */
- int inp_channel;
-
- /* Extra parameters passed to the client. */
- char* remaining_param;
+ int inp_channel;
+ /* Camera information. */
+ const CameraInfo* camera_info;
+ /* Emulated camera device descriptor. */
+ CameraDevice* camera;
+    /* Buffer allocated for the video frames.
+     * Note that the memory allocated for this buffer
+     * also contains the preview framebuffer. */
+    uint8_t* video_frame;
+    /* Points to the Cb plane inside the video frame buffer. */
+    uint8_t* video_frame_Cb;
+    /* Points to the Cr plane inside the video frame buffer. */
+    uint8_t* video_frame_Cr;
+    /* Preview frame buffer.
+     * This address points inside the 'video_frame' buffer. */
+    uint16_t* preview_frame;
+    /* Byte size of the video frame buffer. */
+    size_t video_frame_size;
+    /* Byte size of the preview frame buffer. */
+    size_t preview_frame_size;
+ /* Pixel format required by the guest. */
+ uint32_t pixel_format;
+ /* Frame width. */
+ int width;
+ /* Frame height. */
+ int height;
+ /* Number of pixels in a frame buffer. */
+ int pixel_num;
+ /* Status of video and preview frame cache. */
+ int frames_cached;
};
-/* Frees emulated camera client descriptor.
- */
+/* Frees emulated camera client descriptor. */
static void
_camera_client_free(CameraClient* cc)
{
- if (cc->remaining_param != NULL) {
- free(cc->remaining_param);
+ /* The only exception to the "read only" rule: we have to mark the camera
+ * as being not used when we destroy a service for it. */
+ if (cc->camera_info != NULL) {
+ ((CameraInfo*)cc->camera_info)->in_use = 0;
+ }
+ if (cc->camera != NULL) {
+ camera_device_close(cc->camera);
+ }
+ if (cc->video_frame != NULL) {
+ free(cc->video_frame);
}
- if (cc->name != NULL) {
- free(cc->name);
+ if (cc->device_name != NULL) {
+ free(cc->device_name);
}
AFREE(cc);
}
-/* Parses emulated camera client parameters.
+/* Creates descriptor for a connecting emulated camera client.
* Param:
- * param - Parameters to parse. This string contains multiple parameters,
- * separated by a ':' character. The format of the parameters string is as
- * follows:
- * <device name>[:<input channel #>][:<extra param>],
- * where 'device name' is a required parameter defining name of the camera
- * device, 'input channel' is an optional parameter (positive integer),
- * defining input channel to use on the camera device. Format of the
- * extra parameters is not defined at this point.
- * device_name - Upon success contains camera device name. The caller is
- * responsible for freeing string buffer returned here.
- * inp_channel - Upon success contains the input channel to use when connecting
- * to the device. If this parameter is missing, a 0 will be returned here.
- * remainder - Contains copy of the string containing remander of the parameters
- * following device name and input channel. If there are no remainding
- * parameters, a NULL will be returned here. The caller is responsible for
- * freeing string buffer returned here.
+ * csd - Camera service descriptor.
+ *  param - Client parameters. Must be formatted as described in comments to
+ *      the _get_param_value routine, and must contain at least the 'name'
+ *      parameter, identifying the camera device to create the service for.
+ *      The parameters may also contain a decimal 'inp_channel' parameter,
+ *      selecting the input channel to use when communicating with the camera
+ *      device.
* Return:
- * 0 on success, or !0 on failure.
+ * Emulated camera client descriptor on success, or NULL on failure.
*/
-static int
-_parse_camera_client_param(const char* param,
- char** device_name,
- int* inp_channel,
- char** remainder)
+static CameraClient*
+_camera_client_create(CameraServiceDesc* csd, const char* param)
+{
+ CameraClient* cc;
+ CameraInfo* ci;
+ int res;
+ ANEW0(cc);
+
+ /*
+ * Parse parameter string, containing camera client properties.
+ */
+
+ /* Pull required device name. */
+    if (_get_param_value_alloc(param, "name", &cc->device_name)) {
+        E("%s: Allocation failure, or the required 'name' parameter is missing or malformed in '%s'",
+          __FUNCTION__, param);
+        /* Free the partially initialized descriptor to avoid leaking it. */
+        _camera_client_free(cc);
+        return NULL;
+    }
+
+ /* Pull optional input channel. */
+ res = _get_param_value_int(param, "inp_channel", &cc->inp_channel);
+ if (res != 0) {
+        if (res == -1) {
+            /* The 'inp_channel' parameter has been omitted. Use the default
+             * input channel, which is zero. */
+            cc->inp_channel = 0;
+        } else {
+            E("%s: 'inp_channel' parameter is malformed in '%s'",
+              __FUNCTION__, param);
+            /* Free the partially initialized descriptor to avoid leaking it. */
+            _camera_client_free(cc);
+            return NULL;
+        }
+ }
+
+ /* Get camera info for the emulated camera represented with this service.
+ * Array of camera information records has been created when the camera
+ * service was enumerating camera devices during the service initialization.
+ * By the camera service protocol, camera service clients must first obtain
+ * list of enumerated cameras via the 'list' query to the camera service, and
+ * then use device name reported in the list to connect to an emulated camera
+ * service. So, if camera information for the given device name is not found
+ * in the array, we fail this connection due to protocol violation. */
+ ci = _camera_service_get_camera_info(csd, cc->device_name);
+ if (ci == NULL) {
+ E("%s: Cannot find camera info for device '%s'",
+ __FUNCTION__, cc->device_name);
+ _camera_client_free(cc);
+ return NULL;
+ }
+
+    /* We can't allow multiple camera services for a single camera device. Let's
+     * make sure that no other client has been created for this camera. */
+ if (ci->in_use) {
+ E("%s: Camera device '%s' is in use", __FUNCTION__, cc->device_name);
+ _camera_client_free(cc);
+ return NULL;
+ }
+
+    /* We're done. Mark the camera as in use, and complete the connection. */
+ ci->in_use = 1;
+ cc->camera_info = ci;
+
+ D("%s: Camera service is created for device '%s' using input channel %d",
+ __FUNCTION__, cc->device_name, cc->inp_channel);
+
+ return cc;
+}
+
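A connection sketch (illustrative only, not part of the patch; 'csd' is assumed
to be the initialized service descriptor, and the device name must be one
previously reported by the factory 'list' query):

    CameraClient* cc =
        _camera_client_create(csd, "name=/dev/video0 inp_channel=0");
    if (cc != NULL) {
        /* The client is now bound to /dev/video0, and the corresponding
         * CameraInfo entry has its in_use flag set. */
    }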
+/********************************************************************************
+ * Camera client queries
+ *******************************************************************************/
+
+/* Client has queried connection to the camera.
+ * Param:
+ * cc - Queried camera client descriptor.
+ * qc - Qemu client for the emulated camera.
+ * param - Query parameters. There are no parameters expected for this query.
+ */
+static void
+_camera_client_query_connect(CameraClient* cc, QemudClient* qc, const char* param)
+{
+ if (cc->camera != NULL) {
+ /* Already connected. */
+ W("%s: Camera '%s' is already connected", __FUNCTION__, cc->device_name);
+ _qemu_client_reply_ok(qc, "Camera is already connected");
+ return;
+ }
+
+ /* Open camera device. */
+ cc->camera = camera_device_open(cc->device_name, cc->inp_channel);
+ if (cc->camera == NULL) {
+ E("%s: Unable to open camera device '%s'", __FUNCTION__, cc->device_name);
+ _qemu_client_reply_ko(qc, "Unable to open camera device.");
+ return;
+ }
+
+ D("%s: Camera device '%s' is now connected", __FUNCTION__, cc->device_name);
+
+ _qemu_client_reply_ok(qc, NULL);
+}
+
+/* Client has queried disconnection from the camera.
+ * Param:
+ * cc - Queried camera client descriptor.
+ * qc - Qemu client for the emulated camera.
+ * param - Query parameters. There are no parameters expected for this query.
+ */
+static void
+_camera_client_query_disconnect(CameraClient* cc,
+ QemudClient* qc,
+ const char* param)
{
- const char* wrk = param;
- const char* sep;
+ if (cc->camera == NULL) {
+ /* Already disconnected. */
+ W("%s: Camera '%s' is already disconnected", __FUNCTION__, cc->device_name);
+ _qemu_client_reply_ok(qc, "Camera is not connected");
+ return;
+ }
+
+    /* Before we can go ahead and disconnect, we must make sure that the camera
+     * is not capturing frames. */
+    if (cc->video_frame != NULL) {
+        E("%s: Cannot disconnect camera '%s' while it is capturing frames",
+          __FUNCTION__, cc->device_name);
+ _qemu_client_reply_ko(qc, "Camera is not stopped");
+ return;
+ }
- *device_name = *remainder = NULL;
- *inp_channel = 0;
+ /* Close camera device. */
+ camera_device_close(cc->camera);
+ cc->camera = NULL;
+
+ D("Camera device '%s' is now disconnected", cc->device_name);
+
+ _qemu_client_reply_ok(qc, NULL);
+}
+
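
Taken together with the 'start', 'stop' and 'frame' handlers below, a typical
client session walks through the queries in this order (an illustrative trace;
842094169 is the decimal fourcc of V4L2_PIX_FMT_YVU420, the frame sizes follow
the YV12/RGB32 math in the 'start' handler, and the payload-size header sent
by _qemu_client_reply_payload is omitted here for brevity):

    connect                             -> ok
    start dim=640x480 pix=842094169     -> ok
    frame video=460800 preview=1228800  -> ok: <video bytes><preview bytes>
    stop                                -> ok
    disconnect                          -> ok
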
+/* Client has queried the service to start capturing video.
+ * Param:
+ *  cc - Queried camera client descriptor.
+ *  qc - Qemu client for the emulated camera.
+ *  param - Query parameters. Parameters for this query must contain 'dim' and
+ *      'pix' parameters, where 'dim' must be "dim=<width>x<height>", and 'pix'
+ *      must be "pix=<format>". 'width' and 'height' must be numerical values
+ *      for the width and height of the captured video frames, and 'format'
+ *      must be a numerical value for the pixel format of the video frames
+ *      expected by the client, one of the V4L2_PIX_FMT_XXX values.
+ */
+static void
+_camera_client_query_start(CameraClient* cc, QemudClient* qc, const char* param)
+{
+ char* w;
+ char dim[64];
+ int width, height, pix_format;
/* Sanity check. */
- if (param == NULL || *param == '\0') {
- E("%s: Parameters must contain device name", __FUNCTION__);
- return -1;
+ if (cc->camera == NULL) {
+ /* Not connected. */
+ E("%s: Camera '%s' is not connected", __FUNCTION__, cc->device_name);
+ _qemu_client_reply_ko(qc, "Camera is not connected");
+ return;
}
- /* Must contain device name. */
- sep = strchr(wrk, ':');
- if (sep == NULL) {
- /* Contains only device name. */
- *device_name = ASTRDUP(param);
- return 0;
+ /*
+ * Parse parameters.
+ */
+
+ if (param == NULL) {
+ E("%s: Missing parameters for the query", __FUNCTION__);
+ _qemu_client_reply_ko(qc, "Missing parameters for the query");
+ return;
}
- /* Save device name. */
- *device_name = (char*)malloc((sep - wrk) + 1);
- if (*device_name == NULL) {
- derror("%s: Not enough memory", __FUNCTION__);
- return -1;
+ /* Pull required 'dim' parameter. */
+ if (_get_param_value(param, "dim", dim, sizeof(dim))) {
+ E("%s: Invalid or missing 'dim' parameter in '%s'", __FUNCTION__, param);
+ _qemu_client_reply_ko(qc, "Invalid or missing 'dim' parameter");
+ return;
}
- memcpy(*device_name, wrk, sep - wrk);
- (*device_name)[sep - wrk] = '\0';
- /* Move on to the the input channel. */
- wrk = sep + 1;
- if (*wrk == '\0') {
- return 0;
+ /* Pull required 'pix' parameter. */
+ if (_get_param_value_int(param, "pix", &pix_format)) {
+ E("%s: Invalid or missing 'pix' parameter in '%s'", __FUNCTION__, param);
+ _qemu_client_reply_ko(qc, "Invalid or missing 'pix' parameter");
+ return;
}
- sep = strchr(wrk, ':');
- if (sep == NULL) {
- sep = wrk + strlen(wrk);
+
+ /* Parse 'dim' parameter, and get requested frame width and height. */
+ w = strchr(dim, 'x');
+ if (w == NULL || w[1] == '\0') {
+ E("%s: Invalid 'dim' parameter in '%s'", __FUNCTION__, param);
+ _qemu_client_reply_ko(qc, "Invalid 'dim' parameter");
+ return;
}
- errno = 0; // strtol doesn't set it on success.
- *inp_channel = strtol(wrk, (char**)&sep, 10);
- if (errno != 0) {
- E("%s: Parameters %s contain invalid input channel",
- __FUNCTION__, param);
- free(*device_name);
- *device_name = NULL;
- return -1;
+ *w = '\0'; w++;
+ errno = 0;
+    width = (int)strtol(dim, NULL, 10);
+    height = (int)strtol(w, NULL, 10);
+ if (errno) {
+ E("%s: Invalid 'dim' parameter in '%s'", __FUNCTION__, param);
+ _qemu_client_reply_ko(qc, "Invalid 'dim' parameter");
+ return;
}
- if (*sep == '\0') {
- return 0;
+
+    /* After collecting the capture parameters, let's see if the camera has
+     * already been started, and if so, whether the parameters match. */
+    if (cc->video_frame != NULL) {
+        /* Already started. Match capture parameters. */
+        if (cc->pixel_format == pix_format && cc->width == width &&
+            cc->height == height) {
+            /* Parameters match. Succeed the query. */
+            W("%s: Camera '%s' is already started", __FUNCTION__, cc->device_name);
+            _qemu_client_reply_ok(qc, "Camera is already started");
+        } else {
+            /* Parameters don't match. Fail the query. */
+ E("%s: Camera '%s' is already started, and parameters don't match:\n"
+ "Current %.4s[%dx%d] != requested %.4s[%dx%d]",
+ __FUNCTION__, cc->device_name, (const char*)&cc->pixel_format,
+ cc->width, cc->height, (const char*)&pix_format, width, height);
+ _qemu_client_reply_ko(qc,
+ "Camera is already started with different capturing parameters");
+ }
+ return;
}
- /* Move on to the the remaining string. */
- wrk = sep + 1;
- if (*wrk == '\0') {
- return 0;
+ /*
+ * Start the camera.
+ */
+
+ /* Save capturing parameters. */
+ cc->pixel_format = pix_format;
+ cc->width = width;
+ cc->height = height;
+ cc->pixel_num = cc->width * cc->height;
+ cc->frames_cached = 0;
+
+    /* Make sure that the pixel format is known, and calculate the video
+     * framebuffer size along the way. */
+ switch (cc->pixel_format) {
+ case V4L2_PIX_FMT_YVU420:
+ cc->video_frame_size = (cc->pixel_num * 12) / 8;
+ break;
+
+ default:
+ E("%s: Unknown pixel format %.4s",
+ __FUNCTION__, (char*)&cc->pixel_format);
+ _qemu_client_reply_ko(qc, "Pixel format is unknown");
+ return;
}
- *remainder = ASTRDUP(wrk);
- return 0;
+    /* Make sure that we have a converter between the original camera pixel
+     * format and the one that the client expects. A converter must also exist
+     * for the preview window pixel format (RGB32). */
+    if (!has_converter(cc->camera_info->pixel_format, cc->pixel_format) ||
+        !has_converter(cc->camera_info->pixel_format, V4L2_PIX_FMT_RGB32)) {
+        E("%s: No conversion exists between %.4s and %.4s (or RGB32) pixel formats",
+          __FUNCTION__, (char*)&cc->camera_info->pixel_format, (char*)&cc->pixel_format);
+        _qemu_client_reply_ko(qc, "No conversion exists for the requested pixel format");
+ return;
+ }
+
+    /* TODO: At the moment the camera framework in the emulator requires the
+     * RGB32 pixel format for the preview window. So, we need to keep two
+     * framebuffers here: one for the video, and another for the preview
+     * window. Watch out when this changes (if it changes). */
+ cc->preview_frame_size = cc->pixel_num * 4;
+
+    /* Allocate a buffer large enough to contain both the video and the
+     * preview framebuffers. */
+ cc->video_frame =
+ (uint8_t*)malloc(cc->video_frame_size + cc->preview_frame_size);
+ if (cc->video_frame == NULL) {
+ E("%s: Not enough memory for framebuffers %d + %d",
+ __FUNCTION__, cc->video_frame_size, cc->preview_frame_size);
+ _qemu_client_reply_ko(qc, "Out of memory");
+ return;
+ }
+
+ /* Set framebuffer pointers. */
+ cc->preview_frame = (uint16_t*)(cc->video_frame + cc->video_frame_size);
+    /* TODO: Get rid of this! It assumes that the client's framebuffer is YV12.
+     * Let the camera do the conversion. All we need is to ensure that the
+     * framebuffers allocated here are large enough! */
+ cc->video_frame_Cb = cc->video_frame + cc->pixel_num;
+ cc->video_frame_Cr = cc->video_frame_Cb + cc->pixel_num / 4;
+
+ /* Start the camera. */
+ if (camera_device_start_capturing(cc->camera, cc->camera_info->pixel_format,
+ cc->width, cc->height)) {
+ E("%s: Cannot start camera '%s' for %.4s[%dx%d]: %s",
+ __FUNCTION__, cc->device_name, (const char*)&cc->pixel_format,
+ cc->width, cc->height, strerror(errno));
+ free(cc->video_frame);
+ cc->video_frame = NULL;
+ _qemu_client_reply_ko(qc, "Cannot start the camera");
+ return;
+ }
+
+ D("%s: Camera '%s' is now started for %.4s[%dx%d]",
+ __FUNCTION__, cc->device_name, (char*)&cc->pixel_format, cc->width,
+ cc->height);
+
+ _qemu_client_reply_ok(qc, NULL);
}
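
The framebuffer sizing and pointer math above assumes the planar YV12 layout
of V4L2_PIX_FMT_YVU420: a full-resolution 8-bit Y plane followed by two
quarter-resolution chroma planes, 12 bits per pixel in total. A standalone
sketch of that arithmetic (the 640x480 dimensions are examples only):

    #include <stdio.h>

    /* Illustrative YV12 framebuffer math, mirroring _camera_client_query_start:
     * video_frame_size = pixel_num * 12 / 8, with the two chroma planes placed
     * right after the pixel_num-byte Y plane. */
    int main(void)
    {
        const int width = 640, height = 480;    /* Example frame dimensions. */
        const int pixel_num = width * height;   /* 307200 */
        const int video_frame_size = (pixel_num * 12) / 8;  /* 460800 */
        const int preview_frame_size = pixel_num * 4;       /* RGB32: 1228800 */

        /* Chroma plane offsets inside the video framebuffer. */
        const int chroma1_offset = pixel_num;                  /* 307200 */
        const int chroma2_offset = pixel_num + pixel_num / 4;  /* 384000 */

        printf("video=%d preview=%d chroma planes at %d and %d\n",
               video_frame_size, preview_frame_size,
               chroma1_offset, chroma2_offset);
        return 0;
    }
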
-/* Creates descriptor for a connecting emulated camera client.
+/* Client has queried the service to stop capturing video.
* Param:
- * csd - Camera service descriptor.
- * param - Client parameters. Must be formatted as follows:
- * - Multiple parameters are separated by ':'
- * - Must begin with camera device name
- * - Can follow with an optional input channel number, wich must be an
- * integer value
- * Return:
- * Emulated camera client descriptor on success, or NULL on failure.
+ * cc - Queried camera client descriptor.
+ * qc - Qemu client for the emulated camera.
+ * param - Query parameters. There are no parameters expected for this query.
*/
-static CameraClient*
-_camera_client_create(CameraServiceDesc* csd, const char* param)
+static void
+_camera_client_query_stop(CameraClient* cc, QemudClient* qc, const char* param)
{
- CameraClient* cc;
- int res;
+ if (cc->video_frame == NULL) {
+ /* Not started. */
+ W("%s: Camera '%s' is not started", __FUNCTION__, cc->device_name);
+ _qemu_client_reply_ok(qc, "Camera is not started");
+ return;
+ }
- ANEW0(cc);
+ /* Stop the camera. */
+ if (camera_device_stop_capturing(cc->camera)) {
+ E("%s: Cannot stop camera device '%s': %s",
+ __FUNCTION__, cc->device_name, strerror(errno));
+ _qemu_client_reply_ko(qc, "Cannot stop camera device");
+ return;
+ }
- /* Parse parameters, and save them to the client. */
- res = _parse_camera_client_param(param, &cc->name, &cc->inp_channel,
- &cc->remaining_param);
- if (res) {
- _camera_client_free(cc);
- return NULL;
+ free(cc->video_frame);
+ cc->video_frame = NULL;
+
+ D("%s: Camera device '%s' is now stopped.", __FUNCTION__, cc->device_name);
+ _qemu_client_reply_ok(qc, NULL);
+}
+
+/* Client has queried next frame.
+ * Param:
+ * cc - Queried camera client descriptor.
+ * qc - Qemu client for the emulated camera.
+ * param - Query parameters. Parameters for this query must contain 'video'
+ *      and 'preview' parameters, both decimal values defining the sizes of the
+ *      requested video and preview frames, respectively. A zero value for
+ *      either parameter means that this particular frame is not requested.
+ */
+static void
+_camera_client_query_frame(CameraClient* cc, QemudClient* qc, const char* param)
+{
+ int video_size = 0;
+ int preview_size = 0;
+ int repeat;
+ ClientFrameBuffer fbs[2];
+ int fbs_num = 0;
+ size_t payload_size;
+
+ /* Sanity check. */
+ if (cc->video_frame == NULL) {
+ /* Not started. */
+ E("%s: Camera '%s' is not started", __FUNCTION__, cc->device_name);
+ _qemu_client_reply_ko(qc, "Camera is not started");
+ return;
}
- D("Camera client created: name=%s, inp_channel=%d",
- cc->name, cc->inp_channel);
- return cc;
+ /* Pull required parameters. */
+ if (_get_param_value_int(param, "video", &video_size) ||
+ _get_param_value_int(param, "preview", &preview_size)) {
+ E("%s: Invalid or missing 'video', or 'preview' parameter in '%s'",
+ __FUNCTION__, param);
+ _qemu_client_reply_ko(qc,
+ "Invalid or missing 'video', or 'preview' parameter");
+ return;
+ }
+
+ /* Verify that framebuffer sizes match the ones that the started camera
+ * operates with. */
+ if ((video_size != 0 && cc->video_frame_size != video_size) ||
+ (preview_size != 0 && cc->preview_frame_size != preview_size)) {
+ E("%s: Frame sizes don't match for camera '%s':\n"
+ "Expected %d for video, and %d for preview. Requested %d, and %d",
+ __FUNCTION__, cc->device_name, cc->video_frame_size,
+ cc->preview_frame_size, video_size, preview_size);
+ _qemu_client_reply_ko(qc, "Frame size mismatch");
+ return;
+ }
+
+ /*
+ * Initialize framebuffer array for frame read.
+ */
+
+ if (video_size) {
+ fbs[fbs_num].pixel_format = cc->pixel_format;
+ fbs[fbs_num].framebuffer = cc->video_frame;
+ fbs_num++;
+ }
+ if (preview_size) {
+ /* TODO: Watch out for preview format changes! */
+ fbs[fbs_num].pixel_format = V4L2_PIX_FMT_RGB32;
+ fbs[fbs_num].framebuffer = cc->preview_frame;
+ fbs_num++;
+ }
+
+ /* Capture new frame. */
+ repeat = camera_device_read_frame(cc->camera, fbs, fbs_num);
+
+    /* Note that there is no (known) way to wait until the next frame becomes
+     * available, so we cannot dequeue a frame buffer from the device only when
+     * we know it's ready. Instead we're shooting in the dark, and quite often
+     * the device will respond with EAGAIN, indicating that it doesn't have a
+     * frame ready. In that case the last frame we obtained from the device is
+     * still good, and we can reply with the cached frames. The only case when
+     * we need to keep trying to obtain a new frame is when the frame cache is
+     * still empty. */
+ while (repeat == 1 && !cc->frames_cached) {
+ repeat = camera_device_read_frame(cc->camera, fbs, fbs_num);
+ }
+ if (repeat < 0) {
+ E("%s: Unable to obtain video frame from the camera '%s': %s",
+ __FUNCTION__, cc->device_name, strerror(errno));
+ _qemu_client_reply_ko(qc, "Unable to obtain video frame from the camera");
+ return;
+ }
+ /* We have cached something... */
+ cc->frames_cached = 1;
+
+ /*
+ * Build the reply.
+ */
+
+ /* Payload includes "ok:" + requested video and preview frames. */
+ payload_size = 3 + video_size + preview_size;
+
+ /* Send payload size first. */
+ _qemu_client_reply_payload(qc, payload_size);
+
+    /* After that send the 'ok:'. Note that if no frames are sent, we should
+     * use the prefix "ok" instead of "ok:". */
+ if (video_size || preview_size) {
+ qemud_client_send(qc, (const uint8_t*)"ok:", 3);
+ } else {
+ /* Still 3 bytes: zero terminator is required in this case. */
+ qemud_client_send(qc, (const uint8_t*)"ok", 3);
+ }
+
+ /* After that send video frame (if requested). */
+ if (video_size) {
+ qemud_client_send(qc, cc->video_frame, video_size);
+ }
+
+ /* After that send preview frame (if requested). */
+ if (preview_size) {
+ qemud_client_send(qc, (const uint8_t*)cc->preview_frame, preview_size);
+ }
}
/* Handles a message received from the emulated camera client.
+ * Queries received here are represented as strings:
+ * - 'connect' - Connects to the camera device (opens it).
+ * - 'disconnect' - Disconnects from the camera device (closes it).
+ * - 'start' - Starts capturing video from the connected camera device.
+ * - 'stop' - Stops capturing video from the connected camera device.
+ * - 'frame' - Queries video and preview frames captured from the camera.
+ * Param:
+ *  opaque - Emulated camera client descriptor.
+ *  msg, msglen - Message received from the emulated camera client.
+ *  client - Emulated camera client pipe.
*/
static void
_camera_client_recv(void* opaque,
@@ -243,20 +1173,68 @@ _camera_client_recv(void* opaque,
int msglen,
QemudClient* client)
{
+ /*
+ * Emulated camera client queries.
+ */
+
+ /* Connect to the camera. */
+ static const char _query_connect[] = "connect";
+ /* Disconnect from the camera. */
+ static const char _query_disconnect[] = "disconnect";
+ /* Start video capturing. */
+ static const char _query_start[] = "start";
+ /* Stop video capturing. */
+ static const char _query_stop[] = "stop";
+ /* Query frame(s). */
+ static const char _query_frame[] = "frame";
+
+ char query_name[64];
+ const char* query_param;
CameraClient* cc = (CameraClient*)opaque;
- // TODO: implement!
+ /*
+     * Emulated camera queries are formatted as follows:
+ * "<query name> [<parameters>]"
+ */
+
+ T("%s: Camera client query: '%s'", __FUNCTION__, (char*)msg);
+ if (_parse_query((const char*)msg, query_name, sizeof(query_name),
+ &query_param)) {
+ E("%s: Invalid query '%s'", __FUNCTION__, (char*)msg);
+ _qemu_client_reply_ko(client, "Invalid query");
+ return;
+ }
+
+ /* Dispatch the query to an appropriate handler. */
+ if (!strcmp(query_name, _query_frame)) {
+ /* A frame is queried. */
+ _camera_client_query_frame(cc, client, query_param);
+ } else if (!strcmp(query_name, _query_connect)) {
+ /* Camera connection is queried. */
+ _camera_client_query_connect(cc, client, query_param);
+ } else if (!strcmp(query_name, _query_disconnect)) {
+        /* Camera disconnection is queried. */
+ _camera_client_query_disconnect(cc, client, query_param);
+ } else if (!strcmp(query_name, _query_start)) {
+ /* Start capturing is queried. */
+ _camera_client_query_start(cc, client, query_param);
+ } else if (!strcmp(query_name, _query_stop)) {
+ /* Stop capturing is queried. */
+ _camera_client_query_stop(cc, client, query_param);
+ } else {
+ E("%s: Unknown query '%s'", __FUNCTION__, (char*)msg);
+ _qemu_client_reply_ko(client, "Unknown query");
+ }
}
-/* Emulated camera client has been disconnected from the service.
- */
+/* Emulated camera client has been disconnected from the service. */
static void
-_camera_client_close(void* opaque)
+_camera_client_close(void* opaque)
{
CameraClient* cc = (CameraClient*)opaque;
- D("Camera client closed: name=%s, inp_channel=%d",
- cc->name, cc->inp_channel);
+ D("%s: Camera client for device '%s' on input channel %d is now closed",
+ __FUNCTION__, cc->device_name, cc->inp_channel);
_camera_client_free(cc);
}
@@ -284,6 +1262,8 @@ _camera_service_connect(void* opaque,
QemudClient* client = NULL;
CameraServiceDesc* csd = (CameraServiceDesc*)opaque;
+ D("%s: Connecting camera client '%s'",
+ __FUNCTION__, client_param ? client_param : "Factory");
if (client_param == NULL || *client_param == '\0') {
/* This is an emulated camera factory client. */
client = qemud_client_new(serv, channel, client_param, csd,
@@ -308,15 +1288,16 @@ android_camera_service_init(void)
static int _inited = 0;
if (!_inited) {
- _csDesc_init(&_camera_service_desc);
+ _camera_service_init(&_camera_service_desc);
QemudService* serv = qemud_service_register( SERVICE_NAME, 0,
&_camera_service_desc,
_camera_service_connect,
NULL, NULL);
if (serv == NULL) {
- derror("could not register '%s' service", SERVICE_NAME);
+ derror("%s: Could not register '%s' service",
+ __FUNCTION__, SERVICE_NAME);
return;
}
- D("registered '%s' qemud service", SERVICE_NAME);
+ D("%s: Registered '%s' qemud service", __FUNCTION__, SERVICE_NAME);
}
}