author    Vladimir Chtchetkine <vchtchetkine@google.com>  2011-11-23 13:03:37 -0800
committer Vladimir Chtchetkine <vchtchetkine@google.com>  2011-12-05 09:17:35 -0800
commit    37fb84f8b26e3061c1ccb404bf4c962eed5e6057 (patch)
tree      b60e5b1f69a291c9396c665ee6556a059f481fc4 /android/camera
parent    4e61742d4f26cefb1baf8d2dc5e7dc8b85a78549 (diff)
Implements white balance and exposure compensation for emulated webcam
Change-Id: Id2dbb227280f0c0e1b5708ef78b9f19b087f92d5
Diffstat (limited to 'android/camera')
-rw-r--r--   android/camera/camera-capture-linux.c        12
-rw-r--r--   android/camera/camera-capture-mac.m          15
-rwxr-xr-x   android/camera/camera-capture-windows.c       9
-rw-r--r--   android/camera/camera-capture.h               8
-rwxr-xr-x   android/camera/camera-format-converters.c   265
-rwxr-xr-x   android/camera/camera-format-converters.h     8
-rw-r--r--   android/camera/camera-service.c              37
7 files changed, 274 insertions, 80 deletions
diff --git a/android/camera/camera-capture-linux.c b/android/camera/camera-capture-linux.c
index 5243fb6..e56ffae 100644
--- a/android/camera/camera-capture-linux.c
+++ b/android/camera/camera-capture-linux.c
@@ -969,7 +969,11 @@ camera_device_stop_capturing(CameraDevice* ccd)
int
camera_device_read_frame(CameraDevice* ccd,
ClientFrameBuffer* framebuffers,
- int fbs_num)
+ int fbs_num,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exp_comp)
{
LinuxCameraDevice* cd;
@@ -1011,7 +1015,8 @@ camera_device_read_frame(CameraDevice* ccd,
cd->actual_pixel_format.sizeimage,
cd->actual_pixel_format.width,
cd->actual_pixel_format.height,
- framebuffers, fbs_num);
+ framebuffers, fbs_num,
+ r_scale, g_scale, b_scale, exp_comp);
} else {
/* Dequeue next buffer from the device. */
struct v4l2_buffer buf;
@@ -1039,7 +1044,8 @@ camera_device_read_frame(CameraDevice* ccd,
cd->actual_pixel_format.sizeimage,
cd->actual_pixel_format.width,
cd->actual_pixel_format.height,
- framebuffers, fbs_num);
+ framebuffers, fbs_num,
+ r_scale, g_scale, b_scale, exp_comp);
/* Requeue the buffer back to the device. */
if (_xioctl(cd->handle, VIDIOC_QBUF, &buf) < 0) {
diff --git a/android/camera/camera-capture-mac.m b/android/camera/camera-capture-mac.m
index 4d684e0..ca1b3a8 100644
--- a/android/camera/camera-capture-mac.m
+++ b/android/camera/camera-capture-mac.m
@@ -120,7 +120,7 @@ _QTtoFOURCC(uint32_t qt_pix_format)
* in the device. The client should respond to this value by repeating the
* read, rather than reporting an error.
*/
-- (int)read_frame:(ClientFrameBuffer*)framebuffers:(int)fbs_num;
+- (int)read_frame:(ClientFrameBuffer*)framebuffers:(int)fbs_num:(float)r_scale:(float)g_scale:(float)b_scale:(float)exp_comp;
@end
@@ -260,7 +260,7 @@ _QTtoFOURCC(uint32_t qt_pix_format)
}
}
-- (int)read_frame:(ClientFrameBuffer*)framebuffers:(int)fbs_num
+- (int)read_frame:(ClientFrameBuffer*)framebuffers:(int)fbs_num:(float)r_scale:(float)g_scale:(float)b_scale:(float)exp_comp
{
int res = -1;
@@ -284,7 +284,8 @@ _QTtoFOURCC(uint32_t qt_pix_format)
/* Convert framebuffer. */
res = convert_frame(pixels, pixel_format, frame_size,
frame_width, frame_height,
- framebuffers, fbs_num);
+ framebuffers, fbs_num,
+ r_scale, g_scale, b_scale, exp_comp);
} else {
E("%s: Unable to obtain framebuffer", __FUNCTION__);
res = -1;
@@ -452,7 +453,11 @@ camera_device_stop_capturing(CameraDevice* cd)
int
camera_device_read_frame(CameraDevice* cd,
ClientFrameBuffer* framebuffers,
- int fbs_num)
+ int fbs_num,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exp_comp)
{
MacCameraDevice* mcd;
@@ -467,7 +472,7 @@ camera_device_read_frame(CameraDevice* cd,
return -1;
}
- return [mcd->device read_frame:framebuffers:fbs_num];
+ return [mcd->device read_frame:framebuffers:fbs_num:r_scale:g_scale:b_scale:exp_comp];
}
void
diff --git a/android/camera/camera-capture-windows.c b/android/camera/camera-capture-windows.c
index ecf5b62..c83b502 100755
--- a/android/camera/camera-capture-windows.c
+++ b/android/camera/camera-capture-windows.c
@@ -465,7 +465,11 @@ camera_device_stop_capturing(CameraDevice* cd)
int
camera_device_read_frame(CameraDevice* cd,
ClientFrameBuffer* framebuffers,
- int fbs_num)
+ int fbs_num,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exp_comp)
{
WndCameraDevice* wcd;
HBITMAP bm_handle;
@@ -527,7 +531,8 @@ camera_device_read_frame(CameraDevice* cd,
wcd->gdi_bitmap->bmiHeader.biSizeImage,
wcd->frame_bitmap->bmiHeader.biWidth,
wcd->frame_bitmap->bmiHeader.biHeight,
- framebuffers, fbs_num);
+ framebuffers, fbs_num,
+ r_scale, g_scale, b_scale, exp_comp);
}
void
diff --git a/android/camera/camera-capture.h b/android/camera/camera-capture.h
index 3025a23..bf754eb 100644
--- a/android/camera/camera-capture.h
+++ b/android/camera/camera-capture.h
@@ -73,6 +73,8 @@ extern int camera_device_stop_capturing(CameraDevice* cd);
* make sure that buffers are large enough to contain entire frame captured
* from the device.
* fbs_num - Number of entries in the 'framebuffers' array.
+ * r_scale, g_scale, b_scale - Per-channel white balance scale.
+ * exp_comp - Exposure compensation.
* Return:
 * 0 on success, or non-zero value on failure. There is a special value 1
 * returned from this routine which indicates that frames were not available in
@@ -82,7 +84,11 @@ extern int camera_device_stop_capturing(CameraDevice* cd);
*/
extern int camera_device_read_frame(CameraDevice* cd,
ClientFrameBuffer* framebuffers,
- int fbs_num);
+ int fbs_num,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exp_comp);
/* Closes camera device, opened in camera_device_open routine.
* Param:
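For illustration, a minimal caller sketch for this extended signature (hedged: the V4L2_PIX_FMT_RGB32 constant and buffer sizing are assumptions; scale values of 1.0 leave white balance and exposure untouched):

    /* Sketch: read one frame with neutral white balance and exposure. */
    ClientFrameBuffer fb;
    fb.pixel_format = V4L2_PIX_FMT_RGB32;          /* assumed: a format with a converter */
    fb.framebuffer  = malloc(width * height * 4);  /* large enough for a full RGB32 frame */
    int res = camera_device_read_frame(cd, &fb, 1,
                                       1.0f, 1.0f, 1.0f,  /* r/g/b scale: no white balance shift */
                                       1.0f);             /* exp_comp: no exposure compensation */
    if (res == 1) {
        /* Frame not yet available: repeat the read rather than reporting an error. */
    }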
diff --git a/android/camera/camera-format-converters.c b/android/camera/camera-format-converters.c
index 3366a44..a785540 100755
--- a/android/camera/camera-format-converters.c
+++ b/android/camera/camera-format-converters.c
@@ -242,6 +242,96 @@ YUVToRGBPix(int y, int u, int v, uint8_t* r, uint8_t* g, uint8_t* b)
*b = (uint8_t)YUV2BO(y,u,v);
}
+/* Computes a luminance value after taking the exposure compensation
+ * value into account.
+ *
+ * Param:
+ * inputY - The input luminance value.
+ * exp_comp - Exposure compensation to apply (a linear multiplier).
+ * Return:
+ * The luminance value after adjusting for the exposure compensation.
+ */
+static __inline__ uint8_t
+_change_exposure(uint8_t inputY, float exp_comp)
+{
+ return (uint8_t)clamp((float)inputY * exp_comp);
+}
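Since _change_exposure applies exp_comp as a plain multiplier on luminance, a client that exposes compensation in EV stops would convert before building the query; a hypothetical mapping (the 2^EV convention is an assumption, not part of this change):

    #include <math.h>

    /* Hypothetical: map an EV-style compensation value to the linear
     * luminance multiplier expected by _change_exposure(). */
    static float
    _ev_to_exp_comp(float ev)
    {
        return powf(2.0f, ev);  /* +1 EV doubles luminance; -1 EV halves it */
    }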
+
+/* Adjusts an RGB pixel for the given exposure compensation. */
+static __inline__ void
+_change_exposure_RGB(uint8_t* r, uint8_t* g, uint8_t* b, float exp_comp)
+{
+ uint8_t y, u, v;
+ R8G8B8ToYUV(*r, *g, *b, &y, &u, &v);
+ YUVToRGBPix(_change_exposure(y, exp_comp), u, v, r, g, b);
+}
+
+/* Adjusts an RGB pixel, with channels stored as ints, for the given exposure compensation. */
+static __inline__ void
+_change_exposure_RGB_i(int* r, int* g, int* b, float exp_comp)
+{
+ uint8_t y, u, v;
+ R8G8B8ToYUV(*r, *g, *b, &y, &u, &v);
+ y = _change_exposure(y, exp_comp);
+ *r = YUV2RO(y,u,v);
+ *g = YUV2GO(y,u,v);
+ *b = YUV2BO(y,u,v);
+}
+
+/* Computes the pixel value after adjusting the white balance to the current
+ * one. The inputs are the Y, U, and V channels of the pixel; the adjusted
+ * values are stored in place. The adjustment is done in RGB space.
+ */
+static __inline__ void
+_change_white_balance_YUV(uint8_t* y,
+ uint8_t* u,
+ uint8_t* v,
+ float r_scale,
+ float g_scale,
+ float b_scale)
+{
+ int r = (float)(YUV2R((int)*y, (int)*u, (int)*v)) / r_scale;
+ int g = (float)(YUV2G((int)*y, (int)*u, (int)*v)) / g_scale;
+ int b = (float)(YUV2B((int)*y, (int)*u, (int)*v)) / b_scale;
+
+ *y = RGB2Y(r, g, b);
+ *u = RGB2U(r, g, b);
+ *v = RGB2V(r, g, b);
+}
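The scale values themselves are supplied by the client; as one illustration of where they might come from, a gray-world estimate derives them from per-channel frame averages (a sketch under that assumption, not part of this change; dividing by these scales in the _change_white_balance_* helpers then neutralizes the color cast):

    /* Hypothetical gray-world estimator over a packed RGB24 frame: scale each
     * channel so that its average matches the green channel's average. */
    static void
    _gray_world_scales(const uint8_t* rgb, int num_pixels,
                       float* r_scale, float* g_scale, float* b_scale)
    {
        double sum_r = 0, sum_g = 0, sum_b = 0;
        int n;
        for (n = 0; n < num_pixels; n++) {
            sum_r += rgb[3 * n];
            sum_g += rgb[3 * n + 1];
            sum_b += rgb[3 * n + 2];
        }
        if (sum_g <= 0) {
            /* Degenerate (all-black) frame: fall back to neutral scales. */
            *r_scale = *g_scale = *b_scale = 1.0f;
            return;
        }
        *r_scale = (float)(sum_r / sum_g);
        *g_scale = 1.0f;
        *b_scale = (float)(sum_b / sum_g);
    }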
+
+/* Computes the pixel value after adjusting the white balance to the current
+ * one. The inputs are the R, G, and B channels of the pixel; the adjusted
+ * values are stored in place.
+ */
+static __inline__ void
+_change_white_balance_RGB(int* r,
+ int* g,
+ int* b,
+ float r_scale,
+ float g_scale,
+ float b_scale)
+{
+ *r = (float)*r / r_scale;
+ *g = (float)*g / g_scale;
+ *b = (float)*b / b_scale;
+}
+
+/* Computes the pixel value after adjusting the white balance to the current
+ * one. The inputs are the R, G, and B channels of the pixel; the adjusted
+ * values are stored in place.
+ */
+static __inline__ void
+_change_white_balance_RGB_b(uint8_t* r,
+ uint8_t* g,
+ uint8_t* b,
+ float r_scale,
+ float g_scale,
+ float b_scale)
+{
+ *r = (float)*r / r_scale;
+ *g = (float)*g / g_scale;
+ *b = (float)*b / b_scale;
+}
+
/********************************************************************************
* Generic converters between YUV and RGB formats
*******************************************************************************/
@@ -912,7 +1002,11 @@ RGBToYUV(const RGBDesc* rgb_fmt,
const void* rgb,
void* yuv,
int width,
- int height)
+ int height,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exp_comp)
{
int y, x;
const int Y_Inc = yuv_fmt->Y_inc;
@@ -928,8 +1022,12 @@ RGBToYUV(const RGBDesc* rgb_fmt,
pY += Y_next_pair, pU += UV_inc, pV += UV_inc) {
uint8_t r, g, b;
rgb = rgb_fmt->load_rgb(rgb, &r, &g, &b);
+ _change_white_balance_RGB_b(&r, &g, &b, r_scale, g_scale, b_scale);
+ _change_exposure_RGB(&r, &g, &b, exp_comp);
R8G8B8ToYUV(r, g, b, pY, pU, pV);
rgb = rgb_fmt->load_rgb(rgb, &r, &g, &b);
+ _change_white_balance_RGB_b(&r, &g, &b, r_scale, g_scale, b_scale);
+ _change_exposure_RGB(&r, &g, &b, exp_comp);
pY[Y_Inc] = RGB2Y((int)r, (int)g, (int)b);
}
 /* Align rgb_ptr to 16 bit */
@@ -944,13 +1042,19 @@ RGBToRGB(const RGBDesc* src_rgb_fmt,
const void* src_rgb,
void* dst_rgb,
int width,
- int height)
+ int height,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exp_comp)
{
int x, y;
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
uint8_t r, g, b;
src_rgb = src_rgb_fmt->load_rgb(src_rgb, &r, &g, &b);
+ _change_white_balance_RGB_b(&r, &g, &b, r_scale, g_scale, b_scale);
+ _change_exposure_RGB(&r, &g, &b, exp_comp);
dst_rgb = dst_rgb_fmt->save_rgb(dst_rgb, r, g, b);
}
 /* Align rgb pointers to 16 bit */
@@ -966,7 +1070,11 @@ YUVToRGB(const YUVDesc* yuv_fmt,
const void* yuv,
void* rgb,
int width,
- int height)
+ int height,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exp_comp)
{
int y, x;
const int Y_Inc = yuv_fmt->Y_inc;
@@ -984,8 +1092,12 @@ YUVToRGB(const YUVDesc* yuv_fmt,
const uint8_t U = *pU;
const uint8_t V = *pV;
YUVToRGBPix(*pY, U, V, &r, &g, &b);
+ _change_white_balance_RGB_b(&r, &g, &b, r_scale, g_scale, b_scale);
+ _change_exposure_RGB(&r, &g, &b, exp_comp);
rgb = rgb_fmt->save_rgb(rgb, r, g, b);
YUVToRGBPix(pY[Y_Inc], U, V, &r, &g, &b);
+ _change_white_balance_RGB_b(&r, &g, &b, r_scale, g_scale, b_scale);
+ _change_exposure_RGB(&r, &g, &b, exp_comp);
rgb = rgb_fmt->save_rgb(rgb, r, g, b);
}
 /* Align rgb_ptr to 16 bit */
@@ -1000,7 +1112,11 @@ YUVToYUV(const YUVDesc* src_fmt,
const void* src,
void* dst,
int width,
- int height)
+ int height,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exp_comp)
{
int y, x;
const int Y_Inc_src = src_fmt->Y_inc;
@@ -1027,7 +1143,9 @@ YUVToYUV(const YUVDesc* src_fmt,
pUdst += UV_inc_dst,
pVdst += UV_inc_dst) {
*pYdst = *pYsrc; *pUdst = *pUsrc; *pVdst = *pVsrc;
- pYdst[Y_Inc_dst] = pYsrc[Y_Inc_src];
+ _change_white_balance_YUV(pYdst, pUdst, pVdst, r_scale, g_scale, b_scale);
+ *pYdst = _change_exposure(*pYdst, exp_comp);
+ pYdst[Y_Inc_dst] = _change_exposure(pYsrc[Y_Inc_src], exp_comp);
}
}
}
@@ -1039,7 +1157,11 @@ BAYERToRGB(const BayerDesc* bayer_fmt,
const void* bayer,
void* rgb,
int width,
- int height)
+ int height,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exp_comp)
{
int y, x;
for (y = 0; y < height; y++) {
@@ -1051,6 +1173,8 @@ BAYERToRGB(const BayerDesc* bayer_fmt,
} else if (bayer_fmt->mask == kBayer12) {
r >>= 4; g >>= 4; b >>= 4;
}
+ _change_white_balance_RGB(&r, &g, &b, r_scale, g_scale, b_scale);
+ _change_exposure_RGB_i(&r, &g, &b, exp_comp);
rgb = rgb_fmt->save_rgb(rgb, r, g, b);
}
 /* Align rgb_ptr to 16 bit */
@@ -1065,7 +1189,11 @@ BAYERToYUV(const BayerDesc* bayer_fmt,
const void* bayer,
void* yuv,
int width,
- int height)
+ int height,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exp_comp)
{
int y, x;
const int Y_Inc = yuv_fmt->Y_inc;
@@ -1081,8 +1209,12 @@ BAYERToYUV(const BayerDesc* bayer_fmt,
pY += Y_next_pair, pU += UV_inc, pV += UV_inc) {
int r, g, b;
_get_bayerRGB(bayer_fmt, bayer, x, y, width, height, &r, &g, &b);
+ _change_white_balance_RGB(&r, &g, &b, r_scale, g_scale, b_scale);
+ _change_exposure_RGB_i(&r, &g, &b, exp_comp);
R8G8B8ToYUV(r, g, b, pY, pU, pV);
_get_bayerRGB(bayer_fmt, bayer, x + 1, y, width, height, &r, &g, &b);
+ _change_white_balance_RGB(&r, &g, &b, r_scale, g_scale, b_scale);
+ _change_exposure_RGB_i(&r, &g, &b, exp_comp);
pY[Y_Inc] = RGB2Y(r, g, b);
}
}
@@ -1484,7 +1616,11 @@ convert_frame(const void* frame,
int width,
int height,
ClientFrameBuffer* framebuffers,
- int fbs_num)
+ int fbs_num,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exp_comp)
{
int n;
const PIXFormat* src_desc = _get_pixel_format_descriptor(pixel_format);
@@ -1495,62 +1631,67 @@ convert_frame(const void* frame,
}
for (n = 0; n < fbs_num; n++) {
- if (framebuffers[n].pixel_format == pixel_format) {
- /* Same pixel format. No conversion needed: just make a copy. */
- memcpy(framebuffers[n].framebuffer, frame, framebuffer_size);
- } else {
- const PIXFormat* dst_desc =
- _get_pixel_format_descriptor(framebuffers[n].pixel_format);
- if (dst_desc == NULL) {
- E("%s: Destination pixel format %.4s is unknown",
- __FUNCTION__, (const char*)&framebuffers[n].pixel_format);
- return -1;
- }
- switch (src_desc->format_sel) {
- case PIX_FMT_RGB:
- if (dst_desc->format_sel == PIX_FMT_RGB) {
- RGBToRGB(src_desc->desc.rgb_desc, dst_desc->desc.rgb_desc,
- frame, framebuffers[n].framebuffer, width, height);
- } else if (dst_desc->format_sel == PIX_FMT_YUV) {
- RGBToYUV(src_desc->desc.rgb_desc, dst_desc->desc.yuv_desc,
- frame, framebuffers[n].framebuffer, width, height);
- } else {
- E("%s: Unexpected destination pixel format %d",
- __FUNCTION__, dst_desc->format_sel);
- return -1;
- }
- break;
- case PIX_FMT_YUV:
- if (dst_desc->format_sel == PIX_FMT_RGB) {
- YUVToRGB(src_desc->desc.yuv_desc, dst_desc->desc.rgb_desc,
- frame, framebuffers[n].framebuffer, width, height);
- } else if (dst_desc->format_sel == PIX_FMT_YUV) {
- YUVToYUV(src_desc->desc.yuv_desc, dst_desc->desc.yuv_desc,
- frame, framebuffers[n].framebuffer, width, height);
- } else {
- E("%s: Unexpected destination pixel format %d",
- __FUNCTION__, dst_desc->format_sel);
- return -1;
- }
- break;
- case PIX_FMT_BAYER:
- if (dst_desc->format_sel == PIX_FMT_RGB) {
- BAYERToRGB(src_desc->desc.bayer_desc, dst_desc->desc.rgb_desc,
- frame, framebuffers[n].framebuffer, width, height);
- } else if (dst_desc->format_sel == PIX_FMT_YUV) {
- BAYERToYUV(src_desc->desc.bayer_desc, dst_desc->desc.yuv_desc,
- frame, framebuffers[n].framebuffer, width, height);
- } else {
- E("%s: Unexpected destination pixel format %d",
- __FUNCTION__, dst_desc->format_sel);
- return -1;
- }
- break;
- default:
- E("%s: Unexpected source pixel format %d",
+ /* Note that we need to apply white balance, exposure compensation, etc.
+ * when we transfer the captured frame to the user framebuffer. So, even
+ * if source and destination formats are the same, we will have to go
+ * through the converters to apply these things. */
+ const PIXFormat* dst_desc =
+ _get_pixel_format_descriptor(framebuffers[n].pixel_format);
+ if (dst_desc == NULL) {
+ E("%s: Destination pixel format %.4s is unknown",
+ __FUNCTION__, (const char*)&framebuffers[n].pixel_format);
+ return -1;
+ }
+ switch (src_desc->format_sel) {
+ case PIX_FMT_RGB:
+ if (dst_desc->format_sel == PIX_FMT_RGB) {
+ RGBToRGB(src_desc->desc.rgb_desc, dst_desc->desc.rgb_desc,
+ frame, framebuffers[n].framebuffer, width, height,
+ r_scale, g_scale, b_scale, exp_comp);
+ } else if (dst_desc->format_sel == PIX_FMT_YUV) {
+ RGBToYUV(src_desc->desc.rgb_desc, dst_desc->desc.yuv_desc,
+ frame, framebuffers[n].framebuffer, width, height,
+ r_scale, g_scale, b_scale, exp_comp);
+ } else {
+ E("%s: Unexpected destination pixel format %d",
__FUNCTION__, dst_desc->format_sel);
return -1;
- }
+ }
+ break;
+ case PIX_FMT_YUV:
+ if (dst_desc->format_sel == PIX_FMT_RGB) {
+ YUVToRGB(src_desc->desc.yuv_desc, dst_desc->desc.rgb_desc,
+ frame, framebuffers[n].framebuffer, width, height,
+ r_scale, g_scale, b_scale, exp_comp);
+ } else if (dst_desc->format_sel == PIX_FMT_YUV) {
+ YUVToYUV(src_desc->desc.yuv_desc, dst_desc->desc.yuv_desc,
+ frame, framebuffers[n].framebuffer, width, height,
+ r_scale, g_scale, b_scale, exp_comp);
+ } else {
+ E("%s: Unexpected destination pixel format %d",
+ __FUNCTION__, dst_desc->format_sel);
+ return -1;
+ }
+ break;
+ case PIX_FMT_BAYER:
+ if (dst_desc->format_sel == PIX_FMT_RGB) {
+ BAYERToRGB(src_desc->desc.bayer_desc, dst_desc->desc.rgb_desc,
+ frame, framebuffers[n].framebuffer, width, height,
+ r_scale, g_scale, b_scale, exp_comp);
+ } else if (dst_desc->format_sel == PIX_FMT_YUV) {
+ BAYERToYUV(src_desc->desc.bayer_desc, dst_desc->desc.yuv_desc,
+ frame, framebuffers[n].framebuffer, width, height,
+ r_scale, g_scale, b_scale, exp_comp);
+ } else {
+ E("%s: Unexpected destination pixel format %d",
+ __FUNCTION__, dst_desc->format_sel);
+ return -1;
+ }
+ break;
+ default:
+ E("%s: Unexpected source pixel format %d",
+ __FUNCTION__, src_desc->format_sel);
+ return -1;
}
}
diff --git a/android/camera/camera-format-converters.h b/android/camera/camera-format-converters.h
index 6f1b492..0422798 100755
--- a/android/camera/camera-format-converters.h
+++ b/android/camera/camera-format-converters.h
@@ -53,6 +53,8 @@ extern int has_converter(uint32_t from, uint32_t to);
* make sure that buffers are large enough to contain entire frame captured
* from the device.
* fbs_num - Number of entries in the 'framebuffers' array.
+ * r_scale, g_scale, b_scale - Per-channel white balance scale.
+ * exp_comp - Exposure compensation.
* Return:
* 0 on success, or non-zero value on failure.
*/
@@ -62,6 +64,10 @@ extern int convert_frame(const void* frame,
int width,
int height,
ClientFrameBuffer* framebuffers,
- int fbs_num);
+ int fbs_num,
+ float r_scale,
+ float g_scale,
+ float b_scale,
+ float exp_comp);
#endif /* ANDROID_CAMERA_CAMERA_FORMAT_CONVERTERS_H */
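For reference, a hedged sketch of invoking convert_frame directly with the new parameters (the NV21/RGB32 constants are assumptions; dst_buf must be sized for the converted frame):

    ClientFrameBuffer fb;
    fb.pixel_format = V4L2_PIX_FMT_RGB32;
    fb.framebuffer  = dst_buf;
    int res = convert_frame(src_buf, V4L2_PIX_FMT_NV21, src_size,
                            width, height, &fb, 1,
                            1.0f, 1.0f, 1.0f,  /* neutral white balance scales */
                            1.0f);             /* neutral exposure compensation */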
diff --git a/android/camera/camera-service.c b/android/camera/camera-service.c
index e551dca..8f0e2ec 100644
--- a/android/camera/camera-service.c
+++ b/android/camera/camera-service.c
@@ -1167,10 +1167,15 @@ _camera_client_query_stop(CameraClient* cc, QemudClient* qc, const char* param)
* Param:
* cc - Queried camera client descriptor.
* qc - Qemu client for the emulated camera.
- * param - Query parameters. Parameters for this query must contain a 'video',
- * and a 'preview' parameters, both must be decimal values, defining size of
- * requested video, and preview frames respectively. Zero value for any of
- * the parameters means that this particular frame is not requested.
+ * param - Query parameters. Parameters for this query are formatted as follows:
+ * video=<size> preview=<size> whiteb=<red>,<green>,<blue> expcomp=<comp>
+ * where:
+ * - 'video' and 'preview' must both be decimal values defining the size of
+ * the requested video and preview frames, respectively. A zero value for
+ * either parameter means that the corresponding frame is not requested.
+ * - 'whiteb' contains the float values required to calculate white balance.
+ * - 'expcomp' contains the float value required to calculate exposure
+ * compensation.
*/
static void
_camera_client_query_frame(CameraClient* cc, QemudClient* qc, const char* param)
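Put concretely, a frame query built to this format might look as follows (values hypothetical; 'video' and 'preview' are frame sizes in bytes):

    video=230400 preview=153600 whiteb=1.2,1.0,0.8 expcomp=1.5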
@@ -1182,6 +1187,8 @@ _camera_client_query_frame(CameraClient* cc, QemudClient* qc, const char* param)
int fbs_num = 0;
size_t payload_size;
uint64_t tick;
+ float r_scale = 1.0f, g_scale = 1.0f, b_scale = 1.0f, exp_comp = 1.0f;
+ char tmp[256];
/* Sanity check. */
if (cc->video_frame == NULL) {
@@ -1201,6 +1208,22 @@ _camera_client_query_frame(CameraClient* cc, QemudClient* qc, const char* param)
return;
}
+ /* Pull white balance values. */
+ if (!_get_param_value(param, "whiteb", tmp, sizeof(tmp))) {
+ if (sscanf(tmp, "%g,%g,%g", &r_scale, &g_scale, &b_scale) != 3) {
+ D("Invalid value '%s' for parameter 'whiteb'", tmp);
+ r_scale = g_scale = b_scale = 1.0f;
+ }
+ }
+
+ /* Pull exposure compensation. */
+ if (!_get_param_value(param, "expcomp", tmp, sizeof(tmp))) {
+ if (sscanf(tmp, "%g", &exp_comp) != 1) {
+ D("Invalid value '%s' for parameter 'whiteb'", tmp);
+ exp_comp = 1.0f;
+ }
+ }
+
/* Verify that framebuffer sizes match the ones that the started camera
* operates with. */
if ((video_size != 0 && cc->video_frame_size != video_size) ||
@@ -1231,7 +1254,8 @@ _camera_client_query_frame(CameraClient* cc, QemudClient* qc, const char* param)
/* Capture new frame. */
tick = _get_timestamp();
- repeat = camera_device_read_frame(cc->camera, fbs, fbs_num);
+ repeat = camera_device_read_frame(cc->camera, fbs, fbs_num,
+ r_scale, g_scale, b_scale, exp_comp);
 /* Note that there is no (known) way to wait on the next frame becoming
 * available, so we can dequeue a frame buffer from the device only when we
@@ -1248,7 +1272,8 @@ _camera_client_query_frame(CameraClient* cc, QemudClient* qc, const char* param)
(_get_timestamp() - tick) < 2000000LL) {
/* Sleep for 10 millisec before repeating the attempt. */
_camera_sleep(10);
- repeat = camera_device_read_frame(cc->camera, fbs, fbs_num);
+ repeat = camera_device_read_frame(cc->camera, fbs, fbs_num,
+ r_scale, g_scale, b_scale, exp_comp);
}
if (repeat == 1 && !cc->frames_cached) {
/* Waited too long for the first frame. */