Merge changes from topic 'vts_camera_tests'
* changes:
Camera: Check for transport errors during VTS
Camera: Add Hal version 1 get-/setParameter tests
Camera: Add Hal version 1 sendcommand tests
Camera: Add Hal version 1 autofocus test cases
Camera: Add Hal version 1 video recording test cases
Camera: Add Hal version 1 image capture test cases
Camera: Add device ver. 1 preview test cases
Camera: Extend camera test cases for camera 1.0 devices
Camera: Add flush test cases
Camera: Enable and check for torch callbacks
diff --git a/camera/provider/2.4/vts/functional/Android.bp b/camera/provider/2.4/vts/functional/Android.bp
index f1215b8..a0be5cb 100644
--- a/camera/provider/2.4/vts/functional/Android.bp
+++ b/camera/provider/2.4/vts/functional/Android.bp
@@ -17,7 +17,8 @@
cc_test {
name: "VtsHalCameraProviderV2_4TargetTest",
defaults: ["hidl_defaults"],
- srcs: ["VtsHalCameraProviderV2_4TargetTest.cpp"],
+ srcs: ["VtsHalCameraProviderV2_4TargetTest.cpp",
+ "CameraParameters.cpp" ],
shared_libs: [
"liblog",
"libhidlbase",
@@ -26,7 +27,10 @@
"libutils",
"android.hardware.camera.provider@2.4",
"android.hardware.camera.device@3.2",
+ "android.hardware.camera.device@1.0",
"libcamera_metadata",
+ "libbinder",
+ "libgui",
"libui"
],
static_libs: ["VtsHalHidlTargetTestBase"],
diff --git a/camera/provider/2.4/vts/functional/CameraParameters.cpp b/camera/provider/2.4/vts/functional/CameraParameters.cpp
new file mode 100644
index 0000000..0285154
--- /dev/null
+++ b/camera/provider/2.4/vts/functional/CameraParameters.cpp
@@ -0,0 +1,537 @@
+/*
+**
+** Copyright 2008, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "CameraParams"
+#include <utils/Log.h>
+
+#include <string.h>
+#include <stdlib.h>
+#include "CameraParameters.h"
+#include <system/graphics.h>
+
+namespace android {
+// Parameter keys to communicate between camera application and driver.
+const char CameraParameters::KEY_PREVIEW_SIZE[] = "preview-size";
+const char CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES[] = "preview-size-values";
+const char CameraParameters::KEY_PREVIEW_FORMAT[] = "preview-format";
+const char CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS[] = "preview-format-values";
+const char CameraParameters::KEY_PREVIEW_FRAME_RATE[] = "preview-frame-rate";
+const char CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES[] = "preview-frame-rate-values";
+const char CameraParameters::KEY_PREVIEW_FPS_RANGE[] = "preview-fps-range";
+const char CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE[] = "preview-fps-range-values";
+const char CameraParameters::KEY_PICTURE_SIZE[] = "picture-size";
+const char CameraParameters::KEY_SUPPORTED_PICTURE_SIZES[] = "picture-size-values";
+const char CameraParameters::KEY_PICTURE_FORMAT[] = "picture-format";
+const char CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS[] = "picture-format-values";
+const char CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH[] = "jpeg-thumbnail-width";
+const char CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT[] = "jpeg-thumbnail-height";
+const char CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES[] = "jpeg-thumbnail-size-values";
+const char CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY[] = "jpeg-thumbnail-quality";
+const char CameraParameters::KEY_JPEG_QUALITY[] = "jpeg-quality";
+const char CameraParameters::KEY_ROTATION[] = "rotation";
+const char CameraParameters::KEY_GPS_LATITUDE[] = "gps-latitude";
+const char CameraParameters::KEY_GPS_LONGITUDE[] = "gps-longitude";
+const char CameraParameters::KEY_GPS_ALTITUDE[] = "gps-altitude";
+const char CameraParameters::KEY_GPS_TIMESTAMP[] = "gps-timestamp";
+const char CameraParameters::KEY_GPS_PROCESSING_METHOD[] = "gps-processing-method";
+const char CameraParameters::KEY_WHITE_BALANCE[] = "whitebalance";
+const char CameraParameters::KEY_SUPPORTED_WHITE_BALANCE[] = "whitebalance-values";
+const char CameraParameters::KEY_EFFECT[] = "effect";
+const char CameraParameters::KEY_SUPPORTED_EFFECTS[] = "effect-values";
+const char CameraParameters::KEY_ANTIBANDING[] = "antibanding";
+const char CameraParameters::KEY_SUPPORTED_ANTIBANDING[] = "antibanding-values";
+const char CameraParameters::KEY_SCENE_MODE[] = "scene-mode";
+const char CameraParameters::KEY_SUPPORTED_SCENE_MODES[] = "scene-mode-values";
+const char CameraParameters::KEY_FLASH_MODE[] = "flash-mode";
+const char CameraParameters::KEY_SUPPORTED_FLASH_MODES[] = "flash-mode-values";
+const char CameraParameters::KEY_FOCUS_MODE[] = "focus-mode";
+const char CameraParameters::KEY_SUPPORTED_FOCUS_MODES[] = "focus-mode-values";
+const char CameraParameters::KEY_MAX_NUM_FOCUS_AREAS[] = "max-num-focus-areas";
+const char CameraParameters::KEY_FOCUS_AREAS[] = "focus-areas";
+const char CameraParameters::KEY_FOCAL_LENGTH[] = "focal-length";
+const char CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE[] = "horizontal-view-angle";
+const char CameraParameters::KEY_VERTICAL_VIEW_ANGLE[] = "vertical-view-angle";
+const char CameraParameters::KEY_EXPOSURE_COMPENSATION[] = "exposure-compensation";
+const char CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION[] = "max-exposure-compensation";
+const char CameraParameters::KEY_MIN_EXPOSURE_COMPENSATION[] = "min-exposure-compensation";
+const char CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP[] = "exposure-compensation-step";
+const char CameraParameters::KEY_AUTO_EXPOSURE_LOCK[] = "auto-exposure-lock";
+const char CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED[] = "auto-exposure-lock-supported";
+const char CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK[] = "auto-whitebalance-lock";
+const char CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED[] = "auto-whitebalance-lock-supported";
+const char CameraParameters::KEY_MAX_NUM_METERING_AREAS[] = "max-num-metering-areas";
+const char CameraParameters::KEY_METERING_AREAS[] = "metering-areas";
+const char CameraParameters::KEY_ZOOM[] = "zoom";
+const char CameraParameters::KEY_MAX_ZOOM[] = "max-zoom";
+const char CameraParameters::KEY_ZOOM_RATIOS[] = "zoom-ratios";
+const char CameraParameters::KEY_ZOOM_SUPPORTED[] = "zoom-supported";
+const char CameraParameters::KEY_SMOOTH_ZOOM_SUPPORTED[] = "smooth-zoom-supported";
+const char CameraParameters::KEY_FOCUS_DISTANCES[] = "focus-distances";
+const char CameraParameters::KEY_VIDEO_FRAME_FORMAT[] = "video-frame-format";
+const char CameraParameters::KEY_VIDEO_SIZE[] = "video-size";
+const char CameraParameters::KEY_SUPPORTED_VIDEO_SIZES[] = "video-size-values";
+const char CameraParameters::KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO[] = "preferred-preview-size-for-video";
+const char CameraParameters::KEY_MAX_NUM_DETECTED_FACES_HW[] = "max-num-detected-faces-hw";
+const char CameraParameters::KEY_MAX_NUM_DETECTED_FACES_SW[] = "max-num-detected-faces-sw";
+const char CameraParameters::KEY_RECORDING_HINT[] = "recording-hint";
+const char CameraParameters::KEY_VIDEO_SNAPSHOT_SUPPORTED[] = "video-snapshot-supported";
+const char CameraParameters::KEY_VIDEO_STABILIZATION[] = "video-stabilization";
+const char CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED[] = "video-stabilization-supported";
+const char CameraParameters::KEY_LIGHTFX[] = "light-fx";
+
+const char CameraParameters::TRUE[] = "true";
+const char CameraParameters::FALSE[] = "false";
+const char CameraParameters::FOCUS_DISTANCE_INFINITY[] = "Infinity";
+
+// Values for white balance settings.
+const char CameraParameters::WHITE_BALANCE_AUTO[] = "auto";
+const char CameraParameters::WHITE_BALANCE_INCANDESCENT[] = "incandescent";
+const char CameraParameters::WHITE_BALANCE_FLUORESCENT[] = "fluorescent";
+const char CameraParameters::WHITE_BALANCE_WARM_FLUORESCENT[] = "warm-fluorescent";
+const char CameraParameters::WHITE_BALANCE_DAYLIGHT[] = "daylight";
+const char CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT[] = "cloudy-daylight";
+const char CameraParameters::WHITE_BALANCE_TWILIGHT[] = "twilight";
+const char CameraParameters::WHITE_BALANCE_SHADE[] = "shade";
+
+// Values for effect settings.
+const char CameraParameters::EFFECT_NONE[] = "none";
+const char CameraParameters::EFFECT_MONO[] = "mono";
+const char CameraParameters::EFFECT_NEGATIVE[] = "negative";
+const char CameraParameters::EFFECT_SOLARIZE[] = "solarize";
+const char CameraParameters::EFFECT_SEPIA[] = "sepia";
+const char CameraParameters::EFFECT_POSTERIZE[] = "posterize";
+const char CameraParameters::EFFECT_WHITEBOARD[] = "whiteboard";
+const char CameraParameters::EFFECT_BLACKBOARD[] = "blackboard";
+const char CameraParameters::EFFECT_AQUA[] = "aqua";
+
+// Values for antibanding settings.
+const char CameraParameters::ANTIBANDING_AUTO[] = "auto";
+const char CameraParameters::ANTIBANDING_50HZ[] = "50hz";
+const char CameraParameters::ANTIBANDING_60HZ[] = "60hz";
+const char CameraParameters::ANTIBANDING_OFF[] = "off";
+
+// Values for flash mode settings.
+const char CameraParameters::FLASH_MODE_OFF[] = "off";
+const char CameraParameters::FLASH_MODE_AUTO[] = "auto";
+const char CameraParameters::FLASH_MODE_ON[] = "on";
+const char CameraParameters::FLASH_MODE_RED_EYE[] = "red-eye";
+const char CameraParameters::FLASH_MODE_TORCH[] = "torch";
+
+// Values for scene mode settings.
+const char CameraParameters::SCENE_MODE_AUTO[] = "auto";
+const char CameraParameters::SCENE_MODE_ACTION[] = "action";
+const char CameraParameters::SCENE_MODE_PORTRAIT[] = "portrait";
+const char CameraParameters::SCENE_MODE_LANDSCAPE[] = "landscape";
+const char CameraParameters::SCENE_MODE_NIGHT[] = "night";
+const char CameraParameters::SCENE_MODE_NIGHT_PORTRAIT[] = "night-portrait";
+const char CameraParameters::SCENE_MODE_THEATRE[] = "theatre";
+const char CameraParameters::SCENE_MODE_BEACH[] = "beach";
+const char CameraParameters::SCENE_MODE_SNOW[] = "snow";
+const char CameraParameters::SCENE_MODE_SUNSET[] = "sunset";
+const char CameraParameters::SCENE_MODE_STEADYPHOTO[] = "steadyphoto";
+const char CameraParameters::SCENE_MODE_FIREWORKS[] = "fireworks";
+const char CameraParameters::SCENE_MODE_SPORTS[] = "sports";
+const char CameraParameters::SCENE_MODE_PARTY[] = "party";
+const char CameraParameters::SCENE_MODE_CANDLELIGHT[] = "candlelight";
+const char CameraParameters::SCENE_MODE_BARCODE[] = "barcode";
+const char CameraParameters::SCENE_MODE_HDR[] = "hdr";
+
+const char CameraParameters::PIXEL_FORMAT_YUV422SP[] = "yuv422sp";
+const char CameraParameters::PIXEL_FORMAT_YUV420SP[] = "yuv420sp";
+const char CameraParameters::PIXEL_FORMAT_YUV422I[] = "yuv422i-yuyv";
+const char CameraParameters::PIXEL_FORMAT_YUV420P[] = "yuv420p";
+const char CameraParameters::PIXEL_FORMAT_RGB565[] = "rgb565";
+const char CameraParameters::PIXEL_FORMAT_RGBA8888[] = "rgba8888";
+const char CameraParameters::PIXEL_FORMAT_JPEG[] = "jpeg";
+const char CameraParameters::PIXEL_FORMAT_BAYER_RGGB[] = "bayer-rggb";
+const char CameraParameters::PIXEL_FORMAT_ANDROID_OPAQUE[] = "android-opaque";
+
+// Values for focus mode settings.
+const char CameraParameters::FOCUS_MODE_AUTO[] = "auto";
+const char CameraParameters::FOCUS_MODE_INFINITY[] = "infinity";
+const char CameraParameters::FOCUS_MODE_MACRO[] = "macro";
+const char CameraParameters::FOCUS_MODE_FIXED[] = "fixed";
+const char CameraParameters::FOCUS_MODE_EDOF[] = "edof";
+const char CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO[] = "continuous-video";
+const char CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE[] = "continuous-picture";
+
+// Values for light fx settings
+const char CameraParameters::LIGHTFX_LOWLIGHT[] = "low-light";
+const char CameraParameters::LIGHTFX_HDR[] = "high-dynamic-range";
+
+CameraParameters::CameraParameters()
+ : mMap()
+{
+}
+
+CameraParameters::~CameraParameters()
+{
+}
+
+String8 CameraParameters::flatten() const
+{
+ String8 flattened("");
+ size_t size = mMap.size();
+
+ for (size_t i = 0; i < size; i++) {
+ String8 k, v;
+ k = mMap.keyAt(i);
+ v = mMap.valueAt(i);
+
+ flattened += k;
+ flattened += "=";
+ flattened += v;
+ if (i != size-1)
+ flattened += ";";
+ }
+
+ return flattened;
+}
+
+void CameraParameters::unflatten(const String8 &params)
+{
+ const char *a = params.string();
+ const char *b;
+
+ mMap.clear();
+
+ for (;;) {
+ // Find the bounds of the key name.
+ b = strchr(a, '=');
+ if (b == 0)
+ break;
+
+ // Create the key string.
+ String8 k(a, (size_t)(b-a));
+
+ // Find the value.
+ a = b+1;
+ b = strchr(a, ';');
+ if (b == 0) {
+ // If there's no semicolon, this is the last item.
+ String8 v(a);
+ mMap.add(k, v);
+ break;
+ }
+
+ String8 v(a, (size_t)(b-a));
+ mMap.add(k, v);
+ a = b+1;
+ }
+}
+
+
+void CameraParameters::set(const char *key, const char *value)
+{
+ // Keys and values must not contain '=' or ';' (strspn() could also be used for this check).
+ if (strchr(key, '=') || strchr(key, ';')) {
+ // ALOGE("Key \"%s\"contains invalid character (= or ;)", key);
+ return;
+ }
+
+ if (strchr(value, '=') || strchr(value, ';')) {
+ // ALOGE("Value \"%s\"contains invalid character (= or ;)", value);
+ return;
+ }
+
+ mMap.replaceValueFor(String8(key), String8(value));
+}
+
+void CameraParameters::set(const char *key, int value)
+{
+ char str[16];
+ sprintf(str, "%d", value);
+ set(key, str);
+}
+
+void CameraParameters::setFloat(const char *key, float value)
+{
+ char str[16]; // 14 should be enough. We overestimate to be safe.
+ snprintf(str, sizeof(str), "%g", value);
+ set(key, str);
+}
+
+const char *CameraParameters::get(const char *key) const
+{
+ String8 v = mMap.valueFor(String8(key));
+ if (v.length() == 0)
+ return 0;
+ return v.string();
+}
+
+int CameraParameters::getInt(const char *key) const
+{
+ const char *v = get(key);
+ if (v == 0)
+ return -1;
+ return strtol(v, 0, 0);
+}
+
+float CameraParameters::getFloat(const char *key) const
+{
+ const char *v = get(key);
+ if (v == 0) return -1;
+ return strtof(v, 0);
+}
+
+void CameraParameters::remove(const char *key)
+{
+ mMap.removeItem(String8(key));
+}
+
+// Parse string like "640x480" or "10000,20000"
+static int parse_pair(const char *str, int *first, int *second, char delim,
+ char **endptr = NULL)
+{
+ // Find the first integer.
+ char *end;
+ int w = (int)strtol(str, &end, 10);
+ // If a delimiter does not immediately follow, give up.
+ if (*end != delim) {
+ ALOGE("Cannot find delimeter (%c) in str=%s", delim, str);
+ return -1;
+ }
+
+ // Find the second integer, immediately after the delimiter.
+ int h = (int)strtol(end+1, &end, 10);
+
+ *first = w;
+ *second = h;
+
+ if (endptr) {
+ *endptr = end;
+ }
+
+ return 0;
+}
+
+static void parseSizesList(const char *sizesStr, Vector<Size> &sizes)
+{
+ if (sizesStr == 0) {
+ return;
+ }
+
+ char *sizeStartPtr = (char *)sizesStr;
+
+ while (true) {
+ int width, height;
+ int success = parse_pair(sizeStartPtr, &width, &height, 'x',
+ &sizeStartPtr);
+ if (success == -1 || (*sizeStartPtr != ',' && *sizeStartPtr != '\0')) {
+ ALOGE("Picture sizes string \"%s\" contains invalid character.", sizesStr);
+ return;
+ }
+ sizes.push(Size(width, height));
+
+ if (*sizeStartPtr == '\0') {
+ return;
+ }
+ sizeStartPtr++;
+ }
+}
+
+void CameraParameters::setPreviewSize(int width, int height)
+{
+ char str[32];
+ sprintf(str, "%dx%d", width, height);
+ set(KEY_PREVIEW_SIZE, str);
+}
+
+void CameraParameters::getPreviewSize(int *width, int *height) const
+{
+ *width = *height = -1;
+ // Get the current string, if it doesn't exist, leave the -1x-1
+ const char *p = get(KEY_PREVIEW_SIZE);
+ if (p == 0) return;
+ parse_pair(p, width, height, 'x');
+}
+
+void CameraParameters::getPreferredPreviewSizeForVideo(int *width, int *height) const
+{
+ *width = *height = -1;
+ const char *p = get(KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO);
+ if (p == 0) return;
+ parse_pair(p, width, height, 'x');
+}
+
+void CameraParameters::getSupportedPreviewSizes(Vector<Size> &sizes) const
+{
+ const char *previewSizesStr = get(KEY_SUPPORTED_PREVIEW_SIZES);
+ parseSizesList(previewSizesStr, sizes);
+}
+
+void CameraParameters::setVideoSize(int width, int height)
+{
+ char str[32];
+ sprintf(str, "%dx%d", width, height);
+ set(KEY_VIDEO_SIZE, str);
+}
+
+void CameraParameters::getVideoSize(int *width, int *height) const
+{
+ *width = *height = -1;
+ const char *p = get(KEY_VIDEO_SIZE);
+ if (p == 0) return;
+ parse_pair(p, width, height, 'x');
+}
+
+void CameraParameters::getSupportedVideoSizes(Vector<Size> &sizes) const
+{
+ const char *videoSizesStr = get(KEY_SUPPORTED_VIDEO_SIZES);
+ parseSizesList(videoSizesStr, sizes);
+}
+
+void CameraParameters::setPreviewFrameRate(int fps)
+{
+ set(KEY_PREVIEW_FRAME_RATE, fps);
+}
+
+int CameraParameters::getPreviewFrameRate() const
+{
+ return getInt(KEY_PREVIEW_FRAME_RATE);
+}
+
+void CameraParameters::getPreviewFpsRange(int *min_fps, int *max_fps) const
+{
+ *min_fps = *max_fps = -1;
+ const char *p = get(KEY_PREVIEW_FPS_RANGE);
+ if (p == 0) return;
+ parse_pair(p, min_fps, max_fps, ',');
+}
+
+void CameraParameters::setPreviewFormat(const char *format)
+{
+ set(KEY_PREVIEW_FORMAT, format);
+}
+
+const char *CameraParameters::getPreviewFormat() const
+{
+ return get(KEY_PREVIEW_FORMAT);
+}
+
+void CameraParameters::setPictureSize(int width, int height)
+{
+ char str[32];
+ sprintf(str, "%dx%d", width, height);
+ set(KEY_PICTURE_SIZE, str);
+}
+
+void CameraParameters::getPictureSize(int *width, int *height) const
+{
+ *width = *height = -1;
+ // Get the current string, if it doesn't exist, leave the -1x-1
+ const char *p = get(KEY_PICTURE_SIZE);
+ if (p == 0) return;
+ parse_pair(p, width, height, 'x');
+}
+
+void CameraParameters::getSupportedPictureSizes(Vector<Size> &sizes) const
+{
+ const char *pictureSizesStr = get(KEY_SUPPORTED_PICTURE_SIZES);
+ parseSizesList(pictureSizesStr, sizes);
+}
+
+void CameraParameters::setPictureFormat(const char *format)
+{
+ set(KEY_PICTURE_FORMAT, format);
+}
+
+const char *CameraParameters::getPictureFormat() const
+{
+ return get(KEY_PICTURE_FORMAT);
+}
+
+void CameraParameters::dump() const
+{
+ ALOGD("dump: mMap.size = %zu", mMap.size());
+ for (size_t i = 0; i < mMap.size(); i++) {
+ String8 k, v;
+ k = mMap.keyAt(i);
+ v = mMap.valueAt(i);
+ ALOGD("%s: %s\n", k.string(), v.string());
+ }
+}
+
+status_t CameraParameters::dump(int fd, const Vector<String16>& /*args*/) const
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+ snprintf(buffer, 255, "CameraParameters::dump: mMap.size = %zu\n", mMap.size());
+ result.append(buffer);
+ for (size_t i = 0; i < mMap.size(); i++) {
+ String8 k, v;
+ k = mMap.keyAt(i);
+ v = mMap.valueAt(i);
+ snprintf(buffer, 255, "\t%s: %s\n", k.string(), v.string());
+ result.append(buffer);
+ }
+ write(fd, result.string(), result.size());
+ return NO_ERROR;
+}
+
+void CameraParameters::getSupportedPreviewFormats(Vector<int>& formats) const {
+ const char* supportedPreviewFormats =
+ get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS);
+
+ if (supportedPreviewFormats == NULL) {
+ ALOGW("%s: No supported preview formats.", __FUNCTION__);
+ return;
+ }
+
+ String8 fmtStr(supportedPreviewFormats);
+ char* prevFmts = fmtStr.lockBuffer(fmtStr.size());
+
+ char* savePtr;
+ char* fmt = strtok_r(prevFmts, ",", &savePtr);
+ while (fmt) {
+ int actual = previewFormatToEnum(fmt);
+ if (actual != -1) {
+ formats.add(actual);
+ }
+ fmt = strtok_r(NULL, ",", &savePtr);
+ }
+ fmtStr.unlockBuffer(fmtStr.size());
+}
+
+
+int CameraParameters::previewFormatToEnum(const char* format) {
+ return
+ !format ?
+ HAL_PIXEL_FORMAT_YCrCb_420_SP :
+ !strcmp(format, PIXEL_FORMAT_YUV422SP) ?
+ HAL_PIXEL_FORMAT_YCbCr_422_SP : // NV16
+ !strcmp(format, PIXEL_FORMAT_YUV420SP) ?
+ HAL_PIXEL_FORMAT_YCrCb_420_SP : // NV21
+ !strcmp(format, PIXEL_FORMAT_YUV422I) ?
+ HAL_PIXEL_FORMAT_YCbCr_422_I : // YUY2
+ !strcmp(format, PIXEL_FORMAT_YUV420P) ?
+ HAL_PIXEL_FORMAT_YV12 : // YV12
+ !strcmp(format, PIXEL_FORMAT_RGB565) ?
+ HAL_PIXEL_FORMAT_RGB_565 : // RGB565
+ !strcmp(format, PIXEL_FORMAT_RGBA8888) ?
+ HAL_PIXEL_FORMAT_RGBA_8888 : // RGB8888
+ !strcmp(format, PIXEL_FORMAT_BAYER_RGGB) ?
+ HAL_PIXEL_FORMAT_RAW16 : // Raw sensor data
+ -1;
+}
+
+bool CameraParameters::isEmpty() const {
+ return mMap.isEmpty();
+}
+
+}; // namespace android
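
For reference, a minimal sketch of the flatten()/unflatten() representation implemented above: entries are serialized as "key=value" pairs joined by ';'. The helper function and variable names below are illustrative only, not part of the change.

    #include "CameraParameters.h"

    using android::CameraParameters;
    using android::String8;

    void flattenRoundTrip() {
        CameraParameters params;
        params.setPreviewSize(640, 480);    // stored under KEY_PREVIEW_SIZE as "640x480"
        params.set(CameraParameters::KEY_PREVIEW_FORMAT,
                   CameraParameters::PIXEL_FORMAT_YUV420SP);

        // flatten() emits "key=value" pairs separated by ';' (entry order
        // follows the underlying keyed vector, not insertion order).
        String8 flat = params.flatten();

        // unflatten() parses the same representation back into the map.
        CameraParameters copy(flat);
        int width = 0, height = 0;
        copy.getPreviewSize(&width, &height);   // width == 640, height == 480
    }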
diff --git a/camera/provider/2.4/vts/functional/CameraParameters.h b/camera/provider/2.4/vts/functional/CameraParameters.h
new file mode 100644
index 0000000..ba33ffe
--- /dev/null
+++ b/camera/provider/2.4/vts/functional/CameraParameters.h
@@ -0,0 +1,699 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CAMERA_PARAMETERS_H
+#define ANDROID_HARDWARE_CAMERA_PARAMETERS_H
+
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+
+namespace android {
+
+struct Size {
+ int width;
+ int height;
+
+ Size() {
+ width = 0;
+ height = 0;
+ }
+
+ Size(int w, int h) {
+ width = w;
+ height = h;
+ }
+};
+
+class CameraParameters
+{
+public:
+ CameraParameters();
+ CameraParameters(const String8 &params) { unflatten(params); }
+ ~CameraParameters();
+
+ String8 flatten() const;
+ void unflatten(const String8 &params);
+
+ void set(const char *key, const char *value);
+ void set(const char *key, int value);
+ void setFloat(const char *key, float value);
+ const char *get(const char *key) const;
+ int getInt(const char *key) const;
+ float getFloat(const char *key) const;
+
+ void remove(const char *key);
+
+ void setPreviewSize(int width, int height);
+ void getPreviewSize(int *width, int *height) const;
+ void getSupportedPreviewSizes(Vector<Size> &sizes) const;
+
+ // Set the dimensions in pixels to the given width and height
+ // for video frames. The given width and height must be one
+ // of the supported dimensions returned from
+ // getSupportedVideoSizes(). Must not be called if
+ // getSupportedVideoSizes() returns an empty Vector of Size.
+ void setVideoSize(int width, int height);
+ // Retrieve the current dimensions (width and height)
+ // in pixels for video frames, which must be one of the
+ // supported dimensions returned from getSupportedVideoSizes().
+ // Must not be called if getSupportedVideoSizes() returns an
+ // empty Vector of Size.
+ void getVideoSize(int *width, int *height) const;
+ // Retrieve a Vector of supported dimensions (width and height)
+ // in pixels for video frames. If the returned Vector of sizes is
+ // empty, the camera does not support calls to setVideoSize()
+ // or getVideoSize(). In addition, it indicates that the camera
+ // has only a single output and does not have separate outputs
+ // for video frames and preview frames.
+ void getSupportedVideoSizes(Vector<Size> &sizes) const;
+ // Retrieve the preferred preview size (width and height) in pixels
+ // for video recording. The given width and height must be one of
+ // supported preview sizes returned from getSupportedPreviewSizes().
+ // Must not be called if getSupportedVideoSizes() returns an empty
+ // Vector of Size. If getSupportedVideoSizes() returns an empty
+ // Vector of Size, the width and height returned from this method
+ // is invalid, and is "-1x-1".
+ void getPreferredPreviewSizeForVideo(int *width, int *height) const;
+
+ void setPreviewFrameRate(int fps);
+ int getPreviewFrameRate() const;
+ void getPreviewFpsRange(int *min_fps, int *max_fps) const;
+ void setPreviewFormat(const char *format);
+ const char *getPreviewFormat() const;
+ void setPictureSize(int width, int height);
+ void getPictureSize(int *width, int *height) const;
+ void getSupportedPictureSizes(Vector<Size> &sizes) const;
+ void setPictureFormat(const char *format);
+ const char *getPictureFormat() const;
+
+ void dump() const;
+ status_t dump(int fd, const Vector<String16>& args) const;
+
+ /**
+ * Returns a Vector containing the supported preview formats
+ * as enums given in graphics.h.
+ */
+ void getSupportedPreviewFormats(Vector<int>& formats) const;
+
+ // Returns true if no keys are present
+ bool isEmpty() const;
+
+ // Parameter keys to communicate between camera application and driver.
+ // The access (read/write, read only, or write only) is viewed from the
+ // perspective of applications, not driver.
+
+ // Preview frame size in pixels (width x height).
+ // Example value: "480x320". Read/Write.
+ static const char KEY_PREVIEW_SIZE[];
+ // Supported preview frame sizes in pixels.
+ // Example value: "800x600,480x320". Read only.
+ static const char KEY_SUPPORTED_PREVIEW_SIZES[];
+ // The current minimum and maximum preview fps. This controls the rate of
+ // preview frames received (CAMERA_MSG_PREVIEW_FRAME). The minimum and
+ // maximum fps must be one of the elements from
+ // KEY_SUPPORTED_PREVIEW_FPS_RANGE parameter.
+ // Example value: "10500,26623"
+ static const char KEY_PREVIEW_FPS_RANGE[];
+ // The supported preview fps (frame-per-second) ranges. Each range contains
+ // a minimum fps and a maximum fps. If the minimum fps equals the maximum fps,
+ // the camera outputs frames at a fixed frame rate; otherwise the frame rate
+ // varies automatically. The actual frame rate fluctuates between the
+ // minimum and the maximum. The list has at least one element. The list is
+ // sorted from small to large (first by maximum fps and then minimum fps).
+ // Example value: "(10500,26623),(15000,26623),(30000,30000)"
+ static const char KEY_SUPPORTED_PREVIEW_FPS_RANGE[];
+ // The image format for preview frames. See CAMERA_MSG_PREVIEW_FRAME in
+ // frameworks/av/include/camera/Camera.h. The default is
+ // PIXEL_FORMAT_YUV420SP. Example value: "yuv420sp" or PIXEL_FORMAT_XXX
+ // constants. Read/write.
+ static const char KEY_PREVIEW_FORMAT[];
+ // Supported image formats for preview frames.
+ // Example value: "yuv420sp,yuv422i-yuyv". Read only.
+ static const char KEY_SUPPORTED_PREVIEW_FORMATS[];
+ // Number of preview frames per second. This is the target frame rate. The
+ // actual frame rate depends on the driver.
+ // Example value: "15". Read/write.
+ static const char KEY_PREVIEW_FRAME_RATE[];
+ // Supported number of preview frames per second.
+ // Example value: "24,15,10". Read.
+ static const char KEY_SUPPORTED_PREVIEW_FRAME_RATES[];
+ // The dimensions for captured pictures in pixels (width x height).
+ // Example value: "1024x768". Read/write.
+ static const char KEY_PICTURE_SIZE[];
+ // Supported dimensions for captured pictures in pixels.
+ // Example value: "2048x1536,1024x768". Read only.
+ static const char KEY_SUPPORTED_PICTURE_SIZES[];
+ // The image format for captured pictures. See CAMERA_MSG_COMPRESSED_IMAGE
+ // in frameworks/base/include/camera/Camera.h.
+ // Example value: "jpeg" or PIXEL_FORMAT_XXX constants. Read/write.
+ static const char KEY_PICTURE_FORMAT[];
+ // Supported image formats for captured pictures.
+ // Example value: "jpeg,rgb565". Read only.
+ static const char KEY_SUPPORTED_PICTURE_FORMATS[];
+ // The width (in pixels) of EXIF thumbnail in Jpeg picture.
+ // Example value: "512". Read/write.
+ static const char KEY_JPEG_THUMBNAIL_WIDTH[];
+ // The height (in pixels) of EXIF thumbnail in Jpeg picture.
+ // Example value: "384". Read/write.
+ static const char KEY_JPEG_THUMBNAIL_HEIGHT[];
+ // Supported EXIF thumbnail sizes (width x height). 0x0 means no thumbnail
+ // in EXIF.
+ // Example value: "512x384,320x240,0x0". Read only.
+ static const char KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES[];
+ // The quality of the EXIF thumbnail in Jpeg picture. The range is 1 to 100,
+ // with 100 being the best.
+ // Example value: "90". Read/write.
+ static const char KEY_JPEG_THUMBNAIL_QUALITY[];
+ // Jpeg quality of captured picture. The range is 1 to 100, with 100 being
+ // the best.
+ // Example value: "90". Read/write.
+ static const char KEY_JPEG_QUALITY[];
+ // The rotation angle in degrees relative to the orientation of the camera.
+ // This affects the pictures returned from CAMERA_MSG_COMPRESSED_IMAGE. The
+ // camera driver may set orientation in the EXIF header without rotating the
+ // picture. Or the driver may rotate the picture and the EXIF thumbnail. If
+ // the Jpeg picture is rotated, the orientation in the EXIF header will be
+ // missing or 1 (row #0 is top and column #0 is left side).
+ //
+ // Note that the JPEG pictures of front-facing cameras are not mirrored
+ // as in preview display.
+ //
+ // For example, suppose the natural orientation of the device is portrait.
+ // The device is rotated 270 degrees clockwise, so the device orientation is
+ // 270. Suppose a back-facing camera sensor is mounted in landscape and the
+ // top side of the camera sensor is aligned with the right edge of the
+ // display in natural orientation. So the camera orientation is 90. The
+ // rotation should be set to 0 (270 + 90).
+ //
+ // Example value: "0" or "90" or "180" or "270". Write only.
+ static const char KEY_ROTATION[];
+ // GPS latitude coordinate. GPSLatitude and GPSLatitudeRef will be stored in
+ // JPEG EXIF header.
+ // Example value: "25.032146" or "-33.462809". Write only.
+ static const char KEY_GPS_LATITUDE[];
+ // GPS longitude coordinate. GPSLongitude and GPSLongitudeRef will be stored
+ // in JPEG EXIF header.
+ // Example value: "121.564448" or "-70.660286". Write only.
+ static const char KEY_GPS_LONGITUDE[];
+ // GPS altitude. GPSAltitude and GPSAltitudeRef will be stored in JPEG EXIF
+ // header.
+ // Example value: "21.0" or "-5". Write only.
+ static const char KEY_GPS_ALTITUDE[];
+ // GPS timestamp (UTC in seconds since January 1, 1970). This should be
+ // stored in JPEG EXIF header.
+ // Example value: "1251192757". Write only.
+ static const char KEY_GPS_TIMESTAMP[];
+ // GPS Processing Method
+ // Example value: "GPS" or "NETWORK". Write only.
+ static const char KEY_GPS_PROCESSING_METHOD[];
+ // Current white balance setting.
+ // Example value: "auto" or WHITE_BALANCE_XXX constants. Read/write.
+ static const char KEY_WHITE_BALANCE[];
+ // Supported white balance settings.
+ // Example value: "auto,incandescent,daylight". Read only.
+ static const char KEY_SUPPORTED_WHITE_BALANCE[];
+ // Current color effect setting.
+ // Example value: "none" or EFFECT_XXX constants. Read/write.
+ static const char KEY_EFFECT[];
+ // Supported color effect settings.
+ // Example value: "none,mono,sepia". Read only.
+ static const char KEY_SUPPORTED_EFFECTS[];
+ // Current antibanding setting.
+ // Example value: "auto" or ANTIBANDING_XXX constants. Read/write.
+ static const char KEY_ANTIBANDING[];
+ // Supported antibanding settings.
+ // Example value: "auto,50hz,60hz,off". Read only.
+ static const char KEY_SUPPORTED_ANTIBANDING[];
+ // Current scene mode.
+ // Example value: "auto" or SCENE_MODE_XXX constants. Read/write.
+ static const char KEY_SCENE_MODE[];
+ // Supported scene mode settings.
+ // Example value: "auto,night,fireworks". Read only.
+ static const char KEY_SUPPORTED_SCENE_MODES[];
+ // Current flash mode.
+ // Example value: "auto" or FLASH_MODE_XXX constants. Read/write.
+ static const char KEY_FLASH_MODE[];
+ // Supported flash modes.
+ // Example value: "auto,on,off". Read only.
+ static const char KEY_SUPPORTED_FLASH_MODES[];
+ // Current focus mode. This will not be empty. Applications should call
+ // CameraHardwareInterface.autoFocus to start the focus if focus mode is
+ // FOCUS_MODE_AUTO or FOCUS_MODE_MACRO.
+ // Example value: "auto" or FOCUS_MODE_XXX constants. Read/write.
+ static const char KEY_FOCUS_MODE[];
+ // Supported focus modes.
+ // Example value: "auto,macro,fixed". Read only.
+ static const char KEY_SUPPORTED_FOCUS_MODES[];
+ // The maximum number of focus areas supported. This is the maximum length
+ // of KEY_FOCUS_AREAS.
+ // Example value: "0" or "2". Read only.
+ static const char KEY_MAX_NUM_FOCUS_AREAS[];
+ // Current focus areas.
+ //
+ // Before accessing this parameter, apps should check
+ // KEY_MAX_NUM_FOCUS_AREAS first to know the maximum number of focus areas.
+ // If the value is 0, focus areas are not supported.
+ //
+ // Each focus area is a five-element int array. The first four elements are
+ // the rectangle of the area (left, top, right, bottom). The direction is
+ // relative to the sensor orientation, that is, what the sensor sees. The
+ // direction is not affected by the rotation or mirroring of
+ // CAMERA_CMD_SET_DISPLAY_ORIENTATION. Coordinates range from -1000 to 1000.
+ // (-1000,-1000) is the upper left point. (1000, 1000) is the lower right
+ // point. The width and height of focus areas cannot be 0 or negative.
+ //
+ // The fifth element is the weight. Values for weight must range from 1 to
+ // 1000. The weight should be interpreted as a per-pixel weight - all
+ // pixels in the area have the specified weight. This means a small area
+ // with the same weight as a larger area will have less influence on the
+ // focusing than the larger area. Focus areas can partially overlap and the
+ // driver will add the weights in the overlap region.
+ //
+ // A special case of a single focus area (0,0,0,0,0) lets the driver decide
+ // the focus area. For example, the driver may use more signals to decide
+ // focus areas and change them dynamically. Apps can set (0,0,0,0,0) if they
+ // want the driver to decide focus areas.
+ //
+ // Focus areas are relative to the current field of view (KEY_ZOOM). No
+ // matter what the zoom level is, (-1000,-1000) represents the top of the
+ // currently visible camera frame. The focus area cannot be set to be
+ // outside the current field of view, even when using zoom.
+ //
+ // Focus area only has effect if the current focus mode is FOCUS_MODE_AUTO,
+ // FOCUS_MODE_MACRO, FOCUS_MODE_CONTINUOUS_VIDEO, or
+ // FOCUS_MODE_CONTINUOUS_PICTURE.
+ // Example value: "(-10,-10,0,0,300),(0,0,10,10,700)". Read/write.
+ static const char KEY_FOCUS_AREAS[];
+ // Focal length in millimeter.
+ // Example value: "4.31". Read only.
+ static const char KEY_FOCAL_LENGTH[];
+ // Horizontal angle of view in degrees.
+ // Example value: "54.8". Read only.
+ static const char KEY_HORIZONTAL_VIEW_ANGLE[];
+ // Vertical angle of view in degrees.
+ // Example value: "42.5". Read only.
+ static const char KEY_VERTICAL_VIEW_ANGLE[];
+ // Exposure compensation index. 0 means exposure is not adjusted.
+ // Example value: "-5" or "5". Read/write.
+ static const char KEY_EXPOSURE_COMPENSATION[];
+ // The maximum exposure compensation index (>=0).
+ // Example value: "6". Read only.
+ static const char KEY_MAX_EXPOSURE_COMPENSATION[];
+ // The minimum exposure compensation index (<=0).
+ // Example value: "-6". Read only.
+ static const char KEY_MIN_EXPOSURE_COMPENSATION[];
+ // The exposure compensation step. The exposure compensation index multiplied
+ // by the step equals the EV. Ex: if the exposure compensation index is -6 and
+ // the step is 0.3333, the EV is -2.
+ // Example value: "0.333333333" or "0.5". Read only.
+ static const char KEY_EXPOSURE_COMPENSATION_STEP[];
+ // The state of the auto-exposure lock. "true" means that
+ // auto-exposure is locked to its current value and will not
+ // change. "false" means the auto-exposure routine is free to
+ // change exposure values. If auto-exposure is already locked,
+ // setting this to true again has no effect (the driver will not
+ // recalculate exposure values). Changing exposure compensation
+ // settings will still affect the exposure settings while
+ // auto-exposure is locked. Stopping preview or taking a still
+ // image will not change the lock. In conjunction with
+ // exposure compensation, this allows for capturing multi-exposure
+ // brackets with known relative exposure values. Locking
+ // auto-exposure after open but before the first call to
+ // startPreview may result in severely over- or under-exposed
+ // images. The driver will not change the AE lock after
+ // auto-focus completes.
+ static const char KEY_AUTO_EXPOSURE_LOCK[];
+ // Whether locking the auto-exposure is supported. "true" means it is, and
+ // "false" or this key not existing means it is not supported.
+ static const char KEY_AUTO_EXPOSURE_LOCK_SUPPORTED[];
+ // The state of the auto-white balance lock. "true" means that
+ // auto-white balance is locked to its current value and will not
+ // change. "false" means the auto-white balance routine is free to
+ // change white balance values. If auto-white balance is already
+ // locked, setting this to true again has no effect (the driver
+ // will not recalculate white balance values). Stopping preview or
+ // taking a still image will not change the lock. In conjunction
+ // with exposure compensation, this allows for capturing
+ // multi-exposure brackets with fixed white balance. Locking
+ // auto-white balance after open but before the first call to
+ // startPreview may result in severely incorrect color. The
+ // driver will not change the AWB lock after auto-focus
+ // completes.
+ static const char KEY_AUTO_WHITEBALANCE_LOCK[];
+ // Whether locking the auto-white balance is supported. "true"
+ // means it is, and "false" or this key not existing means it is
+ // not supported.
+ static const char KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED[];
+
+ // The maximum number of metering areas supported. This is the maximum
+ // length of KEY_METERING_AREAS.
+ // Example value: "0" or "2". Read only.
+ static const char KEY_MAX_NUM_METERING_AREAS[];
+ // Current metering areas. Camera driver uses these areas to decide
+ // exposure.
+ //
+ // Before accessing this parameter, apps should check
+ // KEY_MAX_NUM_METERING_AREAS first to know the maximum number of metering
+ // areas. If the value is 0, metering areas are not supported.
+ //
+ // Each metering area is a rectangle with specified weight. The direction is
+ // relative to the sensor orientation, that is, what the sensor sees. The
+ // direction is not affected by the rotation or mirroring of
+ // CAMERA_CMD_SET_DISPLAY_ORIENTATION. Coordinates of the rectangle range
+ // from -1000 to 1000. (-1000, -1000) is the upper left point. (1000, 1000)
+ // is the lower right point. The width and height of metering areas cannot
+ // be 0 or negative.
+ //
+ // The fifth element is the weight. Values for weight must range from 1 to
+ // 1000. The weight should be interpreted as a per-pixel weight - all
+ // pixels in the area have the specified weight. This means a small area
+ // with the same weight as a larger area will have less influence on the
+ // metering than the larger area. Metering areas can partially overlap and
+ // the driver will add the weights in the overlap region.
+ //
+ // A special case of an all-zero single metering area lets the driver decide
+ // the metering area. For example, the driver may use more signals to decide
+ // metering areas and change them dynamically. Apps can set all-zero if they
+ // want the driver to decide metering areas.
+ //
+ // Metering areas are relative to the current field of view (KEY_ZOOM).
+ // No matter what the zoom level is, (-1000,-1000) represents the top of the
+ // currently visible camera frame. The metering area cannot be set to be
+ // outside the current field of view, even when using zoom.
+ //
+ // No matter what the metering areas are, the final exposure is compensated
+ // by KEY_EXPOSURE_COMPENSATION.
+ // Example value: "(-10,-10,0,0,300),(0,0,10,10,700)". Read/write.
+ static const char KEY_METERING_AREAS[];
+ // Current zoom value.
+ // Example value: "0" or "6". Read/write.
+ static const char KEY_ZOOM[];
+ // Maximum zoom value.
+ // Example value: "6". Read only.
+ static const char KEY_MAX_ZOOM[];
+ // The zoom ratios of all zoom values. The zoom ratio is in 1/100
+ // increments. Ex: a zoom of 3.2x is returned as 320. The number of list
+ // elements is KEY_MAX_ZOOM + 1. The first element is always 100. The last
+ // element is the zoom ratio of zoom value KEY_MAX_ZOOM.
+ // Example value: "100,150,200,250,300,350,400". Read only.
+ static const char KEY_ZOOM_RATIOS[];
+ // Whether zoom is supported. Zoom is supported if the value is "true". Zoom
+ // is not supported if the value is not "true" or the key does not exist.
+ // Example value: "true". Read only.
+ static const char KEY_ZOOM_SUPPORTED[];
+ // Whether smooth zoom is supported. Smooth zoom is supported if the
+ // value is "true". It is not supported if the value is not "true" or the
+ // key does not exist.
+ // See CAMERA_CMD_START_SMOOTH_ZOOM, CAMERA_CMD_STOP_SMOOTH_ZOOM, and
+ // CAMERA_MSG_ZOOM in frameworks/base/include/camera/Camera.h.
+ // Example value: "true". Read only.
+ static const char KEY_SMOOTH_ZOOM_SUPPORTED[];
+
+ // The distances (in meters) from the camera to where an object appears to
+ // be in focus. The object is sharpest at the optimal focus distance. The
+ // depth of field is the far focus distance minus near focus distance.
+ //
+ // Focus distances may change after starting auto focus, canceling auto
+ // focus, or starting the preview. Applications can read this anytime to get
+ // the latest focus distances. If the focus mode is FOCUS_MODE_CONTINUOUS,
+ // focus distances may change from time to time.
+ //
+ // This is intended to estimate the distance between the camera and the
+ // subject. After autofocus, the subject distance may be within near and far
+ // focus distance. However, the precision depends on the camera hardware,
+ // autofocus algorithm, the focus area, and the scene. The error can be
+ // large and it should be only used as a reference.
+ //
+ // Far focus distance > optimal focus distance > near focus distance. If
+ // the far focus distance is infinity, the value should be "Infinity" (case
+ // sensitive). The format is three float values separated by commas. The
+ // first is near focus distance. The second is optimal focus distance. The
+ // third is far focus distance.
+ // Example value: "0.95,1.9,Infinity" or "0.049,0.05,0.051". Read only.
+ static const char KEY_FOCUS_DISTANCES[];
+
+ // The current dimensions in pixels (width x height) for video frames.
+ // The width and height must be one of the supported sizes retrieved
+ // via KEY_SUPPORTED_VIDEO_SIZES.
+ // Example value: "1280x720". Read/write.
+ static const char KEY_VIDEO_SIZE[];
+ // A list of the supported dimensions in pixels (width x height)
+ // for video frames. See CAMERA_MSG_VIDEO_FRAME for details in
+ // frameworks/base/include/camera/Camera.h.
+ // Example: "176x144,1280x720". Read only.
+ static const char KEY_SUPPORTED_VIDEO_SIZES[];
+
+ // The maximum number of detected faces supported by hardware face
+ // detection. If the value is 0, hardware face detection is not supported.
+ // Example: "5". Read only
+ static const char KEY_MAX_NUM_DETECTED_FACES_HW[];
+
+ // The maximum number of detected faces supported by software face
+ // detection. If the value is 0, software face detection is not supported.
+ // Example: "5". Read only
+ static const char KEY_MAX_NUM_DETECTED_FACES_SW[];
+
+ // Preferred preview frame size in pixels for video recording.
+ // The width and height must be one of the supported sizes retrieved
+ // via KEY_SUPPORTED_PREVIEW_SIZES. This key can be used only when
+ // getSupportedVideoSizes() does not return an empty Vector of Size.
+ // Camcorder applications are recommended to set the preview size
+ // to a value that is not larger than the preferred preview size.
+ // In other words, the product of the width and height of the
+ // preview size should not be larger than that of the preferred
+ // preview size. In addition, we recommend choosing a preview size
+ // that has the same aspect ratio as the resolution of video to be
+ // recorded.
+ // Example value: "800x600". Read only.
+ static const char KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO[];
+
+ // The image format for video frames. See CAMERA_MSG_VIDEO_FRAME in
+ // frameworks/base/include/camera/Camera.h.
+ // Example value: "yuv420sp" or PIXEL_FORMAT_XXX constants. Read only.
+ static const char KEY_VIDEO_FRAME_FORMAT[];
+
+ // Sets the hint of the recording mode. If this is true, MediaRecorder.start
+ // may be faster or have fewer glitches. This should be called before starting
+ // the preview for the best result. But it is allowed to change the hint
+ // while the preview is active. The default value is false.
+ //
+ // The apps can still call Camera.takePicture when the hint is true. The
+ // apps can call MediaRecorder.start when the hint is false. But the
+ // performance may be worse.
+ // Example value: "true" or "false". Read/write.
+ static const char KEY_RECORDING_HINT[];
+
+ // Returns true if video snapshot is supported. That is, applications
+ // can call Camera.takePicture during recording. Applications do not need to
+ // call Camera.startPreview after taking a picture. The preview will be
+ // still active. Other than that, taking a picture during recording is
+ // identical to taking a picture normally. All settings and methods related
+ // to takePicture work identically. Ex: KEY_PICTURE_SIZE,
+ // KEY_SUPPORTED_PICTURE_SIZES, KEY_JPEG_QUALITY, KEY_ROTATION, etc.
+ // The picture will have an EXIF header. FLASH_MODE_AUTO and FLASH_MODE_ON
+ // also still work, but the video will record the flash.
+ //
+ // Applications can set shutter callback as null to avoid the shutter
+ // sound. It is also recommended to set raw picture and post view callbacks
+ // to null to avoid interrupting the preview display.
+ //
+ // Field-of-view of the recorded video may be different from that of the
+ // captured pictures.
+ // Example value: "true" or "false". Read only.
+ static const char KEY_VIDEO_SNAPSHOT_SUPPORTED[];
+
+ // The state of the video stabilization. If set to true, both the
+ // preview stream and the recorded video stream are stabilized by
+ // the camera. Only valid to set if KEY_VIDEO_STABILIZATION_SUPPORTED is
+ // set to true.
+ //
+ // The value of this key can be changed any time the camera is
+ // open. If preview or recording is active, it is acceptable for
+ // there to be a slight video glitch when video stabilization is
+ // toggled on and off.
+ //
+ // This only stabilizes video streams (between-frames stabilization), and
+ // has no effect on still image capture.
+ static const char KEY_VIDEO_STABILIZATION[];
+
+ // Returns true if video stabilization is supported. That is, applications
+ // can set KEY_VIDEO_STABILIZATION to true and have a stabilized preview
+ // stream and record stabilized videos.
+ static const char KEY_VIDEO_STABILIZATION_SUPPORTED[];
+
+ // Supported modes for special effects with light.
+ // Example values: "lowlight,hdr".
+ static const char KEY_LIGHTFX[];
+
+ // Value for KEY_ZOOM_SUPPORTED or KEY_SMOOTH_ZOOM_SUPPORTED.
+ static const char TRUE[];
+ static const char FALSE[];
+
+ // Value for KEY_FOCUS_DISTANCES.
+ static const char FOCUS_DISTANCE_INFINITY[];
+
+ // Values for white balance settings.
+ static const char WHITE_BALANCE_AUTO[];
+ static const char WHITE_BALANCE_INCANDESCENT[];
+ static const char WHITE_BALANCE_FLUORESCENT[];
+ static const char WHITE_BALANCE_WARM_FLUORESCENT[];
+ static const char WHITE_BALANCE_DAYLIGHT[];
+ static const char WHITE_BALANCE_CLOUDY_DAYLIGHT[];
+ static const char WHITE_BALANCE_TWILIGHT[];
+ static const char WHITE_BALANCE_SHADE[];
+
+ // Values for effect settings.
+ static const char EFFECT_NONE[];
+ static const char EFFECT_MONO[];
+ static const char EFFECT_NEGATIVE[];
+ static const char EFFECT_SOLARIZE[];
+ static const char EFFECT_SEPIA[];
+ static const char EFFECT_POSTERIZE[];
+ static const char EFFECT_WHITEBOARD[];
+ static const char EFFECT_BLACKBOARD[];
+ static const char EFFECT_AQUA[];
+
+ // Values for antibanding settings.
+ static const char ANTIBANDING_AUTO[];
+ static const char ANTIBANDING_50HZ[];
+ static const char ANTIBANDING_60HZ[];
+ static const char ANTIBANDING_OFF[];
+
+ // Values for flash mode settings.
+ // Flash will not be fired.
+ static const char FLASH_MODE_OFF[];
+ // Flash will be fired automatically when required. The flash may be fired
+ // during preview, auto-focus, or snapshot depending on the driver.
+ static const char FLASH_MODE_AUTO[];
+ // Flash will always be fired during snapshot. The flash may also be
+ // fired during preview or auto-focus depending on the driver.
+ static const char FLASH_MODE_ON[];
+ // Flash will be fired in red-eye reduction mode.
+ static const char FLASH_MODE_RED_EYE[];
+ // Constant emission of light during preview, auto-focus and snapshot.
+ // This can also be used for video recording.
+ static const char FLASH_MODE_TORCH[];
+
+ // Values for scene mode settings.
+ static const char SCENE_MODE_AUTO[];
+ static const char SCENE_MODE_ACTION[];
+ static const char SCENE_MODE_PORTRAIT[];
+ static const char SCENE_MODE_LANDSCAPE[];
+ static const char SCENE_MODE_NIGHT[];
+ static const char SCENE_MODE_NIGHT_PORTRAIT[];
+ static const char SCENE_MODE_THEATRE[];
+ static const char SCENE_MODE_BEACH[];
+ static const char SCENE_MODE_SNOW[];
+ static const char SCENE_MODE_SUNSET[];
+ static const char SCENE_MODE_STEADYPHOTO[];
+ static const char SCENE_MODE_FIREWORKS[];
+ static const char SCENE_MODE_SPORTS[];
+ static const char SCENE_MODE_PARTY[];
+ static const char SCENE_MODE_CANDLELIGHT[];
+ // Applications are looking for a barcode. Camera driver will be optimized
+ // for barcode reading.
+ static const char SCENE_MODE_BARCODE[];
+ // A high-dynamic range mode. In this mode, the HAL module will use a
+ // capture strategy that extends the dynamic range of the captured
+ // image in some fashion. Only the final image is returned.
+ static const char SCENE_MODE_HDR[];
+
+ // Pixel color formats for KEY_PREVIEW_FORMAT, KEY_PICTURE_FORMAT,
+ // and KEY_VIDEO_FRAME_FORMAT
+ static const char PIXEL_FORMAT_YUV422SP[];
+ static const char PIXEL_FORMAT_YUV420SP[]; // NV21
+ static const char PIXEL_FORMAT_YUV422I[]; // YUY2
+ static const char PIXEL_FORMAT_YUV420P[]; // YV12
+ static const char PIXEL_FORMAT_RGB565[];
+ static const char PIXEL_FORMAT_RGBA8888[];
+ static const char PIXEL_FORMAT_JPEG[];
+ // Raw Bayer format used for images: 10-bit precision samples
+ // stored in 16-bit words. The filter pattern is RGGB.
+ static const char PIXEL_FORMAT_BAYER_RGGB[];
+ // Pixel format is not known to the framework
+ static const char PIXEL_FORMAT_ANDROID_OPAQUE[];
+
+ // Values for focus mode settings.
+ // Auto-focus mode. Applications should call
+ // CameraHardwareInterface.autoFocus to start the focus in this mode.
+ static const char FOCUS_MODE_AUTO[];
+ // Focus is set at infinity. Applications should not call
+ // CameraHardwareInterface.autoFocus in this mode.
+ static const char FOCUS_MODE_INFINITY[];
+ // Macro (close-up) focus mode. Applications should call
+ // CameraHardwareInterface.autoFocus to start the focus in this mode.
+ static const char FOCUS_MODE_MACRO[];
+ // Focus is fixed. The camera is always in this mode if the focus is not
+ // adjustable. If the camera has auto-focus, this mode can fix the
+ // focus, which is usually at hyperfocal distance. Applications should
+ // not call CameraHardwareInterface.autoFocus in this mode.
+ static const char FOCUS_MODE_FIXED[];
+ // Extended depth of field (EDOF). Focusing is done digitally and
+ // continuously. Applications should not call
+ // CameraHardwareInterface.autoFocus in this mode.
+ static const char FOCUS_MODE_EDOF[];
+ // Continuous auto focus mode intended for video recording. The camera
+ // continuously tries to focus. This is the best choice for video
+ // recording because the focus changes smoothly. Applications can still
+ // call CameraHardwareInterface.takePicture in this mode but the subject may
+ // not be in focus. Auto focus starts when the parameter is set.
+ //
+ // Applications can call CameraHardwareInterface.autoFocus in this mode. The
+ // focus callback will immediately return with a boolean that indicates
+ // whether the focus is sharp or not. The focus position is locked after
+ // autoFocus call. If applications want to resume the continuous focus,
+ // cancelAutoFocus must be called. Restarting the preview will not resume
+ // the continuous autofocus. To stop continuous focus, applications should
+ // change the focus mode to other modes.
+ static const char FOCUS_MODE_CONTINUOUS_VIDEO[];
+ // Continuous auto focus mode intended for taking pictures. The camera
+ // continuously tries to focus. The speed of focus change is more aggressive
+ // than FOCUS_MODE_CONTINUOUS_VIDEO. Auto focus starts when the parameter is
+ // set.
+ //
+ // Applications can call CameraHardwareInterface.autoFocus in this mode. If
+ // the autofocus is in the middle of scanning, the focus callback will
+ // return when it completes. If the autofocus is not scanning, focus
+ // callback will immediately return with a boolean that indicates whether
+ // the focus is sharp or not. The apps can then decide if they want to take
+ // a picture immediately or to change the focus mode to auto, and run a full
+ // autofocus cycle. The focus position is locked after autoFocus call. If
+ // applications want to resume the continuous focus, cancelAutoFocus must be
+ // called. Restarting the preview will not resume the continuous autofocus.
+ // To stop continuous focus, applications should change the focus mode to
+ // other modes.
+ static const char FOCUS_MODE_CONTINUOUS_PICTURE[];
+
+ // Values for light special effects
+ // Low-light enhancement mode
+ static const char LIGHTFX_LOWLIGHT[];
+ // High-dynamic range mode
+ static const char LIGHTFX_HDR[];
+
+ /**
+ * Returns the preview format enum from graphics.h corresponding to the
+ * format given in the input string, or -1 if no such conversion exists.
+ */
+ static int previewFormatToEnum(const char* format);
+
+private:
+ DefaultKeyedVector<String8,String8> mMap;
+};
+
+}; // namespace android
+
+#endif
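
For reference, a minimal sketch of how a device version 1.0 test could use these accessors once it has obtained a flattened parameter string from the camera under test; the helper name and the assumption that the string was already fetched are illustrative, not part of this change.

    #include <utils/String8.h>
    #include <utils/Vector.h>
    #include "CameraParameters.h"

    using namespace android;

    // Pick the first advertised preview size and return the re-flattened
    // parameter string that would be handed back to the device.
    String8 selectFirstPreviewConfig(const String8 &flattenedFromDevice) {
        CameraParameters params(flattenedFromDevice);

        Vector<Size> previewSizes;
        params.getSupportedPreviewSizes(previewSizes);      // parses "WxH,WxH,..."
        if (!previewSizes.isEmpty()) {
            params.setPreviewSize(previewSizes[0].width, previewSizes[0].height);
        }

        Vector<int> previewFormats;
        params.getSupportedPreviewFormats(previewFormats);  // graphics.h enum values
        // previewFormatToEnum() maps the PIXEL_FORMAT_* strings to
        // HAL_PIXEL_FORMAT_* values, or returns -1 for unknown formats.

        return params.flatten();
    }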
diff --git a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
index ce195f8..598127f 100644
--- a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
+++ b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
@@ -17,9 +17,16 @@
#define LOG_TAG "camera_hidl_hal_test"
#include <android/hardware/camera/provider/2.4/ICameraProvider.h>
#include <android/hardware/camera/device/3.2/ICameraDevice.h>
+#include <android/hardware/camera/device/1.0/ICameraDevice.h>
+#include "CameraParameters.h"
+#include <system/camera.h>
#include <android/log.h>
#include <ui/GraphicBuffer.h>
#include <VtsHalHidlTargetTestBase.h>
+#include <gui/BufferQueue.h>
+#include <gui/Surface.h>
+#include <gui/BufferItemConsumer.h>
+#include <binder/MemoryHeapBase.h>
#include <regex>
#include "system/camera_metadata.h"
#include <hardware/gralloc.h>
@@ -29,6 +36,7 @@
#include <condition_variable>
#include <chrono>
#include <inttypes.h>
+#include <utils/Errors.h>
using ::android::hardware::Return;
using ::android::hardware::Void;
@@ -36,14 +44,23 @@
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::sp;
+using ::android::wp;
using ::android::GraphicBuffer;
+using ::android::IGraphicBufferProducer;
+using ::android::IGraphicBufferConsumer;
+using ::android::BufferQueue;
+using ::android::BufferItemConsumer;
+using ::android::Surface;
+using ::android::CameraParameters;
using ::android::hardware::graphics::common::V1_0::PixelFormat;
+using ::android::hardware::graphics::allocator::V2_0::ProducerUsage;
using ::android::hardware::camera::common::V1_0::Status;
using ::android::hardware::camera::common::V1_0::CameraDeviceStatus;
using ::android::hardware::camera::common::V1_0::TorchMode;
using ::android::hardware::camera::common::V1_0::TorchModeStatus;
using ::android::hardware::camera::provider::V2_4::ICameraProvider;
using ::android::hardware::camera::provider::V2_4::ICameraProviderCallback;
+using ::android::hardware::camera::device::V3_2::ICameraDevice;
using ::android::hardware::camera::device::V3_2::CaptureRequest;
using ::android::hardware::camera::device::V3_2::CaptureResult;
using ::android::hardware::camera::device::V3_2::ICameraDeviceCallback;
@@ -59,14 +76,27 @@
using ::android::hardware::camera::device::V3_2::HalStreamConfiguration;
using ::android::hardware::camera::device::V3_2::BufferStatus;
using ::android::hardware::camera::device::V3_2::StreamBuffer;
+using ::android::hardware::camera::device::V3_2::MsgType;
+using ::android::hardware::camera::device::V3_2::ErrorMsg;
+using ::android::hardware::camera::device::V3_2::ErrorCode;
+using ::android::hardware::camera::device::V1_0::CameraFacing;
+using ::android::hardware::camera::device::V1_0::NotifyCallbackMsg;
+using ::android::hardware::camera::device::V1_0::CommandType;
+using ::android::hardware::camera::device::V1_0::DataCallbackMsg;
+using ::android::hardware::camera::device::V1_0::CameraFrameMetadata;
+using ::android::hardware::camera::device::V1_0::ICameraDevicePreviewCallback;
+using ::android::hardware::camera::device::V1_0::FrameCallbackFlag;
-#define CAMERA_PASSTHROUGH_SERVICE_NAME "legacy/0"
-#define MAX_PREVIEW_WIDTH 1920
-#define MAX_PREVIEW_HEIGHT 1080
-#define MAX_VIDEO_WIDTH 4096
-#define MAX_VIDEO_HEIGHT 2160
-#define STREAM_BUFFER_TIMEOUT 3 // sec.
-#define DUMP_OUTPUT "/dev/null"
+const char kCameraPassthroughServiceName[] = "legacy/0";
+const uint32_t kMaxPreviewWidth = 1920;
+const uint32_t kMaxPreviewHeight = 1080;
+const uint32_t kMaxVideoWidth = 4096;
+const uint32_t kMaxVideoHeight = 2160;
+const int64_t kStreamBufferTimeoutSec = 3;
+const int64_t kAutoFocusTimeoutSec = 5;
+const int64_t kTorchTimeoutSec = 1;
+const int64_t kEmptyFlushTimeoutMSec = 200;
+const char kDumpOutput[] = "/dev/null";
struct AvailableStream {
int32_t width;
@@ -99,14 +129,36 @@
if (!match) {
return -1;
}
- if (sm[1].compare(kHAL3_2) == 0) {
+ std::string version = sm[1].str();
+ if (version.compare(kHAL3_2) == 0) {
 // maybe switch to 3.4 or define the HIDL version enum later
return CAMERA_DEVICE_API_VERSION_3_2;
- } else if (sm[1].compare(kHAL1_0) == 0) {
+ } else if (version.compare(kHAL1_0) == 0) {
return CAMERA_DEVICE_API_VERSION_1_0;
}
return 0;
}
+
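+ // Maps native status_t codes returned by the ANativeWindow calls below to the
+ // HIDL camera Status values expected by the preview window callback interface.
+ // A minimal mapping used only by these tests.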
+ Status mapToStatus(::android::status_t s) {
+ switch(s) {
+ case ::android::OK:
+ return Status::OK;
+ case ::android::BAD_VALUE:
+ return Status::ILLEGAL_ARGUMENT;
+ case -EBUSY:
+ return Status::CAMERA_IN_USE;
+ case -EUSERS:
+ return Status::MAX_CAMERAS_IN_USE;
+ case ::android::UNKNOWN_TRANSACTION:
+ return Status::METHOD_NOT_SUPPORTED;
+ case ::android::INVALID_OPERATION:
+ return Status::OPERATION_NOT_SUPPORTED;
+ case ::android::DEAD_OBJECT:
+ return Status::CAMERA_DISCONNECTED;
+ }
+ ALOGW("Unexpected HAL status code %d", s);
+ return Status::OPERATION_NOT_SUPPORTED;
+ }
}
// Test environment for camera
@@ -131,7 +183,7 @@
void CameraHidlEnvironment::SetUp() {
// TODO: test the binderized mode
- mProvider = ::testing::VtsHalHidlTargetTestBase::getService<ICameraProvider>(CAMERA_PASSTHROUGH_SERVICE_NAME);
+ mProvider = ::testing::VtsHalHidlTargetTestBase::getService<ICameraProvider>(kCameraPassthroughServiceName);
// TODO: handle the device doesn't have any camera case
ALOGI_IF(mProvider, "provider is not nullptr, %p", mProvider.get());
ASSERT_NE(mProvider, nullptr);
@@ -141,6 +193,249 @@
ALOGI("TearDown CameraHidlEnvironment");
}
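+
+// Frame-available listener that immediately acquires and releases every incoming
+// preview buffer, keeping the consumer side of the buffer queue draining while
+// the tests run.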
+struct BufferItemHander: public BufferItemConsumer::FrameAvailableListener {
+ BufferItemHander(wp<BufferItemConsumer> consumer) : mConsumer(consumer) {}
+
+ void onFrameAvailable(const android::BufferItem&) override {
+ sp<BufferItemConsumer> consumer = mConsumer.promote();
+ ASSERT_NE(nullptr, consumer.get());
+
+ android::BufferItem buffer;
+ ASSERT_EQ(android::OK, consumer->acquireBuffer(&buffer, 0));
+ ASSERT_EQ(android::OK, consumer->releaseBuffer(buffer));
+ }
+
+ private:
+ wp<BufferItemConsumer> mConsumer;
+};
+
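+// Test implementation of ICameraDevicePreviewCallback. It forwards the HAL's
+// preview window calls to a local ANativeWindow and keeps a two-way mapping
+// between buffer handles and the buffer IDs exchanged over HIDL.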
+struct PreviewWindowCb : public ICameraDevicePreviewCallback {
+ PreviewWindowCb(sp<ANativeWindow> anw) : mPreviewWidth(0),
+ mPreviewHeight(0), mFormat(0), mPreviewUsage(0),
+ mPreviewSwapInterval(-1), mCrop{-1, -1, -1, -1}, mAnw(anw) {}
+
+ using dequeueBuffer_cb =
+ std::function<void(Status status, uint64_t bufferId,
+ const hidl_handle& buffer, uint32_t stride)>;
+ Return<void> dequeueBuffer(dequeueBuffer_cb _hidl_cb) override;
+
+ Return<Status> enqueueBuffer(uint64_t bufferId) override;
+
+ Return<Status> cancelBuffer(uint64_t bufferId) override;
+
+ Return<Status> setBufferCount(uint32_t count) override;
+
+ Return<Status> setBuffersGeometry(uint32_t w,
+ uint32_t h, PixelFormat format) override;
+
+ Return<Status> setCrop(int32_t left, int32_t top,
+ int32_t right, int32_t bottom) override;
+
+ Return<Status> setUsage(ProducerUsage usage) override;
+
+ Return<Status> setSwapInterval(int32_t interval) override;
+
+ using getMinUndequeuedBufferCount_cb =
+ std::function<void(Status status, uint32_t count)>;
+ Return<void> getMinUndequeuedBufferCount(
+ getMinUndequeuedBufferCount_cb _hidl_cb) override;
+
+ Return<Status> setTimestamp(int64_t timestamp) override;
+
+ private:
+ struct BufferHasher {
+ size_t operator()(const buffer_handle_t& buf) const {
+ if (buf == nullptr)
+ return 0;
+
+ size_t result = 1;
+ result = 31 * result + buf->numFds;
+ result = 31 * result + buf->numInts;
+ int length = buf->numFds + buf->numInts;
+ for (int i = 0; i < length; i++) {
+ result = 31 * result + buf->data[i];
+ }
+ return result;
+ }
+ };
+
+ struct BufferComparator {
+ bool operator()(const buffer_handle_t& buf1,
+ const buffer_handle_t& buf2) const {
+ if ((buf1->numFds == buf2->numFds) &&
+ (buf1->numInts == buf2->numInts)) {
+ int length = buf1->numFds + buf1->numInts;
+ for (int i = 0; i < length; i++) {
+ if (buf1->data[i] != buf2->data[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
+ }
+ };
+
+ std::pair<bool, uint64_t> getBufferId(ANativeWindowBuffer* anb);
+ void cleanupCirculatingBuffers();
+
+ std::mutex mBufferIdMapLock; // protecting mBufferIdMap and mNextBufferId
+ typedef std::unordered_map<const buffer_handle_t, uint64_t,
+ BufferHasher, BufferComparator> BufferIdMap;
+
+ BufferIdMap mBufferIdMap; // buffer handle -> unique buffer ID map
+ std::unordered_map<uint64_t, ANativeWindowBuffer*> mReversedBufMap;
+ uint64_t mNextBufferId = 1;
+
+ uint32_t mPreviewWidth, mPreviewHeight;
+ int mFormat, mPreviewUsage;
+ int32_t mPreviewSwapInterval;
+ android_native_rect_t mCrop;
+ sp<ANativeWindow> mAnw; //Native window reference
+};
+
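+// Returns the unique ID assigned to this buffer handle, allocating a new one on
+// first use. The boolean is true when the handle was newly registered and its
+// full handle therefore needs to be sent across the HIDL boundary.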
+std::pair<bool, uint64_t> PreviewWindowCb::getBufferId(
+ ANativeWindowBuffer* anb) {
+ std::lock_guard<std::mutex> lock(mBufferIdMapLock);
+
+ buffer_handle_t& buf = anb->handle;
+ auto it = mBufferIdMap.find(buf);
+ if (it == mBufferIdMap.end()) {
+ uint64_t bufId = mNextBufferId++;
+ mBufferIdMap[buf] = bufId;
+ mReversedBufMap[bufId] = anb;
+ return std::make_pair(true, bufId);
+ } else {
+ return std::make_pair(false, it->second);
+ }
+}
+
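+// Drops all cached buffer handle <-> buffer ID mappings, e.g. after the buffer
+// count of the preview window changes.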
+void PreviewWindowCb::cleanupCirculatingBuffers() {
+ std::lock_guard<std::mutex> lock(mBufferIdMapLock);
+ mBufferIdMap.clear();
+ mReversedBufMap.clear();
+}
+
+Return<void> PreviewWindowCb::dequeueBuffer(dequeueBuffer_cb _hidl_cb) {
+ ANativeWindowBuffer* anb;
+ auto rc = native_window_dequeue_buffer_and_wait(mAnw.get(), &anb);
+ uint64_t bufferId = 0;
+ uint32_t stride = 0;
+ hidl_handle buf = nullptr;
+ if (rc == ::android::OK) {
+ auto pair = getBufferId(anb);
+ buf = (pair.first) ? anb->handle : nullptr;
+ bufferId = pair.second;
+ stride = anb->stride;
+ }
+
+ _hidl_cb(mapToStatus(rc), bufferId, buf, stride);
+ return Void();
+}
+
+Return<Status> PreviewWindowCb::enqueueBuffer(uint64_t bufferId) {
+ if (mReversedBufMap.count(bufferId) == 0) {
+ ALOGE("%s: bufferId %" PRIu64 " not found", __FUNCTION__, bufferId);
+ return Status::ILLEGAL_ARGUMENT;
+ }
+ return mapToStatus(mAnw->queueBuffer(mAnw.get(),
+ mReversedBufMap.at(bufferId), -1));
+}
+
+Return<Status> PreviewWindowCb::cancelBuffer(uint64_t bufferId) {
+ if (mReversedBufMap.count(bufferId) == 0) {
+ ALOGE("%s: bufferId %" PRIu64 " not found", __FUNCTION__, bufferId);
+ return Status::ILLEGAL_ARGUMENT;
+ }
+ return mapToStatus(mAnw->cancelBuffer(mAnw.get(),
+ mReversedBufMap.at(bufferId), -1));
+}
+
+Return<Status> PreviewWindowCb::setBufferCount(uint32_t count) {
+ if (mAnw.get() != nullptr) {
+ // WAR for b/27039775
+ native_window_api_disconnect(mAnw.get(), NATIVE_WINDOW_API_CAMERA);
+ native_window_api_connect(mAnw.get(), NATIVE_WINDOW_API_CAMERA);
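+ // Re-apply any cached geometry, format, usage, swap interval and crop
+ // settings that were lost by the disconnect/connect cycle above.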
+ if (mPreviewWidth != 0) {
+ native_window_set_buffers_dimensions(mAnw.get(),
+ mPreviewWidth, mPreviewHeight);
+ native_window_set_buffers_format(mAnw.get(), mFormat);
+ }
+ if (mPreviewUsage != 0) {
+ native_window_set_usage(mAnw.get(), mPreviewUsage);
+ }
+ if (mPreviewSwapInterval >= 0) {
+ mAnw->setSwapInterval(mAnw.get(), mPreviewSwapInterval);
+ }
+ if (mCrop.left >= 0) {
+ native_window_set_crop(mAnw.get(), &(mCrop));
+ }
+ }
+
+ auto rc = native_window_set_buffer_count(mAnw.get(), count);
+ if (rc == ::android::OK) {
+ cleanupCirculatingBuffers();
+ }
+
+ return mapToStatus(rc);
+}
+
+Return<Status> PreviewWindowCb::setBuffersGeometry(uint32_t w, uint32_t h,
+ PixelFormat format) {
+ auto rc = native_window_set_buffers_dimensions(mAnw.get(), w, h);
+ if (rc == ::android::OK) {
+ mPreviewWidth = w;
+ mPreviewHeight = h;
+ rc = native_window_set_buffers_format(mAnw.get(),
+ static_cast<int>(format));
+ if (rc == ::android::OK) {
+ mFormat = static_cast<int>(format);
+ }
+ }
+
+ return mapToStatus(rc);
+}
+
+Return<Status> PreviewWindowCb::setCrop(int32_t left, int32_t top,
+ int32_t right, int32_t bottom) {
+ android_native_rect_t crop = { left, top, right, bottom };
+ auto rc = native_window_set_crop(mAnw.get(), &crop);
+ if (rc == ::android::OK) {
+ mCrop = crop;
+ }
+ return mapToStatus(rc);
+}
+
+Return<Status> PreviewWindowCb::setUsage(ProducerUsage usage) {
+ auto rc = native_window_set_usage(mAnw.get(), static_cast<int>(usage));
+ if (rc == ::android::OK) {
+ mPreviewUsage = static_cast<int>(usage);
+ }
+ return mapToStatus(rc);
+}
+
+Return<Status> PreviewWindowCb::setSwapInterval(int32_t interval) {
+ auto rc = mAnw->setSwapInterval(mAnw.get(), interval);
+ if (rc == ::android::OK) {
+ mPreviewSwapInterval = interval;
+ }
+ return mapToStatus(rc);
+}
+
+Return<void> PreviewWindowCb::getMinUndequeuedBufferCount(
+ getMinUndequeuedBufferCount_cb _hidl_cb) {
+ int count = 0;
+ auto rc = mAnw->query(mAnw.get(),
+ NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &count);
+ _hidl_cb(mapToStatus(rc), count);
+ return Void();
+}
+
+Return<Status> PreviewWindowCb::setTimestamp(int64_t timestamp) {
+ return mapToStatus(native_window_set_buffers_timestamp(mAnw.get(),
+ timestamp));
+}
+
// The main test class for camera HIDL HAL.
class CameraHidlTest : public ::testing::VtsHalHidlTargetTestBase {
public:
@@ -172,9 +467,88 @@
CameraHidlTest *mParent; // Parent object
};
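+ // Provider callback that records torch status changes and signals the test
+ // thread waiting on mTorchCond.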
+ struct TorchProviderCb : public ICameraProviderCallback {
+ TorchProviderCb(CameraHidlTest *parent) : mParent(parent) {}
+ virtual Return<void> cameraDeviceStatusChange(
+ const hidl_string&, CameraDeviceStatus) override {
+ return Void();
+ }
+
+ virtual Return<void> torchModeStatusChange(
+ const hidl_string&, TorchModeStatus newStatus) override {
+ std::lock_guard<std::mutex> l(mParent->mTorchLock);
+ mParent->mTorchStatus = newStatus;
+ mParent->mTorchCond.notify_one();
+ return Void();
+ }
+
+ private:
+ CameraHidlTest *mParent; // Parent object
+ };
+
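+ // Camera device version 1.0 callback implementation. It stores the incoming
+ // notification and data callback state in the parent test instance and wakes
+ // up any thread waiting on mResultCondition.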
+ struct Camera1DeviceCb :
+ public ::android::hardware::camera::device::V1_0::ICameraDeviceCallback {
+ Camera1DeviceCb(CameraHidlTest *parent) : mParent(parent) {}
+
+ Return<void> notifyCallback(NotifyCallbackMsg msgType,
+ int32_t ext1, int32_t ext2) override;
+
+ Return<uint32_t> registerMemory(const hidl_handle& descriptor,
+ uint32_t bufferSize, uint32_t bufferCount) override;
+
+ Return<void> unregisterMemory(uint32_t memId) override;
+
+ Return<void> dataCallback(DataCallbackMsg msgType,
+ uint32_t data, uint32_t bufferIndex,
+ const CameraFrameMetadata& metadata) override;
+
+ Return<void> dataCallbackTimestamp(DataCallbackMsg msgType,
+ uint32_t data, uint32_t bufferIndex,
+ int64_t timestamp) override;
+
+ Return<void> handleCallbackTimestamp(DataCallbackMsg msgType,
+ const hidl_handle& frameData, uint32_t data,
+ uint32_t bufferIndex, int64_t timestamp) override;
+
+ private:
+ CameraHidlTest *mParent; // Parent object
+ };
+
+ void openCameraDevice(const std::string &name, const CameraHidlEnvironment* env,
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> *device /*out*/);
+ void setupPreviewWindow(
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device,
+ sp<BufferItemConsumer> *bufferItemConsumer /*out*/,
+ sp<BufferItemHander> *bufferHandler /*out*/);
+ void stopPreviewAndClose(
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device);
+ void startPreview(
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device);
+ void enableMsgType(unsigned int msgType,
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device);
+ void disableMsgType(unsigned int msgType,
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device);
+ void getParameters(
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device,
+ CameraParameters *cameraParams /*out*/);
+ void setParameters(
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device,
+ const CameraParameters &cameraParams);
+ void waitForFrameLocked(DataCallbackMsg msgFrame,
+ std::unique_lock<std::mutex> &l);
+ void openEmptyDeviceSession(const std::string &name,
+ const CameraHidlEnvironment* env,
+ sp<ICameraDeviceSession> *session /*out*/,
+ camera_metadata_t **staticMeta /*out*/);
+ void configurePreviewStream(const std::string &name,
+ const CameraHidlEnvironment* env,
+ const AvailableStream *previewThreshold,
+ sp<ICameraDeviceSession> *session /*out*/,
+ Stream *previewStream /*out*/,
+ HalStreamConfiguration *halStreamConfig /*out*/);
static Status getAvailableOutputStreams(camera_metadata_t *staticMeta,
std::vector<AvailableStream> &outputStreams,
- AvailableStream *threshold = nullptr);
+ const AvailableStream *threshold = nullptr);
static Status isConstrainedModeAvailable(camera_metadata_t *staticMeta);
static Status pickConstrainedModeSize(camera_metadata_t *staticMeta,
AvailableStream &hfrStream);
@@ -184,14 +558,114 @@
static Status findLargestSize(
const std::vector<AvailableStream> &streamSizes,
int32_t format, AvailableStream &result);
+ static Status isAutoFocusModeAvailable(
+ ::android::CameraParameters &cameraParams, const char *mode);
protected:
std::mutex mLock; // Synchronize access to member variables
std::condition_variable mResultCondition; // Condition variable for incoming results
uint32_t mResultFrameNumber; // Expected result frame number
std::vector<StreamBuffer> mResultBuffers; // Holds stream buffers from capture result
+ std::vector<ErrorMsg> mErrors; // Holds incoming error notifications
+ DataCallbackMsg mDataMessageTypeReceived; // Most recent message type received through data callbacks
+ uint32_t mVideoBufferIndex; // Buffer index of the most recent video buffer
+ uint32_t mVideoData; // Buffer data of the most recent video buffer
+ hidl_handle mVideoNativeHandle; // Most recent video buffer native handle
+ NotifyCallbackMsg mNotifyMessage; // Current notification message
+
+ std::mutex mTorchLock; // Synchronize access to torch status
+ std::condition_variable mTorchCond; // Condition variable for torch status
+ TorchModeStatus mTorchStatus; // Current torch status
+
+ // Holds camera registered buffers
+ std::unordered_map<uint32_t, sp<::android::MemoryHeapBase> > mMemoryPool;
};
+Return<void> CameraHidlTest::Camera1DeviceCb::notifyCallback(
+ NotifyCallbackMsg msgType, int32_t ext1 __unused,
+ int32_t ext2 __unused) {
+ std::unique_lock<std::mutex> l(mParent->mLock);
+ mParent->mNotifyMessage = msgType;
+ mParent->mResultCondition.notify_one();
+
+ return Void();
+}
+
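+// Wraps the received memory descriptor in a MemoryHeapBase and tracks it by
+// heap ID so that subsequent data callbacks can be matched against a known
+// memory pool entry.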
+Return<uint32_t> CameraHidlTest::Camera1DeviceCb::registerMemory(
+ const hidl_handle& descriptor, uint32_t bufferSize,
+ uint32_t bufferCount) {
+ if (descriptor->numFds != 1) {
+ ADD_FAILURE() << "camera memory descriptor has"
+ " numFds " << descriptor->numFds << " (expect 1)" ;
+ return 0;
+ }
+ if (descriptor->data[0] < 0) {
+ ADD_FAILURE() << "camera memory descriptor has"
+ " FD " << descriptor->data[0] << " (expect >= 0)";
+ return 0;
+ }
+
+ sp<::android::MemoryHeapBase> pool = new ::android::MemoryHeapBase(
+ descriptor->data[0], bufferSize*bufferCount, 0, 0);
+ mParent->mMemoryPool.emplace(pool->getHeapID(), pool);
+
+ return pool->getHeapID();
+}
+
+Return<void> CameraHidlTest::Camera1DeviceCb::unregisterMemory(uint32_t memId) {
+ if (mParent->mMemoryPool.count(memId) == 0) {
+ ALOGE("%s: memory pool ID %d not found", __FUNCTION__, memId);
+ ADD_FAILURE();
+ return Void();
+ }
+
+ mParent->mMemoryPool.erase(memId);
+ return Void();
+}
+
+Return<void> CameraHidlTest::Camera1DeviceCb::dataCallback(
+ DataCallbackMsg msgType __unused, uint32_t data __unused,
+ uint32_t bufferIndex __unused,
+ const CameraFrameMetadata& metadata __unused) {
+ std::unique_lock<std::mutex> l(mParent->mLock);
+ mParent->mDataMessageTypeReceived = msgType;
+ mParent->mResultCondition.notify_one();
+
+ return Void();
+}
+
+Return<void> CameraHidlTest::Camera1DeviceCb::dataCallbackTimestamp(
+ DataCallbackMsg msgType, uint32_t data,
+ uint32_t bufferIndex, int64_t timestamp __unused) {
+ std::unique_lock<std::mutex> l(mParent->mLock);
+ mParent->mDataMessageTypeReceived = msgType;
+ mParent->mVideoBufferIndex = bufferIndex;
+ if (mParent->mMemoryPool.count(data) == 0) {
+ ADD_FAILURE() << "memory pool ID " << data << "not found";
+ }
+ mParent->mVideoData = data;
+ mParent->mResultCondition.notify_one();
+
+ return Void();
+}
+
+Return<void> CameraHidlTest::Camera1DeviceCb::handleCallbackTimestamp(
+ DataCallbackMsg msgType, const hidl_handle& frameData,
+ uint32_t data __unused, uint32_t bufferIndex,
+ int64_t timestamp __unused) {
+ std::unique_lock<std::mutex> l(mParent->mLock);
+ mParent->mDataMessageTypeReceived = msgType;
+ mParent->mVideoBufferIndex = bufferIndex;
+ if (mParent->mMemoryPool.count(data) == 0) {
+ ADD_FAILURE() << "memory pool ID " << data << " not found";
+ }
+ mParent->mVideoData = data;
+ mParent->mVideoNativeHandle = frameData;
+ mParent->mResultCondition.notify_one();
+
+ return Void();
+}
+
Return<void> CameraHidlTest::DeviceCb::processCaptureResult(
const CaptureResult& result) {
if (nullptr == mParent) {
@@ -221,16 +695,28 @@
}
Return<void> CameraHidlTest::DeviceCb::notify(
- const NotifyMsg& /*msg*/) {
- // TODO(epeev): Pending implementation.
- ALOGI("notify callback");
+ const NotifyMsg& message) {
+
+ if (MsgType::ERROR == message.type) {
+ {
+ std::lock_guard<std::mutex> l(mParent->mLock);
+ mParent->mErrors.push_back(message.msg.error);
+ }
+
+ if ((ErrorCode::ERROR_REQUEST == message.msg.error.errorCode)
+ || (ErrorCode::ERROR_BUFFER == message.msg.error.errorCode)) {
+ mParent->mResultCondition.notify_one();
+ }
+ }
+
return Void();
}
hidl_vec<hidl_string> CameraHidlTest::getCameraDeviceNames() {
CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
hidl_vec<hidl_string> cameraDeviceNames;
- env->mProvider->getCameraIdList(
+ Return<void> ret;
+ ret = env->mProvider->getCameraIdList(
[&](auto status, const auto& idList) {
ALOGI("getCameraIdList returns status:%d", (int)status);
for (size_t i = 0; i < idList.size(); i++) {
@@ -239,22 +725,28 @@
ASSERT_EQ(Status::OK, status);
cameraDeviceNames = idList;
});
+ if (!ret.isOk()) {
+ ADD_FAILURE();
+ }
return cameraDeviceNames;
}
// Test if ICameraProvider::isTorchModeSupported returns Status::OK
TEST_F(CameraHidlTest, isTorchModeSupported) {
- CameraHidlEnvironment::Instance()->mProvider->isSetTorchModeSupported(
+ Return<void> ret;
+ ret = CameraHidlEnvironment::Instance()->mProvider->isSetTorchModeSupported(
[&](auto status, bool support) {
ALOGI("isSetTorchModeSupported returns status:%d supported:%d",
(int)status, support);
ASSERT_EQ(Status::OK, status);
});
+ ASSERT_TRUE(ret.isOk());
}
// TODO: consider removing this test if getCameraDeviceNames() has the same coverage
TEST_F(CameraHidlTest, getCameraIdList) {
- CameraHidlEnvironment::Instance()->mProvider->getCameraIdList(
+ Return<void> ret;
+ ret = CameraHidlEnvironment::Instance()->mProvider->getCameraIdList(
[&](auto status, const auto& idList) {
ALOGI("getCameraIdList returns status:%d", (int)status);
for (size_t i = 0; i < idList.size(); i++) {
@@ -265,11 +757,13 @@
 // Does not necessarily hold for external camera providers
ASSERT_GT(idList.size(), 0u);
});
+ ASSERT_TRUE(ret.isOk());
}
// Test if ICameraProvider::getVendorTags returns Status::OK
TEST_F(CameraHidlTest, getVendorTags) {
- CameraHidlEnvironment::Instance()->mProvider->getVendorTags(
+ Return<void> ret;
+ ret = CameraHidlEnvironment::Instance()->mProvider->getVendorTags(
[&](auto status, const auto& vendorTagSecs) {
ALOGI("getVendorTags returns status:%d numSections %zu",
(int)status, vendorTagSecs.size());
@@ -286,6 +780,7 @@
}
ASSERT_EQ(Status::OK, status);
});
+ ASSERT_TRUE(ret.isOk());
}
// Test if ICameraProvider::setCallback returns Status::OK
@@ -310,25 +805,36 @@
};
sp<ProviderCb> cb = new ProviderCb;
auto status = env->mProvider->setCallback(cb);
+ ASSERT_TRUE(status.isOk());
ASSERT_EQ(Status::OK, status);
- // TODO: right now no callbacks are fired because there is no external camera
- // or torch mode change. Need to test torch API in CameraDevice test later.
}
-// Test if ICameraProvider::getCameraDeviceInterface_V3_x returns Status::OK and non-null device
-TEST_F(CameraHidlTest, getCameraDeviceInterface_V3_x) {
+// Test if ICameraProvider::getCameraDeviceInterface returns Status::OK and non-null device
+TEST_F(CameraHidlTest, getCameraDeviceInterface) {
CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
- env->mProvider->getCameraDeviceInterface_V3_x(
+ Return<void> ret;
+ ret = env->mProvider->getCameraDeviceInterface_V3_x(
name,
[&](auto status, const auto& device3_2) {
ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
ASSERT_EQ(Status::OK, status);
ASSERT_NE(device3_2, nullptr);
});
+ ASSERT_TRUE(ret.isOk());
+ } else if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ Return<void> ret;
+ ret = env->mProvider->getCameraDeviceInterface_V1_x(
+ name,
+ [&](auto status, const auto& device1) {
+ ALOGI("getCameraDeviceInterface_V1_x returns status:%d", (int)status);
+ ASSERT_EQ(Status::OK, status);
+ ASSERT_NE(device1, nullptr);
+ });
+ ASSERT_TRUE(ret.isOk());
}
}
}
@@ -343,7 +849,8 @@
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
ALOGI("getResourceCost: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
+ Return<void> ret;
+ ret = env->mProvider->getCameraDeviceInterface_V3_x(
name,
[&](auto status, const auto& device) {
ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
@@ -351,8 +858,9 @@
ASSERT_NE(device, nullptr);
device3_2 = device;
});
+ ASSERT_TRUE(ret.isOk());
- device3_2->getResourceCost(
+ ret = device3_2->getResourceCost(
[&](auto status, const auto& resourceCost) {
ALOGI("getResourceCost returns status:%d", (int)status);
ASSERT_EQ(Status::OK, status);
@@ -362,6 +870,759 @@
ALOGI(" Conflicting device: %s", name.c_str());
}
});
+ ASSERT_TRUE(ret.isOk());
+ } else {
+ ::android::sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ ALOGI("getResourceCost: Testing camera device %s", name.c_str());
+ Return<void> ret;
+ ret = env->mProvider->getCameraDeviceInterface_V1_x(
+ name,
+ [&](auto status, const auto& device) {
+ ALOGI("getCameraDeviceInterface_V1_x returns status:%d", (int)status);
+ ASSERT_EQ(Status::OK, status);
+ ASSERT_NE(device, nullptr);
+ device1 = device;
+ });
+ ASSERT_TRUE(ret.isOk());
+
+ ret = device1->getResourceCost(
+ [&](auto status, const auto& resourceCost) {
+ ALOGI("getResourceCost returns status:%d", (int)status);
+ ASSERT_EQ(Status::OK, status);
+ ALOGI(" Resource cost is %d", resourceCost.resourceCost);
+ ASSERT_LE(resourceCost.resourceCost, 100u);
+ for (const auto& name : resourceCost.conflictingDevices) {
+ ALOGI(" Conflicting device: %s", name.c_str());
+ }
+ });
+ ASSERT_TRUE(ret.isOk());
+ }
+ }
+}
+
+// Verify that the static camera info can be retrieved
+// successfully.
+TEST_F(CameraHidlTest, getCameraInfo) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ ::android::sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ ALOGI("getCameraCharacteristics: Testing camera device %s", name.c_str());
+ Return<void> ret;
+ ret = env->mProvider->getCameraDeviceInterface_V1_x(
+ name,
+ [&](auto status, const auto& device) {
+ ALOGI("getCameraDeviceInterface_V1_x returns status:%d", (int)status);
+ ASSERT_EQ(Status::OK, status);
+ ASSERT_NE(device, nullptr);
+ device1 = device;
+ });
+ ASSERT_TRUE(ret.isOk());
+
+ ret = device1->getCameraInfo(
+ [&](auto status, const auto& info) {
+ ALOGI("getCameraInfo returns status:%d", (int)status);
+ ASSERT_EQ(Status::OK, status);
+ switch(info.orientation) {
+ case 0:
+ case 90:
+ case 180:
+ case 270:
+ //Expected cases
+ ALOGI("camera orientation: %d", info.orientation);
+ break;
+ default:
+ FAIL() << "Unexpected camera orientation:" << info.orientation;
+ }
+ switch(info.facing) {
+ case CameraFacing::BACK:
+ case CameraFacing::FRONT:
+ case CameraFacing::EXTERNAL:
+ //Expected cases
+ ALOGI("camera facing: %d", info.facing);
+ break;
+ default:
+ FAIL() << "Unexpected camera facing:" << static_cast<uint32_t> (info.facing);
+ }
+ });
+ ASSERT_TRUE(ret.isOk());
+ }
+ }
+}
+
+// Check whether preview window can be configured
+TEST_F(CameraHidlTest, setPreviewWindow) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+ sp<BufferItemConsumer> bufferItemConsumer;
+ sp<BufferItemHander> bufferHandler;
+ setupPreviewWindow(device1,
+ &bufferItemConsumer /*out*/, &bufferHandler /*out*/);
+
+ Return<void> ret;
+ ret = device1->close();
+ ASSERT_TRUE(ret.isOk());
+ }
+ }
+}
+
+// Verify that setting preview window fails in case device is not open
+TEST_F(CameraHidlTest, setPreviewWindowInvalid) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ ::android::sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ ALOGI("getCameraCharacteristics: Testing camera device %s", name.c_str());
+ Return<void> ret;
+ ret = env->mProvider->getCameraDeviceInterface_V1_x(
+ name,
+ [&](auto status, const auto& device) {
+ ALOGI("getCameraDeviceInterface_V1_x returns status:%d", (int)status);
+ ASSERT_EQ(Status::OK, status);
+ ASSERT_NE(device, nullptr);
+ device1 = device;
+ });
+ ASSERT_TRUE(ret.isOk());
+
+ Return<Status> returnStatus = device1->setPreviewWindow(nullptr);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OPERATION_NOT_SUPPORTED, returnStatus);
+ }
+ }
+}
+
+// Start and stop preview checking whether it gets enabled in between.
+TEST_F(CameraHidlTest, startStopPreview) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+ sp<BufferItemConsumer> bufferItemConsumer;
+ sp<BufferItemHander> bufferHandler;
+ setupPreviewWindow(device1,
+ &bufferItemConsumer /*out*/, &bufferHandler /*out*/);
+
+ startPreview(device1);
+
+ Return<bool> returnBoolStatus = device1->previewEnabled();
+ ASSERT_TRUE(returnBoolStatus.isOk());
+ ASSERT_TRUE(returnBoolStatus);
+
+ stopPreviewAndClose(device1);
+ }
+ }
+}
+
+// Start preview without active preview window. Preview should start as soon
+// as a valid active window gets configured.
+TEST_F(CameraHidlTest, startStopPreviewDelayed) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+
+ Return<Status> returnStatus = device1->setPreviewWindow(nullptr);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
+ startPreview(device1);
+
+ sp<BufferItemConsumer> bufferItemConsumer;
+ sp<BufferItemHander> bufferHandler;
+ setupPreviewWindow(device1, &bufferItemConsumer /*out*/,
+ &bufferHandler /*out*/);
+
+ //Preview should get enabled now
+ Return<bool> returnBoolStatus = device1->previewEnabled();
+ ASSERT_TRUE(returnBoolStatus.isOk());
+ ASSERT_TRUE(returnBoolStatus);
+
+ stopPreviewAndClose(device1);
+ }
+ }
+}
+
+// Verify that image capture behaves as expected along with preview callbacks.
+TEST_F(CameraHidlTest, takePicture) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+ sp<BufferItemConsumer> bufferItemConsumer;
+ sp<BufferItemHander> bufferHandler;
+ setupPreviewWindow(device1, &bufferItemConsumer /*out*/,
+ &bufferHandler /*out*/);
+
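+ // RAW_IMAGE_NOTIFY is not among the enabled message types, so it acts as a
+ // sentinel: any change to mDataMessageTypeReceived indicates a received frame.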
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ mDataMessageTypeReceived = DataCallbackMsg::RAW_IMAGE_NOTIFY;
+ }
+
+ enableMsgType((unsigned int)DataCallbackMsg::PREVIEW_FRAME, device1);
+ startPreview(device1);
+
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ waitForFrameLocked(DataCallbackMsg::PREVIEW_FRAME, l);
+ }
+
+ disableMsgType((unsigned int)DataCallbackMsg::PREVIEW_FRAME,
+ device1);
+ enableMsgType((unsigned int)DataCallbackMsg::COMPRESSED_IMAGE,
+ device1);
+
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ mDataMessageTypeReceived = DataCallbackMsg::RAW_IMAGE_NOTIFY;
+ }
+
+ Return<Status> returnStatus = device1->takePicture();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ waitForFrameLocked(DataCallbackMsg::COMPRESSED_IMAGE, l);
+ }
+
+ disableMsgType((unsigned int)DataCallbackMsg::COMPRESSED_IMAGE,
+ device1);
+ stopPreviewAndClose(device1);
+ }
+ }
+}
+
+// Image capture should fail in case preview didn't get enabled first.
+TEST_F(CameraHidlTest, takePictureFail) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+
+ Return<Status> returnStatus = device1->takePicture();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_NE(Status::OK, returnStatus);
+
+ Return<void> ret = device1->close();
+ ASSERT_TRUE(ret.isOk());
+ }
+ }
+}
+
+// Verify that image capture can be cancelled.
+TEST_F(CameraHidlTest, cancelPicture) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+ sp<BufferItemConsumer> bufferItemConsumer;
+ sp<BufferItemHander> bufferHandler;
+ setupPreviewWindow(device1, &bufferItemConsumer /*out*/,
+ &bufferHandler /*out*/);
+ startPreview(device1);
+
+ Return<Status> returnStatus = device1->takePicture();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
+ returnStatus = device1->cancelPicture();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
+ stopPreviewAndClose(device1);
+ }
+ }
+}
+
+// Image capture cancel should fail when image capture is not running.
+TEST_F(CameraHidlTest, cancelPictureFail) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+ sp<BufferItemConsumer> bufferItemConsumer;
+ sp<BufferItemHander> bufferHandler;
+ setupPreviewWindow(device1, &bufferItemConsumer /*out*/,
+ &bufferHandler /*out*/);
+ startPreview(device1);
+
+ Return<Status> returnStatus = device1->cancelPicture();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_NE(Status::OK, returnStatus);
+
+ stopPreviewAndClose(device1);
+ }
+ }
+}
+
+// Test basic video recording.
+TEST_F(CameraHidlTest, startStopRecording) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+ sp<BufferItemConsumer> bufferItemConsumer;
+ sp<BufferItemHander> bufferHandler;
+ setupPreviewWindow(device1, &bufferItemConsumer /*out*/,
+ &bufferHandler /*out*/);
+
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ mDataMessageTypeReceived = DataCallbackMsg::RAW_IMAGE_NOTIFY;
+ }
+
+ enableMsgType((unsigned int)DataCallbackMsg::PREVIEW_FRAME, device1);
+ startPreview(device1);
+
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ waitForFrameLocked(DataCallbackMsg::PREVIEW_FRAME, l);
+ mDataMessageTypeReceived = DataCallbackMsg::RAW_IMAGE_NOTIFY;
+ mVideoBufferIndex = UINT32_MAX;
+ }
+
+ disableMsgType((unsigned int)DataCallbackMsg::PREVIEW_FRAME, device1);
+
+ bool videoMetaEnabled = false;
+ Return<Status> returnStatus = device1->storeMetaDataInBuffers(true);
+ ASSERT_TRUE(returnStatus.isOk());
+ // It is allowed for devices to not support this feature
+ ASSERT_TRUE((Status::OK == returnStatus) ||
+ (Status::OPERATION_NOT_SUPPORTED == returnStatus));
+ if (Status::OK == returnStatus) {
+ videoMetaEnabled = true;
+ }
+
+ enableMsgType((unsigned int)DataCallbackMsg::VIDEO_FRAME, device1);
+ Return<bool> returnBoolStatus = device1->recordingEnabled();
+ ASSERT_TRUE(returnBoolStatus.isOk());
+ ASSERT_FALSE(returnBoolStatus);
+
+ returnStatus = device1->startRecording();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ waitForFrameLocked(DataCallbackMsg::VIDEO_FRAME, l);
+ ASSERT_NE(UINT32_MAX, mVideoBufferIndex);
+ disableMsgType((unsigned int)DataCallbackMsg::VIDEO_FRAME,
+ device1);
+ }
+
+ returnBoolStatus = device1->recordingEnabled();
+ ASSERT_TRUE(returnBoolStatus.isOk());
+ ASSERT_TRUE(returnBoolStatus);
+
+ Return<void> ret;
+ if (videoMetaEnabled) {
+ ret = device1->releaseRecordingFrameHandle(mVideoData,
+ mVideoBufferIndex, mVideoNativeHandle);
+ ASSERT_TRUE(ret.isOk());
+ } else {
+ ret = device1->releaseRecordingFrame(mVideoData, mVideoBufferIndex);
+ ASSERT_TRUE(ret.isOk());
+ }
+
+ ret = device1->stopRecording();
+ ASSERT_TRUE(ret.isOk());
+
+ stopPreviewAndClose(device1);
+ }
+ }
+}
+
+// It shouldn't be possible to start recording without enabling preview first.
+TEST_F(CameraHidlTest, startRecordingFail) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+
+ Return<bool> returnBoolStatus = device1->recordingEnabled();
+ ASSERT_TRUE(returnBoolStatus.isOk());
+ ASSERT_FALSE(returnBoolStatus);
+
+ Return<Status> returnStatus = device1->startRecording();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_NE(Status::OK, returnStatus);
+
+ Return<void> ret = device1->close();
+ ASSERT_TRUE(ret.isOk());
+ }
+ }
+}
+
+// Check autofocus support if available.
+TEST_F(CameraHidlTest, autoFocus) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+ std::vector<const char *> focusModes = {CameraParameters::FOCUS_MODE_AUTO,
+ CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE,
+ CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO};
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+
+ ::android::CameraParameters cameraParams;
+ getParameters(device1, &cameraParams /*out*/);
+
+ if (Status::OK != isAutoFocusModeAvailable(cameraParams,
+ CameraParameters::FOCUS_MODE_AUTO)) {
+ Return<void> ret = device1->close();
+ ASSERT_TRUE(ret.isOk());
+ continue;
+ }
+
+ sp<BufferItemConsumer> bufferItemConsumer;
+ sp<BufferItemHander> bufferHandler;
+ setupPreviewWindow(device1, &bufferItemConsumer /*out*/,
+ &bufferHandler /*out*/);
+ startPreview(device1);
+ enableMsgType((unsigned int)NotifyCallbackMsg::FOCUS, device1);
+
+ for (auto &iter : focusModes) {
+ if (Status::OK != isAutoFocusModeAvailable(cameraParams,
+ iter)) {
+ continue;
+ }
+
+ cameraParams.set(CameraParameters::KEY_FOCUS_MODE, iter);
+ setParameters(device1, cameraParams);
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ mNotifyMessage = NotifyCallbackMsg::ERROR;
+ }
+
+ Return<Status> returnStatus = device1->autoFocus();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ while (NotifyCallbackMsg::FOCUS != mNotifyMessage) {
+ auto timeout = std::chrono::system_clock::now() +
+ std::chrono::seconds(kAutoFocusTimeoutSec);
+ ASSERT_NE(std::cv_status::timeout,
+ mResultCondition.wait_until(l, timeout));
+ }
+ }
+ }
+
+ disableMsgType((unsigned int)NotifyCallbackMsg::FOCUS, device1);
+ stopPreviewAndClose(device1);
+ }
+ }
+}
+
+// In case autofocus is supported verify that it can be cancelled.
+TEST_F(CameraHidlTest, cancelAutoFocus) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+
+ ::android::CameraParameters cameraParams;
+ getParameters(device1, &cameraParams /*out*/);
+
+ if (Status::OK != isAutoFocusModeAvailable(cameraParams,
+ CameraParameters::FOCUS_MODE_AUTO)) {
+ Return<void> ret = device1->close();
+ ASSERT_TRUE(ret.isOk());
+ continue;
+ }
+
+ // It should be fine to call before preview starts.
+ ASSERT_EQ(Status::OK, device1->cancelAutoFocus());
+
+ sp<BufferItemConsumer> bufferItemConsumer;
+ sp<BufferItemHander> bufferHandler;
+ setupPreviewWindow(device1, &bufferItemConsumer /*out*/,
+ &bufferHandler /*out*/);
+ startPreview(device1);
+
+ // It should be fine to call after preview starts too.
+ Return<Status> returnStatus = device1->cancelAutoFocus();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
+ returnStatus = device1->autoFocus();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
+ returnStatus = device1->cancelAutoFocus();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
+ stopPreviewAndClose(device1);
+ }
+ }
+}
+
+// Check whether face detection is available and try to enable and disable it.
+TEST_F(CameraHidlTest, sendCommandFaceDetection) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+
+ ::android::CameraParameters cameraParams;
+ getParameters(device1, &cameraParams /*out*/);
+
+ int32_t hwFaces = cameraParams.getInt(
+ CameraParameters::KEY_MAX_NUM_DETECTED_FACES_HW);
+ int32_t swFaces = cameraParams.getInt(
+ CameraParameters::KEY_MAX_NUM_DETECTED_FACES_SW);
+ if ((0 >= hwFaces) && (0 >= swFaces)) {
+ Return<void> ret = device1->close();
+ ASSERT_TRUE(ret.isOk());
+ continue;
+ }
+
+ sp<BufferItemConsumer> bufferItemConsumer;
+ sp<BufferItemHander> bufferHandler;
+ setupPreviewWindow(device1, &bufferItemConsumer /*out*/,
+ &bufferHandler /*out*/);
+ startPreview(device1);
+
+ if (0 < hwFaces) {
+ Return<Status> returnStatus = device1->sendCommand(
+ CommandType::START_FACE_DETECTION,
+ CAMERA_FACE_DETECTION_HW, 0);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+ // TODO(epeev) : Enable and check for face notifications
+ returnStatus = device1->sendCommand(
+ CommandType::STOP_FACE_DETECTION,
+ CAMERA_FACE_DETECTION_HW, 0);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+ }
+
+ if (0 < swFaces) {
+ Return<Status> returnStatus = device1->sendCommand(
+ CommandType::START_FACE_DETECTION,
+ CAMERA_FACE_DETECTION_SW, 0);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+ // TODO(epeev) : Enable and check for face notifications
+ returnStatus = device1->sendCommand(
+ CommandType::STOP_FACE_DETECTION,
+ CAMERA_FACE_DETECTION_SW, 0);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+ }
+
+ stopPreviewAndClose(device1);
+ }
+ }
+}
+
+// Check whether smooth zoom is available and try to enable and disable it.
+TEST_F(CameraHidlTest, sendCommandSmoothZoom) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+
+ ::android::CameraParameters cameraParams;
+ getParameters(device1, &cameraParams /*out*/);
+
+ const char *smoothZoomStr = cameraParams.get(
+ CameraParameters::KEY_SMOOTH_ZOOM_SUPPORTED);
+ bool smoothZoomSupported = (nullptr != smoothZoomStr) &&
+ (strcmp(smoothZoomStr, CameraParameters::TRUE) == 0);
+ if (!smoothZoomSupported) {
+ Return<void> ret = device1->close();
+ ASSERT_TRUE(ret.isOk());
+ continue;
+ }
+
+ int32_t maxZoom = cameraParams.getInt(
+ CameraParameters::KEY_MAX_ZOOM);
+ ASSERT_TRUE(0 < maxZoom);
+
+ sp<BufferItemConsumer> bufferItemConsumer;
+ sp<BufferItemHander> bufferHandler;
+ setupPreviewWindow(device1, &bufferItemConsumer /*out*/,
+ &bufferHandler /*out*/);
+ startPreview(device1);
+ setParameters(device1, cameraParams);
+
+ Return<Status> returnStatus = device1->sendCommand(
+ CommandType::START_SMOOTH_ZOOM, maxZoom, 0);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+ // TODO(epeev) : Enable and check for smooth zoom notifications
+ returnStatus = device1->sendCommand(CommandType::STOP_SMOOTH_ZOOM,
+ 0, 0);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
+ stopPreviewAndClose(device1);
+ }
+ }
+}
+
+// Basic sanity tests related to camera parameters.
+TEST_F(CameraHidlTest, getSetParameters) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+
+ ::android::CameraParameters cameraParams;
+ getParameters(device1, &cameraParams /*out*/);
+
+ int32_t width, height;
+ cameraParams.getPictureSize(&width, &height);
+ ASSERT_TRUE((0 < width) && (0 < height));
+ cameraParams.getPreviewSize(&width, &height);
+ ASSERT_TRUE((0 < width) && (0 < height));
+ int32_t minFps, maxFps;
+ cameraParams.getPreviewFpsRange(&minFps, &maxFps);
+ ASSERT_TRUE((0 < minFps) && (0 < maxFps));
+ ASSERT_NE(nullptr, cameraParams.getPreviewFormat());
+ ASSERT_NE(nullptr, cameraParams.getPictureFormat());
+ ASSERT_TRUE(strcmp(CameraParameters::PIXEL_FORMAT_JPEG,
+ cameraParams.getPictureFormat()) == 0);
+
+ const char *flashMode = cameraParams.get(
+ CameraParameters::KEY_FLASH_MODE);
+ ASSERT_TRUE((nullptr == flashMode) || (strcmp(
+ CameraParameters::FLASH_MODE_OFF, flashMode) == 0));
+
+ const char *wbMode = cameraParams.get(
+ CameraParameters::KEY_WHITE_BALANCE);
+ ASSERT_TRUE((nullptr == wbMode) || (strcmp(
+ CameraParameters::WHITE_BALANCE_AUTO, wbMode) == 0));
+
+ const char *effect = cameraParams.get(CameraParameters::KEY_EFFECT);
+ ASSERT_TRUE((nullptr == effect) || (strcmp(
+ CameraParameters::EFFECT_NONE, effect) == 0));
+
+ ::android::Vector<::android::Size> previewSizes;
+ cameraParams.getSupportedPreviewSizes(previewSizes);
+ ASSERT_FALSE(previewSizes.empty());
+ ::android::Vector<::android::Size> pictureSizes;
+ cameraParams.getSupportedPictureSizes(pictureSizes);
+ ASSERT_FALSE(pictureSizes.empty());
+ const char *previewFormats = cameraParams.get(
+ CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS);
+ ASSERT_NE(nullptr, previewFormats);
+ ::android::String8 previewFormatsString(previewFormats);
+ ASSERT_TRUE(previewFormatsString.contains(
+ CameraParameters::PIXEL_FORMAT_YUV420SP));
+ ASSERT_NE(nullptr, cameraParams.get(
+ CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS));
+ ASSERT_NE(nullptr, cameraParams.get(
+ CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES));
+ const char *focusModes = cameraParams.get(
+ CameraParameters::KEY_SUPPORTED_FOCUS_MODES);
+ ASSERT_NE(nullptr, focusModes);
+ ::android::String8 focusModesString(focusModes);
+ const char *focusMode = cameraParams.get(
+ CameraParameters::KEY_FOCUS_MODE);
+ ASSERT_NE(nullptr, focusMode);
+ // Auto focus mode should be default
+ if (focusModesString.contains(CameraParameters::FOCUS_MODE_AUTO)) {
+ ASSERT_TRUE(strcmp(
+ CameraParameters::FOCUS_MODE_AUTO, focusMode) == 0);
+ }
+ ASSERT_TRUE(0 < cameraParams.getInt(
+ CameraParameters::KEY_FOCAL_LENGTH));
+ int32_t horizontalViewAngle = cameraParams.getInt(
+ CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE);
+ ASSERT_TRUE((0 < horizontalViewAngle) && (360 >= horizontalViewAngle));
+ int32_t verticalViewAngle = cameraParams.getInt(
+ CameraParameters::KEY_VERTICAL_VIEW_ANGLE);
+ ASSERT_TRUE((0 < verticalViewAngle) && (360 >= verticalViewAngle));
+ int32_t jpegQuality = cameraParams.getInt(
+ CameraParameters::KEY_JPEG_QUALITY);
+ ASSERT_TRUE((1 <= jpegQuality) && (100 >= jpegQuality));
+ int32_t jpegThumbQuality = cameraParams.getInt(
+ CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY);
+ ASSERT_TRUE((1 <= jpegThumbQuality) && (100 >= jpegThumbQuality));
+
+ cameraParams.setPictureSize(pictureSizes[0].width,
+ pictureSizes[0].height);
+ cameraParams.setPreviewSize(previewSizes[0].width,
+ previewSizes[0].height);
+
+ setParameters(device1, cameraParams);
+ getParameters(device1, &cameraParams /*out*/);
+
+ cameraParams.getPictureSize(&width, &height);
+ ASSERT_TRUE((pictureSizes[0].width == width) &&
+ (pictureSizes[0].height == height));
+ cameraParams.getPreviewSize(&width, &height);
+ ASSERT_TRUE((previewSizes[0].width == width) &&
+ (previewSizes[0].height == height));
+
+ Return<void> ret = device1->close();
+ ASSERT_TRUE(ret.isOk());
}
}
}
@@ -376,7 +1637,8 @@
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
ALOGI("getCameraCharacteristics: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
+ Return<void> ret;
+ ret = env->mProvider->getCameraDeviceInterface_V3_x(
name,
[&](auto status, const auto& device) {
ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
@@ -384,8 +1646,9 @@
ASSERT_NE(device, nullptr);
device3_2 = device;
});
+ ASSERT_TRUE(ret.isOk());
- device3_2->getCameraCharacteristics(
+ ret = device3_2->getCameraCharacteristics(
[&](auto status, const auto& chars) {
ALOGI("getCameraCharacteristics returns status:%d", (int)status);
ASSERT_EQ(Status::OK, status);
@@ -398,17 +1661,20 @@
ASSERT_GT(entryCount, 0u);
ALOGI("getCameraCharacteristics metadata entry count is %zu", entryCount);
});
+ ASSERT_TRUE(ret.isOk());
}
}
}
 // In case it is supported, verify that torch can be enabled.
+// Check for corresponding torch callbacks as well.
TEST_F(CameraHidlTest, setTorchMode) {
CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
bool torchControlSupported = false;
+ Return<void> ret;
- CameraHidlEnvironment::Instance()->mProvider->isSetTorchModeSupported(
+ ret = CameraHidlEnvironment::Instance()->mProvider->isSetTorchModeSupported(
[&](auto status, bool support) {
ALOGI("isSetTorchModeSupported returns status:%d supported:%d",
(int)status, support);
@@ -416,11 +1682,17 @@
torchControlSupported = support;
});
+
+ sp<TorchProviderCb> cb = new TorchProviderCb(this);
+ Return<Status> returnStatus = env->mProvider->setCallback(cb);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
ALOGI("setTorchMode: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
+ ret = env->mProvider->getCameraDeviceInterface_V3_x(
name,
[&](auto status, const auto& device) {
ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
@@ -428,32 +1700,116 @@
ASSERT_NE(device, nullptr);
device3_2 = device;
});
+ ASSERT_TRUE(ret.isOk());
- Status status = device3_2->setTorchMode(TorchMode::ON);
- ALOGI("setTorchMode return status %d", (int)status);
+ mTorchStatus = TorchModeStatus::NOT_AVAILABLE;
+ returnStatus = device3_2->setTorchMode(TorchMode::ON);
+ ASSERT_TRUE(returnStatus.isOk());
if (!torchControlSupported) {
- ASSERT_EQ(Status::METHOD_NOT_SUPPORTED, status);
+ ASSERT_EQ(Status::METHOD_NOT_SUPPORTED, returnStatus);
} else {
- ASSERT_TRUE(status == Status::OK || status == Status::OPERATION_NOT_SUPPORTED);
- if (status == Status::OK) {
- status = device3_2->setTorchMode(TorchMode::OFF);
- ASSERT_EQ(Status::OK, status);
+ ASSERT_TRUE(returnStatus == Status::OK ||
+ returnStatus == Status::OPERATION_NOT_SUPPORTED);
+ if (returnStatus == Status::OK) {
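+ // Wait for the registered provider callback to confirm that the torch
+ // actually switched on.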
+ {
+ std::unique_lock<std::mutex> l(mTorchLock);
+ while (TorchModeStatus::NOT_AVAILABLE == mTorchStatus) {
+ auto timeout = std::chrono::system_clock::now() +
+ std::chrono::seconds(kTorchTimeoutSec);
+ ASSERT_NE(std::cv_status::timeout,
+ mTorchCond.wait_until(l, timeout));
+ }
+ ASSERT_EQ(TorchModeStatus::AVAILABLE_ON, mTorchStatus);
+ mTorchStatus = TorchModeStatus::NOT_AVAILABLE;
+ }
+
+ returnStatus = device3_2->setTorchMode(TorchMode::OFF);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
+ {
+ std::unique_lock<std::mutex> l(mTorchLock);
+ while (TorchModeStatus::NOT_AVAILABLE == mTorchStatus) {
+ auto timeout = std::chrono::system_clock::now() +
+ std::chrono::seconds(kTorchTimeoutSec);
+ ASSERT_NE(std::cv_status::timeout,
+ mTorchCond.wait_until(l, timeout));
+ }
+ ASSERT_EQ(TorchModeStatus::AVAILABLE_OFF, mTorchStatus);
+ }
}
}
+ } else if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ ::android::sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ ALOGI("dumpState: Testing camera device %s", name.c_str());
+ ret = env->mProvider->getCameraDeviceInterface_V1_x(
+ name,
+ [&](auto status, const auto& device) {
+ ALOGI("getCameraDeviceInterface_V1_x returns status:%d", (int)status);
+ ASSERT_EQ(Status::OK, status);
+ ASSERT_NE(device, nullptr);
+ device1 = device;
+ });
+ ASSERT_TRUE(ret.isOk());
+
+ mTorchStatus = TorchModeStatus::NOT_AVAILABLE;
+ returnStatus = device1->setTorchMode(TorchMode::ON);
+ ASSERT_TRUE(returnStatus.isOk());
+ if (!torchControlSupported) {
+ ASSERT_EQ(Status::METHOD_NOT_SUPPORTED, returnStatus);
+ } else {
+ ASSERT_TRUE(returnStatus == Status::OK ||
+ returnStatus == Status::OPERATION_NOT_SUPPORTED);
+ if (returnStatus == Status::OK) {
+ {
+ std::unique_lock<std::mutex> l(mTorchLock);
+ while (TorchModeStatus::NOT_AVAILABLE == mTorchStatus) {
+ auto timeout = std::chrono::system_clock::now() +
+ std::chrono::seconds(kTorchTimeoutSec);
+ ASSERT_NE(std::cv_status::timeout,
+ mTorchCond.wait_until(l, timeout));
+ }
+ ASSERT_EQ(TorchModeStatus::AVAILABLE_ON, mTorchStatus);
+ mTorchStatus = TorchModeStatus::NOT_AVAILABLE;
+ }
+
+ returnStatus = device1->setTorchMode(TorchMode::OFF);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
+ {
+ std::unique_lock<std::mutex> l(mTorchLock);
+ while (TorchModeStatus::NOT_AVAILABLE == mTorchStatus) {
+ auto timeout = std::chrono::system_clock::now() +
+ std::chrono::seconds(kTorchTimeoutSec);
+ ASSERT_NE(std::cv_status::timeout,
+ mTorchCond.wait_until(l, timeout));
+ }
+ ASSERT_EQ(TorchModeStatus::AVAILABLE_OFF, mTorchStatus);
+ }
+ }
+ }
+ ret = device1->close();
+ ASSERT_TRUE(ret.isOk());
}
}
+
+ returnStatus = env->mProvider->setCallback(nullptr);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
}
// Check dump functionality.
TEST_F(CameraHidlTest, dumpState) {
CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+ Return<void> ret;
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
ALOGI("dumpState: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
+ ret = env->mProvider->getCameraDeviceInterface_V3_x(
name,
[&](auto status, const auto& device) {
ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
@@ -461,12 +1817,36 @@
ASSERT_NE(device, nullptr);
device3_2 = device;
});
+ ASSERT_TRUE(ret.isOk());
native_handle_t* raw_handle = native_handle_create(1, 0);
- raw_handle->data[0] = open(DUMP_OUTPUT, O_RDWR);
+ raw_handle->data[0] = open(kDumpOutput, O_RDWR);
ASSERT_GE(raw_handle->data[0], 0);
hidl_handle handle = raw_handle;
- device3_2->dumpState(handle);
+ ret = device3_2->dumpState(handle);
+ ASSERT_TRUE(ret.isOk());
+ close(raw_handle->data[0]);
+ native_handle_delete(raw_handle);
+ } else if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ ::android::sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ ALOGI("dumpState: Testing camera device %s", name.c_str());
+ ret = env->mProvider->getCameraDeviceInterface_V1_x(
+ name,
+ [&](auto status, const auto& device) {
+ ALOGI("getCameraDeviceInterface_V1_x returns status:%d", (int)status);
+ ASSERT_EQ(Status::OK, status);
+ ASSERT_NE(device, nullptr);
+ device1 = device;
+ });
+ ASSERT_TRUE(ret.isOk());
+
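+ // Wrap a file descriptor to kDumpOutput in a one-fd native handle so dumpState() has somewhere to write.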
+ native_handle_t* raw_handle = native_handle_create(1, 0);
+ raw_handle->data[0] = open(kDumpOutput, O_RDWR);
+ ASSERT_GE(raw_handle->data[0], 0);
+ hidl_handle handle = raw_handle;
+ Return<Status> returnStatus = device1->dumpState(handle);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
close(raw_handle->data[0]);
native_handle_delete(raw_handle);
}
@@ -477,12 +1857,13 @@
TEST_F(CameraHidlTest, openClose) {
CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+ Return<void> ret;
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
ALOGI("openClose: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
+ ret = env->mProvider->getCameraDeviceInterface_V3_x(
name,
[&](auto status, const auto& device) {
ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
@@ -490,10 +1871,11 @@
ASSERT_NE(device, nullptr);
device3_2 = device;
});
+ ASSERT_TRUE(ret.isOk());
sp<EmptyDeviceCb> cb = new EmptyDeviceCb;
sp<ICameraDeviceSession> session;
- device3_2->open(
+ ret = device3_2->open(
cb,
[&](auto status, const auto& newSession) {
ALOGI("device::open returns status:%d", (int)status);
@@ -501,18 +1883,38 @@
ASSERT_NE(newSession, nullptr);
session = newSession;
});
+ ASSERT_TRUE(ret.isOk());
native_handle_t* raw_handle = native_handle_create(1, 0);
- raw_handle->data[0] = open(DUMP_OUTPUT, O_RDWR);
+ raw_handle->data[0] = open(kDumpOutput, O_RDWR);
ASSERT_GE(raw_handle->data[0], 0);
hidl_handle handle = raw_handle;
- device3_2->dumpState(handle);
+ ret = device3_2->dumpState(handle);
+ ASSERT_TRUE(ret.isOk());
close(raw_handle->data[0]);
native_handle_delete(raw_handle);
- session->close();
+ ret = session->close();
+ ASSERT_TRUE(ret.isOk());
// TODO: test all session API calls return INTERNAL_ERROR after close
// TODO: keep a wp copy here and verify session cannot be promoted out of this scope
+ } else if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_1_0) {
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> device1;
+ openCameraDevice(name, env, &device1 /*out*/);
+ ASSERT_NE(nullptr, device1.get());
+
+ native_handle_t* raw_handle = native_handle_create(1, 0);
+ raw_handle->data[0] = open(kDumpOutput, O_RDWR);
+ ASSERT_GE(raw_handle->data[0], 0);
+ hidl_handle handle = raw_handle;
+ Return<Status> returnStatus = device1->dumpState(handle);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+ close(raw_handle->data[0]);
+ native_handle_delete(raw_handle);
+
+ ret = device1->close();
+ ASSERT_TRUE(ret.isOk());
}
}
}
@@ -526,8 +1928,9 @@
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
+ Return<void> ret;
ALOGI("constructDefaultRequestSettings: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
+ ret = env->mProvider->getCameraDeviceInterface_V3_x(
name,
[&](auto status, const auto& device) {
ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
@@ -535,10 +1938,11 @@
ASSERT_NE(device, nullptr);
device3_2 = device;
});
+ ASSERT_TRUE(ret.isOk());
sp<EmptyDeviceCb> cb = new EmptyDeviceCb;
sp<ICameraDeviceSession> session;
- device3_2->open(
+ ret = device3_2->open(
cb,
[&](auto status, const auto& newSession) {
ALOGI("device::open returns status:%d", (int)status);
@@ -546,11 +1950,12 @@
ASSERT_NE(newSession, nullptr);
session = newSession;
});
+ ASSERT_TRUE(ret.isOk());
for (uint32_t t = (uint32_t) RequestTemplate::PREVIEW;
t <= (uint32_t) RequestTemplate::MANUAL; t++) {
RequestTemplate reqTemplate = (RequestTemplate) t;
- session->constructDefaultRequestSettings(
+ ret = session->constructDefaultRequestSettings(
reqTemplate,
[&](auto status, const auto& req) {
ALOGI("constructDefaultRequestSettings returns status:%d", (int)status);
@@ -577,8 +1982,10 @@
ASSERT_EQ(0u, req.size());
}
});
+ ASSERT_TRUE(ret.isOk());
}
- session->close();
+ ret = session->close();
+ ASSERT_TRUE(ret.isOk());
}
}
}
@@ -592,35 +1999,12 @@
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
- ::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
- ALOGI("configureStreams: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
- name,
- [&](auto status, const auto& device) {
- ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(device, nullptr);
- device3_2 = device;
- });
-
- sp<EmptyDeviceCb> cb = new EmptyDeviceCb;
- sp<ICameraDeviceSession> session;
- device3_2->open(
- cb,
- [&](auto status, const auto& newSession) {
- ALOGI("device::open returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(newSession, nullptr);
- session = newSession;
- });
-
camera_metadata_t *staticMeta;
- device3_2->getCameraCharacteristics([&] (Status s,
- CameraMetadata metadata) {
- ASSERT_EQ(Status::OK, s);
- staticMeta =
- reinterpret_cast<camera_metadata_t*>(metadata.data());
- });
+ Return<void> ret;
+ sp<ICameraDeviceSession> session;
+ openEmptyDeviceSession(name, env, &session /*out*/,
+ &staticMeta /*out*/);
+
outputStreams.clear();
ASSERT_EQ(Status::OK, getAvailableOutputStreams(staticMeta,
outputStreams));
@@ -636,16 +2020,19 @@
::android::hardware::hidl_vec<Stream> streams = {stream};
StreamConfiguration config = {streams,
StreamConfigurationMode::NORMAL_MODE};
- session->configureStreams(config, [streamId] (Status s,
+ ret = session->configureStreams(config, [streamId] (Status s,
HalStreamConfiguration halConfig) {
ASSERT_EQ(Status::OK, s);
ASSERT_EQ(1u, halConfig.streams.size());
ASSERT_EQ(halConfig.streams[0].id, streamId);
});
+ ASSERT_TRUE(ret.isOk());
streamId++;
}
- session->close();
+ free_camera_metadata(staticMeta);
+ ret = session->close();
+ ASSERT_TRUE(ret.isOk());
}
}
}
@@ -658,35 +2045,12 @@
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
- ::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
- ALOGI("configureStreams: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
- name,
- [&](auto status, const auto& device) {
- ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(device, nullptr);
- device3_2 = device;
- });
-
- sp<EmptyDeviceCb> cb = new EmptyDeviceCb;
- sp<ICameraDeviceSession> session;
- device3_2->open(
- cb,
- [&](auto status, const auto& newSession) {
- ALOGI("device::open returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(newSession, nullptr);
- session = newSession;
- });
-
camera_metadata_t *staticMeta;
- device3_2->getCameraCharacteristics([&] (Status s,
- CameraMetadata metadata) {
- ASSERT_EQ(Status::OK, s);
- staticMeta =
- reinterpret_cast<camera_metadata_t*>(metadata.data());
- });
+ Return<void> ret;
+ sp<ICameraDeviceSession> session;
+ openEmptyDeviceSession(name, env, &session /*out*/,
+ &staticMeta /*out*/);
+
outputStreams.clear();
ASSERT_EQ(Status::OK, getAvailableOutputStreams(staticMeta,
outputStreams));
@@ -701,10 +2065,12 @@
::android::hardware::hidl_vec<Stream> streams = {stream};
StreamConfiguration config = {streams,
StreamConfigurationMode::NORMAL_MODE};
- session->configureStreams(config, [] (Status s,
+ ret = session->configureStreams(config, [] (Status s,
HalStreamConfiguration) {
- ASSERT_EQ(Status::ILLEGAL_ARGUMENT, s);
+ ASSERT_TRUE((Status::ILLEGAL_ARGUMENT == s) ||
+ (Status::INTERNAL_ERROR == s));
});
+ ASSERT_TRUE(ret.isOk());
stream = {streamId++, StreamType::OUTPUT,
static_cast<uint32_t> (UINT32_MAX),
@@ -714,10 +2080,11 @@
streams[0] = stream;
config = {streams,
StreamConfigurationMode::NORMAL_MODE};
- session->configureStreams(config, [] (Status s,
+ ret = session->configureStreams(config, [] (Status s,
HalStreamConfiguration) {
ASSERT_EQ(Status::ILLEGAL_ARGUMENT, s);
});
+ ASSERT_TRUE(ret.isOk());
for (auto &it : outputStreams) {
stream = {streamId++, StreamType::OUTPUT,
@@ -728,10 +2095,11 @@
streams[0] = stream;
config = {streams,
StreamConfigurationMode::NORMAL_MODE};
- session->configureStreams(config, [] (Status s,
+ ret = session->configureStreams(config, [] (Status s,
HalStreamConfiguration) {
ASSERT_EQ(Status::ILLEGAL_ARGUMENT, s);
});
+ ASSERT_TRUE(ret.isOk());
stream = {streamId++, StreamType::OUTPUT,
static_cast<uint32_t> (it.width),
@@ -741,13 +2109,16 @@
streams[0] = stream;
config = {streams,
StreamConfigurationMode::NORMAL_MODE};
- session->configureStreams(config, [] (Status s,
+ ret = session->configureStreams(config, [] (Status s,
HalStreamConfiguration) {
ASSERT_EQ(Status::ILLEGAL_ARGUMENT, s);
});
+ ASSERT_TRUE(ret.isOk());
}
- session->close();
+ free_camera_metadata(staticMeta);
+ ret = session->close();
+ ASSERT_TRUE(ret.isOk());
}
}
}
@@ -762,41 +2133,19 @@
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
- ::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
- ALOGI("configureStreams: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
- name,
- [&](auto status, const auto& device) {
- ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(device, nullptr);
- device3_2 = device;
- });
-
- sp<EmptyDeviceCb> cb = new EmptyDeviceCb;
- sp<ICameraDeviceSession> session;
- device3_2->open(
- cb,
- [&](auto status, const auto& newSession) {
- ALOGI("device::open returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(newSession, nullptr);
- session = newSession;
- });
-
camera_metadata_t *staticMeta;
- device3_2->getCameraCharacteristics([&] (Status s,
- CameraMetadata metadata) {
- ASSERT_EQ(Status::OK, s);
- staticMeta =
- reinterpret_cast<camera_metadata_t*>(metadata.data());
- });
- Status ret = isZSLModeAvailable(staticMeta);
- if (Status::METHOD_NOT_SUPPORTED == ret) {
- session->close();
+ Return<void> ret;
+ sp<ICameraDeviceSession> session;
+ openEmptyDeviceSession(name, env, &session /*out*/,
+ &staticMeta /*out*/);
+
+ Status rc = isZSLModeAvailable(staticMeta);
+ if (Status::METHOD_NOT_SUPPORTED == rc) {
+ ret = session->close();
+ ASSERT_TRUE(ret.isOk());
continue;
}
- ASSERT_EQ(Status::OK, ret);
+ ASSERT_EQ(Status::OK, rc);
inputStreams.clear();
ASSERT_EQ(Status::OK, getAvailableOutputStreams(staticMeta,
@@ -842,15 +2191,18 @@
inputStream, zslStream, outputStream};
StreamConfiguration config = {streams,
StreamConfigurationMode::NORMAL_MODE};
- session->configureStreams(config, [streamId] (Status s,
+ ret = session->configureStreams(config, [streamId] (Status s,
HalStreamConfiguration halConfig) {
ASSERT_EQ(Status::OK, s);
ASSERT_EQ(3u, halConfig.streams.size());
});
+ ASSERT_TRUE(ret.isOk());
}
}
- session->close();
+ free_camera_metadata(staticMeta);
+ ret = session->close();
+ ASSERT_TRUE(ret.isOk());
}
}
}
@@ -862,42 +2214,19 @@
hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
std::vector<AvailableStream> outputBlobStreams;
std::vector<AvailableStream> outputPreviewStreams;
- AvailableStream previewThreshold = {MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT,
+ AvailableStream previewThreshold = {kMaxPreviewWidth, kMaxPreviewHeight,
static_cast<int32_t>(PixelFormat::IMPLEMENTATION_DEFINED)};
AvailableStream blobThreshold = {INT32_MAX, INT32_MAX,
static_cast<int32_t>(PixelFormat::BLOB)};
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
- ::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
- ALOGI("configureStreams: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
- name,
- [&](auto status, const auto& device) {
- ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(device, nullptr);
- device3_2 = device;
- });
-
- sp<EmptyDeviceCb> cb = new EmptyDeviceCb;
- sp<ICameraDeviceSession> session;
- device3_2->open(
- cb,
- [&](auto status, const auto& newSession) {
- ALOGI("device::open returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(newSession, nullptr);
- session = newSession;
- });
-
camera_metadata_t *staticMeta;
- device3_2->getCameraCharacteristics([&] (Status s,
- CameraMetadata metadata) {
- ASSERT_EQ(Status::OK, s);
- staticMeta =
- reinterpret_cast<camera_metadata_t*>(metadata.data());
- });
+ Return<void> ret;
+ sp<ICameraDeviceSession> session;
+ openEmptyDeviceSession(name, env, &session /*out*/,
+ &staticMeta /*out*/);
+
outputBlobStreams.clear();
ASSERT_EQ(Status::OK, getAvailableOutputStreams(staticMeta,
outputBlobStreams, &blobThreshold));
@@ -925,15 +2254,18 @@
previewStream, blobStream};
StreamConfiguration config = {streams,
StreamConfigurationMode::NORMAL_MODE};
- session->configureStreams(config, [streamId] (Status s,
+ ret = session->configureStreams(config, [streamId] (Status s,
HalStreamConfiguration halConfig) {
ASSERT_EQ(Status::OK, s);
ASSERT_EQ(2u, halConfig.streams.size());
});
+ ASSERT_TRUE(ret.isOk());
}
}
- session->close();
+ free_camera_metadata(staticMeta);
+ ret = session->close();
+ ASSERT_TRUE(ret.isOk());
}
}
}
@@ -947,38 +2279,16 @@
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
- ::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
- ALOGI("configureStreams: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
- name,
- [&](auto status, const auto& device) {
- ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(device, nullptr);
- device3_2 = device;
- });
-
- sp<EmptyDeviceCb> cb = new EmptyDeviceCb;
- sp<ICameraDeviceSession> session;
- device3_2->open(
- cb,
- [&](auto status, const auto& newSession) {
- ALOGI("device::open returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(newSession, nullptr);
- session = newSession;
- });
-
camera_metadata_t *staticMeta;
- device3_2->getCameraCharacteristics([&] (Status s,
- CameraMetadata metadata) {
- ASSERT_EQ(Status::OK, s);
- staticMeta =
- reinterpret_cast<camera_metadata_t*>(metadata.data());
- });
+ Return<void> ret;
+ sp<ICameraDeviceSession> session;
+ openEmptyDeviceSession(name, env, &session /*out*/,
+ &staticMeta /*out*/);
+
Status rc = isConstrainedModeAvailable(staticMeta);
if (Status::METHOD_NOT_SUPPORTED == rc) {
- session->close();
+ ret = session->close();
+ ASSERT_TRUE(ret.isOk());
continue;
}
ASSERT_EQ(Status::OK, rc);
@@ -996,12 +2306,13 @@
::android::hardware::hidl_vec<Stream> streams = {stream};
StreamConfiguration config = {streams,
StreamConfigurationMode::CONSTRAINED_HIGH_SPEED_MODE};
- session->configureStreams(config, [streamId] (Status s,
+ ret = session->configureStreams(config, [streamId] (Status s,
HalStreamConfiguration halConfig) {
ASSERT_EQ(Status::OK, s);
ASSERT_EQ(1u, halConfig.streams.size());
ASSERT_EQ(halConfig.streams[0].id, streamId);
});
+ ASSERT_TRUE(ret.isOk());
stream = {streamId++, StreamType::OUTPUT,
static_cast<uint32_t> (0),
@@ -1011,10 +2322,12 @@
streams[0] = stream;
config = {streams,
StreamConfigurationMode::CONSTRAINED_HIGH_SPEED_MODE};
- session->configureStreams(config, [streamId] (Status s,
+ ret = session->configureStreams(config, [streamId] (Status s,
HalStreamConfiguration) {
- ASSERT_EQ(Status::ILLEGAL_ARGUMENT, s);
+ ASSERT_TRUE((Status::ILLEGAL_ARGUMENT == s) ||
+ (Status::INTERNAL_ERROR == s));
});
+ ASSERT_TRUE(ret.isOk());
stream = {streamId++, StreamType::OUTPUT,
static_cast<uint32_t> (UINT32_MAX),
@@ -1024,10 +2337,11 @@
streams[0] = stream;
config = {streams,
StreamConfigurationMode::CONSTRAINED_HIGH_SPEED_MODE};
- session->configureStreams(config, [streamId] (Status s,
+ ret = session->configureStreams(config, [streamId] (Status s,
HalStreamConfiguration) {
ASSERT_EQ(Status::ILLEGAL_ARGUMENT, s);
});
+ ASSERT_TRUE(ret.isOk());
stream = {streamId++, StreamType::OUTPUT,
static_cast<uint32_t> (hfrStream.width),
@@ -1037,12 +2351,15 @@
streams[0] = stream;
config = {streams,
StreamConfigurationMode::CONSTRAINED_HIGH_SPEED_MODE};
- session->configureStreams(config, [streamId] (Status s,
+ ret = session->configureStreams(config, [streamId] (Status s,
HalStreamConfiguration) {
ASSERT_EQ(Status::ILLEGAL_ARGUMENT, s);
});
+ ASSERT_TRUE(ret.isOk());
- session->close();
+ free_camera_metadata(staticMeta);
+ ret = session->close();
+ ASSERT_TRUE(ret.isOk());
}
}
}
@@ -1054,42 +2371,19 @@
hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
std::vector<AvailableStream> outputBlobStreams;
std::vector<AvailableStream> outputVideoStreams;
- AvailableStream videoThreshold = {MAX_VIDEO_WIDTH, MAX_VIDEO_HEIGHT,
+ AvailableStream videoThreshold = {kMaxVideoWidth, kMaxVideoHeight,
static_cast<int32_t>(PixelFormat::IMPLEMENTATION_DEFINED)};
- AvailableStream blobThreshold = {MAX_VIDEO_WIDTH, MAX_VIDEO_HEIGHT,
+ AvailableStream blobThreshold = {kMaxVideoWidth, kMaxVideoHeight,
static_cast<int32_t>(PixelFormat::BLOB)};
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
- ::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
- ALOGI("configureStreams: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
- name,
- [&](auto status, const auto& device) {
- ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(device, nullptr);
- device3_2 = device;
- });
-
- sp<EmptyDeviceCb> cb = new EmptyDeviceCb;
- sp<ICameraDeviceSession> session;
- device3_2->open(
- cb,
- [&](auto status, const auto& newSession) {
- ALOGI("device::open returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(newSession, nullptr);
- session = newSession;
- });
-
camera_metadata_t *staticMeta;
- device3_2->getCameraCharacteristics([&] (Status s,
- CameraMetadata metadata) {
- ASSERT_EQ(Status::OK, s);
- staticMeta =
- reinterpret_cast<camera_metadata_t*>(metadata.data());
- });
+ Return<void> ret;
+ sp<ICameraDeviceSession> session;
+ openEmptyDeviceSession(name, env, &session /*out*/,
+ &staticMeta /*out*/);
+
outputBlobStreams.clear();
ASSERT_EQ(Status::OK, getAvailableOutputStreams(staticMeta,
outputBlobStreams, &blobThreshold));
@@ -1118,15 +2412,18 @@
videoStream, blobStream};
StreamConfiguration config = {streams,
StreamConfigurationMode::NORMAL_MODE};
- session->configureStreams(config, [streamId] (Status s,
+ ret = session->configureStreams(config, [streamId] (Status s,
HalStreamConfiguration halConfig) {
ASSERT_EQ(Status::OK, s);
ASSERT_EQ(2u, halConfig.streams.size());
});
+ ASSERT_TRUE(ret.isOk());
}
}
- session->close();
+ free_camera_metadata(staticMeta);
+ ret = session->close();
+ ASSERT_TRUE(ret.isOk());
}
}
}
@@ -1135,72 +2432,28 @@
TEST_F(CameraHidlTest, processCaptureRequestPreview) {
CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
- std::vector<AvailableStream> outputPreviewStreams;
- AvailableStream previewThreshold = {MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT,
+ AvailableStream previewThreshold = {kMaxPreviewWidth, kMaxPreviewHeight,
static_cast<int32_t>(PixelFormat::IMPLEMENTATION_DEFINED)};
- int32_t streamId = 0;
uint64_t bufferId = 1;
uint32_t frameNumber = 1;
::android::hardware::hidl_vec<uint8_t> settings;
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
- ::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
- ALOGI("configureStreams: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
- name,
- [&](auto status, const auto& device) {
- ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(device, nullptr);
- device3_2 = device;
- });
-
- sp<DeviceCb> cb = new DeviceCb(this);
- sp<ICameraDeviceSession> session;
- device3_2->open(
- cb,
- [&](auto status, const auto& newSession) {
- ALOGI("device::open returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(newSession, nullptr);
- session = newSession;
- });
-
- camera_metadata_t *staticMeta;
- device3_2->getCameraCharacteristics([&] (Status s,
- CameraMetadata metadata) {
- ASSERT_EQ(Status::OK, s);
- staticMeta =
- reinterpret_cast<camera_metadata_t*>(metadata.data());
- });
-
- outputPreviewStreams.clear();
- ASSERT_EQ(Status::OK, getAvailableOutputStreams(staticMeta,
- outputPreviewStreams, &previewThreshold));
- ASSERT_NE(0u, outputPreviewStreams.size());
-
+ Stream previewStream;
HalStreamConfiguration halStreamConfig;
- Stream previewStream = {streamId, StreamType::OUTPUT,
- static_cast<uint32_t> (outputPreviewStreams[0].width),
- static_cast<uint32_t> (outputPreviewStreams[0].height),
- static_cast<PixelFormat> (outputPreviewStreams[0].format),
- 0, 0, StreamRotation::ROTATION_0};
- ::android::hardware::hidl_vec<Stream> streams = {previewStream};
- StreamConfiguration config = {streams,
- StreamConfigurationMode::NORMAL_MODE};
- session->configureStreams(config, [&] (Status s,
- HalStreamConfiguration halConfig) {
- ASSERT_EQ(Status::OK, s);
- ASSERT_EQ(1u, halConfig.streams.size());
- halStreamConfig = halConfig;
- });
+ sp<ICameraDeviceSession> session;
+ configurePreviewStream(name, env, &previewThreshold,
+ &session /*out*/, &previewStream /*out*/,
+ &halStreamConfig /*out*/);
RequestTemplate reqTemplate = RequestTemplate::PREVIEW;
- session->constructDefaultRequestSettings(reqTemplate,
+ Return<void> ret;
+ ret = session->constructDefaultRequestSettings(reqTemplate,
[&](auto status, const auto& req) {
ASSERT_EQ(Status::OK, status);
settings = req; });
+ ASSERT_TRUE(ret.isOk());
sp<GraphicBuffer> gb = new GraphicBuffer(previewStream.width,
previewStream.height,
@@ -1213,49 +2466,61 @@
BufferStatus::OK, nullptr, nullptr};
::android::hardware::hidl_vec<StreamBuffer> outputBuffers = {
outputBuffer};
+ StreamBuffer emptyInputBuffer = {-1, 0, nullptr,
+ BufferStatus::ERROR, nullptr, nullptr};
CaptureRequest request = {frameNumber, settings,
- {-1, 0, nullptr, BufferStatus::ERROR, nullptr, nullptr},
- outputBuffers};
+ emptyInputBuffer, outputBuffers};
- std::unique_lock<std::mutex> l(mLock);
- mResultBuffers.clear();
- mResultFrameNumber = frameNumber;
- l.unlock();
-
- ASSERT_EQ(Status::OK, session->processCaptureRequest(request));
-
- l.lock();
- while (0 == mResultBuffers.size()) {
- auto timeout = std::chrono::system_clock::now() +
- std::chrono::seconds(STREAM_BUFFER_TIMEOUT);
- ASSERT_NE(std::cv_status::timeout,
- mResultCondition.wait_until(l, timeout));
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ mResultBuffers.clear();
+ mResultFrameNumber = frameNumber;
}
- ASSERT_EQ(BufferStatus::OK, mResultBuffers[0].status);
- ASSERT_EQ(previewStream.id, mResultBuffers[0].streamId);
+ Return<Status> returnStatus = session->processCaptureRequest(
+ request);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
- request.frameNumber++;
- //Empty settings should be supported after the first call
- //for repeating requests.
- request.settings.setToExternal(nullptr, 0, true);
- mResultBuffers.clear();
- mResultFrameNumber++;
- l.unlock();
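+ // Wait for the first capture result and check that the returned buffer belongs to the preview stream.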
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ while (0 == mResultBuffers.size()) {
+ auto timeout = std::chrono::system_clock::now() +
+ std::chrono::seconds(kStreamBufferTimeoutSec);
+ ASSERT_NE(std::cv_status::timeout,
+ mResultCondition.wait_until(l, timeout));
+ }
- ASSERT_EQ(Status::OK, session->processCaptureRequest(request));
+ ASSERT_EQ(BufferStatus::OK, mResultBuffers[0].status);
+ ASSERT_EQ(previewStream.id, mResultBuffers[0].streamId);
- l.lock();
- while (0 == mResultBuffers.size()) {
- auto timeout = std::chrono::system_clock::now() +
- std::chrono::seconds(STREAM_BUFFER_TIMEOUT);
- ASSERT_NE(std::cv_status::timeout,
- mResultCondition.wait_until(l, timeout));
+ request.frameNumber++;
+ //Empty settings should be supported after the first call
+ //for repeating requests.
+ request.settings.setToExternal(nullptr, 0, true);
+ mResultBuffers.clear();
+ mResultFrameNumber++;
}
- ASSERT_EQ(BufferStatus::OK, mResultBuffers[0].status);
- ASSERT_EQ(previewStream.id, mResultBuffers[0].streamId);
- session->close();
+ returnStatus = session->processCaptureRequest(
+ request);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ while (0 == mResultBuffers.size()) {
+ auto timeout = std::chrono::system_clock::now() +
+ std::chrono::seconds(kStreamBufferTimeoutSec);
+ ASSERT_NE(std::cv_status::timeout,
+ mResultCondition.wait_until(l, timeout));
+ }
+ ASSERT_EQ(BufferStatus::OK, mResultBuffers[0].status);
+ ASSERT_EQ(previewStream.id, mResultBuffers[0].streamId);
+ }
+
+ ret = session->close();
+ ASSERT_TRUE(ret.isOk());
}
}
}
@@ -1266,65 +2531,20 @@
CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
std::vector<AvailableStream> outputPreviewStreams;
- AvailableStream previewThreshold = {MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT,
+ AvailableStream previewThreshold = {kMaxPreviewWidth, kMaxPreviewHeight,
static_cast<int32_t>(PixelFormat::IMPLEMENTATION_DEFINED)};
- int32_t streamId = 0;
uint64_t bufferId = 1;
uint32_t frameNumber = 1;
::android::hardware::hidl_vec<uint8_t> settings;
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
- ::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
- ALOGI("configureStreams: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
- name,
- [&](auto status, const auto& device) {
- ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(device, nullptr);
- device3_2 = device;
- });
-
- sp<DeviceCb> cb = new DeviceCb(this);
- sp<ICameraDeviceSession> session;
- device3_2->open(
- cb,
- [&](auto status, const auto& newSession) {
- ALOGI("device::open returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(newSession, nullptr);
- session = newSession;
- });
-
- camera_metadata_t *staticMeta;
- device3_2->getCameraCharacteristics([&] (Status s,
- CameraMetadata metadata) {
- ASSERT_EQ(Status::OK, s);
- staticMeta =
- reinterpret_cast<camera_metadata_t*>(metadata.data());
- });
-
- outputPreviewStreams.clear();
- ASSERT_EQ(Status::OK, getAvailableOutputStreams(staticMeta,
- outputPreviewStreams, &previewThreshold));
- ASSERT_NE(0u, outputPreviewStreams.size());
-
+ Stream previewStream;
HalStreamConfiguration halStreamConfig;
- Stream previewStream = {streamId, StreamType::OUTPUT,
- static_cast<uint32_t> (outputPreviewStreams[0].width),
- static_cast<uint32_t> (outputPreviewStreams[0].height),
- static_cast<PixelFormat> (outputPreviewStreams[0].format),
- 0, 0, StreamRotation::ROTATION_0};
- ::android::hardware::hidl_vec<Stream> streams = {previewStream};
- StreamConfiguration config = {streams,
- StreamConfigurationMode::NORMAL_MODE};
- session->configureStreams(config, [&] (Status s,
- HalStreamConfiguration halConfig) {
- ASSERT_EQ(Status::OK, s);
- ASSERT_EQ(1u, halConfig.streams.size());
- halStreamConfig = halConfig;
- });
+ sp<ICameraDeviceSession> session;
+ configurePreviewStream(name, env, &previewThreshold,
+ &session /*out*/, &previewStream /*out*/,
+ &halStreamConfig /*out*/);
sp<GraphicBuffer> gb = new GraphicBuffer(previewStream.width,
previewStream.height,
@@ -1337,104 +2557,208 @@
BufferStatus::OK, nullptr, nullptr};
::android::hardware::hidl_vec<StreamBuffer> outputBuffers = {
outputBuffer};
+ StreamBuffer emptyInputBuffer = {-1, 0, nullptr,
+ BufferStatus::ERROR, nullptr, nullptr};
CaptureRequest request = {frameNumber, settings,
- {-1, 0, nullptr, BufferStatus::ERROR, nullptr, nullptr},
- outputBuffers};
+ emptyInputBuffer, outputBuffers};
//Settings were not correctly initialized, we should fail here
- ASSERT_EQ(Status::INTERNAL_ERROR,
- session->processCaptureRequest(request));
+ Return<Status> returnStatus = session->processCaptureRequest(
+ request);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::INTERNAL_ERROR, returnStatus);
- session->close();
+ Return<void> ret = session->close();
+ ASSERT_TRUE(ret.isOk());
}
}
}
// Check whether an invalid capture request with missing output buffers
// will be reported correctly.
-TEST_F(CameraHidlTest, processCaptureRequestInvalidSingleSnapshot) {
+TEST_F(CameraHidlTest, processCaptureRequestInvalidBuffer) {
CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
std::vector<AvailableStream> outputBlobStreams;
- AvailableStream blobThreshold = {INT32_MAX, INT32_MAX,
- static_cast<int32_t>(PixelFormat::BLOB)};
- int32_t streamId = 0;
+ AvailableStream previewThreshold = {kMaxPreviewWidth, kMaxPreviewHeight,
+ static_cast<int32_t>(PixelFormat::IMPLEMENTATION_DEFINED)};
+ uint32_t frameNumber = 1;
+ ::android::hardware::hidl_vec<uint8_t> settings;
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
+ Stream previewStream;
+ HalStreamConfiguration halStreamConfig;
+ sp<ICameraDeviceSession> session;
+ configurePreviewStream(name, env, &previewThreshold,
+ &session /*out*/, &previewStream /*out*/,
+ &halStreamConfig /*out*/);
+
+ RequestTemplate reqTemplate = RequestTemplate::PREVIEW;
+ Return<void> ret;
+ ret = session->constructDefaultRequestSettings(reqTemplate,
+ [&](auto status, const auto& req) {
+ ASSERT_EQ(Status::OK, status);
+ settings = req; });
+ ASSERT_TRUE(ret.isOk());
+
+ ::android::hardware::hidl_vec<StreamBuffer> emptyOutputBuffers;
+ StreamBuffer emptyInputBuffer = {-1, 0, nullptr,
+ BufferStatus::ERROR, nullptr, nullptr};
+ CaptureRequest request = {frameNumber, settings,
+ emptyInputBuffer, emptyOutputBuffers};
+
+ //Output buffers are missing, we should fail here
+ Return<Status> returnStatus = session->processCaptureRequest(
+ request);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::INTERNAL_ERROR,
+ returnStatus);
+
+ ret = session->close();
+ ASSERT_TRUE(ret.isOk());
+ }
+ }
+}
+
+// Generate, trigger and flush a preview request.
+TEST_F(CameraHidlTest, flushPreviewRequest) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+ std::vector<AvailableStream> outputPreviewStreams;
+ AvailableStream previewThreshold = {kMaxPreviewWidth, kMaxPreviewHeight,
+ static_cast<int32_t>(PixelFormat::IMPLEMENTATION_DEFINED)};
uint64_t bufferId = 1;
uint32_t frameNumber = 1;
::android::hardware::hidl_vec<uint8_t> settings;
for (const auto& name : cameraDeviceNames) {
if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
- ::android::sp<::android::hardware::camera::device::V3_2::ICameraDevice> device3_2;
- ALOGI("configureStreams: Testing camera device %s", name.c_str());
- env->mProvider->getCameraDeviceInterface_V3_x(
- name,
- [&](auto status, const auto& device) {
- ALOGI("getCameraDeviceInterface_V3_x returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(device, nullptr);
- device3_2 = device;
- });
-
- sp<DeviceCb> cb = new DeviceCb(this);
- sp<ICameraDeviceSession> session;
- device3_2->open(
- cb,
- [&](auto status, const auto& newSession) {
- ALOGI("device::open returns status:%d", (int)status);
- ASSERT_EQ(Status::OK, status);
- ASSERT_NE(newSession, nullptr);
- session = newSession;
- });
-
- camera_metadata_t *staticMeta;
- device3_2->getCameraCharacteristics([&] (Status s,
- CameraMetadata metadata) {
- ASSERT_EQ(Status::OK, s);
- staticMeta =
- reinterpret_cast<camera_metadata_t*>(metadata.data());
- });
-
- outputBlobStreams.clear();
- ASSERT_EQ(Status::OK, getAvailableOutputStreams(staticMeta,
- outputBlobStreams, &blobThreshold));
- ASSERT_NE(0u, outputBlobStreams.size());
-
+ Stream previewStream;
HalStreamConfiguration halStreamConfig;
- Stream previewStream = {streamId, StreamType::OUTPUT,
- static_cast<uint32_t> (outputBlobStreams[0].width),
- static_cast<uint32_t> (outputBlobStreams[0].height),
- static_cast<PixelFormat> (outputBlobStreams[0].format),
- 0, 0, StreamRotation::ROTATION_0};
- ::android::hardware::hidl_vec<Stream> streams = {previewStream};
- StreamConfiguration config = {streams,
- StreamConfigurationMode::NORMAL_MODE};
- session->configureStreams(config, [&] (Status s,
- HalStreamConfiguration halConfig) {
- ASSERT_EQ(Status::OK, s);
- ASSERT_EQ(1u, halConfig.streams.size());
- halStreamConfig = halConfig;
- });
+ sp<ICameraDeviceSession> session;
+ configurePreviewStream(name, env, &previewThreshold,
+ &session /*out*/, &previewStream /*out*/,
+ &halStreamConfig /*out*/);
- RequestTemplate reqTemplate = RequestTemplate::STILL_CAPTURE;
- session->constructDefaultRequestSettings(reqTemplate,
+ RequestTemplate reqTemplate = RequestTemplate::PREVIEW;
+ Return<void> ret;
+ ret = session->constructDefaultRequestSettings(reqTemplate,
[&](auto status, const auto& req) {
ASSERT_EQ(Status::OK, status);
settings = req; });
+ ASSERT_TRUE(ret.isOk());
+ sp<GraphicBuffer> gb = new GraphicBuffer(previewStream.width,
+ previewStream.height,
+ static_cast<int32_t> (halStreamConfig.streams[0].overrideFormat),
+ 1, halStreamConfig.streams[0].producerUsage,
+ halStreamConfig.streams[0].consumerUsage);
+ ASSERT_NE(nullptr, gb.get());
StreamBuffer outputBuffer = {halStreamConfig.streams[0].id,
- bufferId, hidl_handle(nullptr), BufferStatus::OK,
- nullptr, nullptr};
- ::android::hardware::hidl_vec<StreamBuffer> outputBuffers;
+ bufferId, hidl_handle(gb->getNativeBuffer()->handle),
+ BufferStatus::OK, nullptr, nullptr};
+ ::android::hardware::hidl_vec<StreamBuffer> outputBuffers = {
+ outputBuffer};
+ const StreamBuffer emptyInputBuffer = {-1, 0, nullptr,
+ BufferStatus::ERROR, nullptr, nullptr};
CaptureRequest request = {frameNumber, settings,
- {-1, 0, nullptr, BufferStatus::ERROR, nullptr, nullptr},
- outputBuffers};
+ emptyInputBuffer, outputBuffers};
- //Output buffers are missing, we should fail here
- ASSERT_EQ(Status::INTERNAL_ERROR,
- session->processCaptureRequest(request));
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ mResultBuffers.clear();
+ mErrors.clear();
+ mResultFrameNumber = frameNumber;
+ }
- session->close();
+ Return<Status> returnStatus = session->processCaptureRequest(
+ request);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+ //Flush before waiting for request to complete.
+ returnStatus = session->flush();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
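+ // After the flush the HAL may either deliver the buffer normally or report request/result/buffer errors; both outcomes are accepted below.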
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ while ((0 == mResultBuffers.size()) && (0 == mErrors.size())) {
+ auto timeout = std::chrono::system_clock::now() +
+ std::chrono::seconds(kStreamBufferTimeoutSec);
+ ASSERT_NE(std::cv_status::timeout,
+ mResultCondition.wait_until(l, timeout));
+ }
+
+ if (mErrors.empty()) {
+ ASSERT_EQ(BufferStatus::OK, mResultBuffers[0].status);
+ ASSERT_EQ(previewStream.id, mResultBuffers[0].streamId);
+ } else {
+ for (auto &error : mErrors) {
+ switch (error.errorCode) {
+ case ErrorCode::ERROR_REQUEST:
+ case ErrorCode::ERROR_RESULT:
+ //Expected
+ break;
+ case ErrorCode::ERROR_BUFFER:
+ //Expected as well
+ ASSERT_EQ(frameNumber, error.frameNumber);
+ ASSERT_EQ(previewStream.id, error.errorStreamId);
+ break;
+ case ErrorCode::ERROR_DEVICE:
+ default:
+ FAIL() <<"Unexpected error:" << static_cast<uint32_t> (error.errorCode);
+ }
+ }
+ }
+ }
+
+ ret = session->close();
+ ASSERT_TRUE(ret.isOk());
+ }
+ }
+}
+
+// Verify that camera flushes correctly without any pending requests.
+TEST_F(CameraHidlTest, flushEmpty) {
+ CameraHidlEnvironment* env = CameraHidlEnvironment::Instance();
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames();
+ std::vector<AvailableStream> outputPreviewStreams;
+ AvailableStream previewThreshold = {kMaxPreviewWidth, kMaxPreviewHeight,
+ static_cast<int32_t>(PixelFormat::IMPLEMENTATION_DEFINED)};
+
+ for (const auto& name : cameraDeviceNames) {
+ if (getCameraDeviceVersion(name) == CAMERA_DEVICE_API_VERSION_3_2) {
+ Stream previewStream;
+ HalStreamConfiguration halStreamConfig;
+ sp<ICameraDeviceSession> session;
+ configurePreviewStream(name, env, &previewThreshold,
+ &session /*out*/, &previewStream /*out*/,
+ &halStreamConfig /*out*/);
+
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ mResultBuffers.clear();
+ mErrors.clear();
+ mResultFrameNumber = 0;
+ }
+
+ Return<Status> returnStatus = session->flush();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+
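+ // No request is pending, so no result or error callback should arrive; the wait below is expected to time out.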
+ {
+ std::unique_lock<std::mutex> l(mLock);
+ auto timeout = std::chrono::system_clock::now() +
+ std::chrono::milliseconds(kEmptyFlushTimeoutMSec);
+ ASSERT_EQ(std::cv_status::timeout,
+ mResultCondition.wait_until(l, timeout));
+ ASSERT_TRUE(mErrors.empty());
+ ASSERT_TRUE(mResultBuffers.empty());
+ }
+
+ Return<void> ret = session->close();
+ ASSERT_TRUE(ret.isOk());
}
}
}
@@ -1443,7 +2767,7 @@
// static characteristics.
Status CameraHidlTest::getAvailableOutputStreams(camera_metadata_t *staticMeta,
std::vector<AvailableStream> &outputStreams,
- AvailableStream *threshold) {
+ const AvailableStream *threshold) {
if (nullptr == staticMeta) {
return Status::ILLEGAL_ARGUMENT;
}
@@ -1609,6 +2933,260 @@
return (result.format == format) ? Status::OK : Status::ILLEGAL_ARGUMENT;
}
+// Check whether the camera device supports specific focus mode.
+Status CameraHidlTest::isAutoFocusModeAvailable(
+ ::android::CameraParameters &cameraParams,
+ const char *mode) {
+ ::android::String8 focusModes(cameraParams.get(
+ CameraParameters::KEY_SUPPORTED_FOCUS_MODES));
+ if (focusModes.contains(mode)) {
+ return Status::OK;
+ }
+
+ return Status::METHOD_NOT_SUPPORTED;
+}
+
+// Open a device session and configure a preview stream.
+void CameraHidlTest::configurePreviewStream(const std::string &name,
+ const CameraHidlEnvironment* env,
+ const AvailableStream *previewThreshold,
+ sp<ICameraDeviceSession> *session /*out*/,
+ Stream *previewStream /*out*/,
+ HalStreamConfiguration *halStreamConfig /*out*/) {
+ ASSERT_NE(nullptr, env);
+ ASSERT_NE(nullptr, session);
+ ASSERT_NE(nullptr, previewStream);
+ ASSERT_NE(nullptr, halStreamConfig);
+
+ std::vector<AvailableStream> outputPreviewStreams;
+ ::android::sp<ICameraDevice> device3_2;
+ ALOGI("configureStreams: Testing camera device %s", name.c_str());
+ Return<void> ret;
+ ret = env->mProvider->getCameraDeviceInterface_V3_x(
+ name,
+ [&](auto status, const auto& device) {
+ ALOGI("getCameraDeviceInterface_V3_x returns status:%d",
+ (int)status);
+ ASSERT_EQ(Status::OK, status);
+ ASSERT_NE(device, nullptr);
+ device3_2 = device;
+ });
+ ASSERT_TRUE(ret.isOk());
+
+ sp<DeviceCb> cb = new DeviceCb(this);
+ ret = device3_2->open(
+ cb,
+ [&](auto status, const auto& newSession) {
+ ALOGI("device::open returns status:%d", (int)status);
+ ASSERT_EQ(Status::OK, status);
+ ASSERT_NE(newSession, nullptr);
+ *session = newSession;
+ });
+ ASSERT_TRUE(ret.isOk());
+
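+ // Clone the static metadata so it remains valid outside the callback; it is released once the stream list is extracted.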
+ camera_metadata_t *staticMeta;
+ ret = device3_2->getCameraCharacteristics([&] (Status s,
+ CameraMetadata metadata) {
+ ASSERT_EQ(Status::OK, s);
+ staticMeta = clone_camera_metadata(
+ reinterpret_cast<const camera_metadata_t*>(metadata.data()));
+ ASSERT_NE(nullptr, staticMeta);
+ });
+ ASSERT_TRUE(ret.isOk());
+
+ outputPreviewStreams.clear();
+ auto rc = getAvailableOutputStreams(staticMeta,
+ outputPreviewStreams, previewThreshold);
+ free_camera_metadata(staticMeta);
+ ASSERT_EQ(Status::OK, rc);
+ ASSERT_FALSE(outputPreviewStreams.empty());
+
+ *previewStream = {0, StreamType::OUTPUT,
+ static_cast<uint32_t> (outputPreviewStreams[0].width),
+ static_cast<uint32_t> (outputPreviewStreams[0].height),
+ static_cast<PixelFormat> (outputPreviewStreams[0].format),
+ 0, 0, StreamRotation::ROTATION_0};
+ ::android::hardware::hidl_vec<Stream> streams = {*previewStream};
+ StreamConfiguration config = {streams,
+ StreamConfigurationMode::NORMAL_MODE};
+ ret = (*session)->configureStreams(config, [&] (Status s,
+ HalStreamConfiguration halConfig) {
+ ASSERT_EQ(Status::OK, s);
+ ASSERT_EQ(1u, halConfig.streams.size());
+ *halStreamConfig = halConfig;
+ });
+ ASSERT_TRUE(ret.isOk());
+}
+
+// Open a device session with empty callbacks and return static metadata.
+void CameraHidlTest::openEmptyDeviceSession(const std::string &name,
+ const CameraHidlEnvironment* env,
+ sp<ICameraDeviceSession> *session /*out*/,
+ camera_metadata_t **staticMeta /*out*/) {
+ ASSERT_NE(nullptr, env);
+ ASSERT_NE(nullptr, session);
+ ASSERT_NE(nullptr, staticMeta);
+
+ ::android::sp<ICameraDevice> device3_2;
+ ALOGI("configureStreams: Testing camera device %s", name.c_str());
+ Return<void> ret;
+ ret = env->mProvider->getCameraDeviceInterface_V3_x(
+ name,
+ [&](auto status, const auto& device) {
+ ALOGI("getCameraDeviceInterface_V3_x returns status:%d",
+ (int)status);
+ ASSERT_EQ(Status::OK, status);
+ ASSERT_NE(device, nullptr);
+ device3_2 = device;
+ });
+ ASSERT_TRUE(ret.isOk());
+
+ sp<EmptyDeviceCb> cb = new EmptyDeviceCb();
+ ret = device3_2->open(cb, [&](auto status, const auto& newSession) {
+ ALOGI("device::open returns status:%d", (int)status);
+ ASSERT_EQ(Status::OK, status);
+ ASSERT_NE(newSession, nullptr);
+ *session = newSession;
+ });
+ ASSERT_TRUE(ret.isOk());
+
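+ // Hand back a cloned copy of the static metadata; the caller is responsible for freeing it.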
+ ret = device3_2->getCameraCharacteristics([&] (Status s,
+ CameraMetadata metadata) {
+ ASSERT_EQ(Status::OK, s);
+ *staticMeta = clone_camera_metadata(
+ reinterpret_cast<const camera_metadata_t*>(metadata.data()));
+ ASSERT_NE(nullptr, *staticMeta);
+ });
+ ASSERT_TRUE(ret.isOk());
+}
+
+// Open a particular camera device.
+void CameraHidlTest::openCameraDevice(const std::string &name,
+ const CameraHidlEnvironment* env,
+ sp<::android::hardware::camera::device::V1_0::ICameraDevice> *device1 /*out*/) {
+ ASSERT_TRUE(nullptr != env);
+ ASSERT_TRUE(nullptr != device1);
+
+ Return<void> ret;
+ ret = env->mProvider->getCameraDeviceInterface_V1_x(
+ name,
+ [&](auto status, const auto& device) {
+ ALOGI("getCameraDeviceInterface_V1_x returns status:%d",
+ (int)status);
+ ASSERT_EQ(Status::OK, status);
+ ASSERT_NE(device, nullptr);
+ *device1 = device;
+ });
+ ASSERT_TRUE(ret.isOk());
+
+ sp<Camera1DeviceCb> deviceCb = new Camera1DeviceCb(this);
+ Return<Status> returnStatus = (*device1)->open(deviceCb);
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+}
+
+// Initialize and configure a preview window.
+void CameraHidlTest::setupPreviewWindow(
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device,
+ sp<BufferItemConsumer> *bufferItemConsumer /*out*/,
+ sp<BufferItemHander> *bufferHandler /*out*/) {
+ ASSERT_NE(nullptr, device.get());
+ ASSERT_NE(nullptr, bufferItemConsumer);
+ ASSERT_NE(nullptr, bufferHandler);
+
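+ // Build a BufferQueue-backed Surface for the preview window and listen for incoming frames via BufferItemHander.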
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ *bufferItemConsumer = new BufferItemConsumer(consumer,
+ GraphicBuffer::USAGE_HW_TEXTURE); //Use GLConsumer default usage flags
+ ASSERT_NE(nullptr, (*bufferItemConsumer).get());
+ *bufferHandler = new BufferItemHander(*bufferItemConsumer);
+ ASSERT_NE(nullptr, (*bufferHandler).get());
+ (*bufferItemConsumer)->setFrameAvailableListener(*bufferHandler);
+ sp<Surface> surface = new Surface(producer);
+ sp<PreviewWindowCb> previewCb = new PreviewWindowCb(surface);
+
+ auto rc = device->setPreviewWindow(previewCb);
+ ASSERT_TRUE(rc.isOk());
+ ASSERT_EQ(Status::OK, rc);
+}
+
+// Stop camera preview and close camera.
+void CameraHidlTest::stopPreviewAndClose(
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device) {
+ Return<void> ret = device->stopPreview();
+ ASSERT_TRUE(ret.isOk());
+
+ ret = device->close();
+ ASSERT_TRUE(ret.isOk());
+}
+
+// Enable a specific camera message type.
+void CameraHidlTest::enableMsgType(unsigned int msgType,
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device) {
+ Return<void> ret = device->enableMsgType(msgType);
+ ASSERT_TRUE(ret.isOk());
+
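+ // Confirm that the HAL now reports the message type as enabled.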
+ Return<bool> returnBoolStatus = device->msgTypeEnabled(msgType);
+ ASSERT_TRUE(returnBoolStatus.isOk());
+ ASSERT_TRUE(returnBoolStatus);
+}
+
+// Disable a specific camera message type.
+void CameraHidlTest::disableMsgType(unsigned int msgType,
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device) {
+ Return<void> ret = device->disableMsgType(msgType);
+ ASSERT_TRUE(ret.isOk());
+
+ Return<bool> returnBoolStatus = device->msgTypeEnabled(msgType);
+ ASSERT_TRUE(returnBoolStatus.isOk());
+ ASSERT_FALSE(returnBoolStatus);
+}
+
+// Wait until a specific frame notification arrives.
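+// The caller must already hold the result lock via the unique_lock that is passed in.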
+void CameraHidlTest::waitForFrameLocked(DataCallbackMsg msgFrame,
+ std::unique_lock<std::mutex> &l) {
+ while (msgFrame != mDataMessageTypeReceived) {
+ auto timeout = std::chrono::system_clock::now() +
+ std::chrono::seconds(kStreamBufferTimeoutSec);
+ ASSERT_NE(std::cv_status::timeout,
+ mResultCondition.wait_until(l, timeout));
+ }
+}
+
+// Start preview on a particular camera device.
+void CameraHidlTest::startPreview(
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device) {
+ Return<Status> returnStatus = device->startPreview();
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+}
+
+// Retrieve camera parameters.
+void CameraHidlTest::getParameters(
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device,
+ CameraParameters *cameraParams /*out*/) {
+ ASSERT_NE(nullptr, cameraParams);
+
+ Return<void> ret;
+ ret = device->getParameters([&] (const ::android::hardware::hidl_string& params) {
+ ASSERT_FALSE(params.empty());
+ ::android::String8 paramString(params.c_str());
+ (*cameraParams).unflatten(paramString);
+ });
+ ASSERT_TRUE(ret.isOk());
+}
+
+// Set camera parameters.
+void CameraHidlTest::setParameters(
+ const sp<::android::hardware::camera::device::V1_0::ICameraDevice> &device,
+ const CameraParameters &cameraParams) {
+ Return<Status> returnStatus = device->setParameters(
+ cameraParams.flatten().string());
+ ASSERT_TRUE(returnStatus.isOk());
+ ASSERT_EQ(Status::OK, returnStatus);
+}
+
int main(int argc, char **argv) {
::testing::AddGlobalTestEnvironment(CameraHidlEnvironment::Instance());
::testing::InitGoogleTest(&argc, argv);