Camera: Move common-helper target out of HIDL directory
This CL moves the common helper classes out of the HIDL directory
and refactors the HIDL-style namespaces into Android-style namespaces.
To preserve compatibility, the header files export the new symbols under
the old namespace, and the old build module re-exports the symbols from
the new build module, so the refactor is transparent to the rest of the
codebase.
Also ran clang-format to appease the repo hook gods.
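For reference, a minimal sketch of the header-side compatibility shim. It
assumes the old HIDL-style namespace was common::V1_0::helper (matching the
@1.0-helper module name); the code below is illustrative only and is not part
of this diff. The build-side half of the shim is visible in Android.bp below,
where the deprecated @1.0-helper module re-exports the new target via
whole_static_libs.

    // Illustrative sketch, not the actual exported header.
    namespace android::hardware::camera::common {
    namespace helper {
    class CameraMetadata;  // class now lives in the android-style namespace
    }  // namespace helper

    // Keep the old HIDL-style namespace compiling by aliasing it to the new
    // one, so references to common::V1_0::helper::CameraMetadata still resolve.
    namespace V1_0 {
    namespace helper = ::android::hardware::camera::common::helper;
    }  // namespace V1_0
    }  // namespace android::hardware::camera::common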
Bug: 219974678
Test: Existing VTS tests pass on Cuttlefish
Change-Id: I8c3160497c1e2fe7a0a7155641f0e1f5e47ec32e
diff --git a/camera/common/default/Android.bp b/camera/common/default/Android.bp
new file mode 100644
index 0000000..e8c8f9d
--- /dev/null
+++ b/camera/common/default/Android.bp
@@ -0,0 +1,48 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "hardware_interfaces_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
+cc_library_static {
+ name: "android.hardware.camera.common-helper",
+ vendor_available: true,
+ defaults: ["hidl_defaults"],
+ srcs: [
+ "CameraModule.cpp",
+ "CameraMetadata.cpp",
+ "CameraParameters.cpp",
+ "VendorTagDescriptor.cpp",
+ "HandleImporter.cpp",
+ "Exif.cpp",
+ "SimpleThread.cpp",
+ ],
+ cflags: [
+ "-Werror",
+ "-Wextra",
+ "-Wall",
+ ],
+ shared_libs: [
+ "liblog",
+ "libgralloctypes",
+ "libhardware",
+ "libcamera_metadata",
+ "android.hardware.graphics.mapper@2.0",
+ "android.hardware.graphics.mapper@3.0",
+ "android.hardware.graphics.mapper@4.0",
+ "libexif",
+ ],
+ include_dirs: ["system/media/private/camera/include"],
+ export_include_dirs: ["include"],
+}
+
+// NOTE: Deprecated module kept for compatibility reasons.
+// Depend on "android.hardware.camera.common-helper" instead
+cc_library_static {
+ name: "android.hardware.camera.common@1.0-helper",
+ vendor_available: true,
+ whole_static_libs: ["android.hardware.camera.common-helper"],
+}
diff --git a/camera/common/default/CameraMetadata.cpp b/camera/common/default/CameraMetadata.cpp
new file mode 100644
index 0000000..ed56261
--- /dev/null
+++ b/camera/common/default/CameraMetadata.cpp
@@ -0,0 +1,529 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+
+#define LOG_TAG "CamComm1.0-MD"
+#include <log/log.h>
+#include <utils/Errors.h>
+
+#include "CameraMetadata.h"
+#include "VendorTagDescriptor.h"
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace common {
+namespace helper {
+
+#define ALIGN_TO(val, alignment) (((uintptr_t)(val) + ((alignment)-1)) & ~((alignment)-1))
+
+CameraMetadata::CameraMetadata() : mBuffer(NULL), mLocked(false) {}
+
+CameraMetadata::CameraMetadata(size_t entryCapacity, size_t dataCapacity) : mLocked(false) {
+ mBuffer = allocate_camera_metadata(entryCapacity, dataCapacity);
+}
+
+CameraMetadata::CameraMetadata(const CameraMetadata& other) : mLocked(false) {
+ mBuffer = clone_camera_metadata(other.mBuffer);
+}
+
+CameraMetadata::CameraMetadata(camera_metadata_t* buffer) : mBuffer(NULL), mLocked(false) {
+ acquire(buffer);
+}
+
+CameraMetadata& CameraMetadata::operator=(const CameraMetadata& other) {
+ return operator=(other.mBuffer);
+}
+
+CameraMetadata& CameraMetadata::operator=(const camera_metadata_t* buffer) {
+ if (mLocked) {
+ ALOGE("%s: Assignment to a locked CameraMetadata!", __FUNCTION__);
+ return *this;
+ }
+
+ if (CC_LIKELY(buffer != mBuffer)) {
+ camera_metadata_t* newBuffer = clone_camera_metadata(buffer);
+ clear();
+ mBuffer = newBuffer;
+ }
+ return *this;
+}
+
+CameraMetadata::~CameraMetadata() {
+ mLocked = false;
+ clear();
+}
+
+const camera_metadata_t* CameraMetadata::getAndLock() const {
+ mLocked = true;
+ return mBuffer;
+}
+
+status_t CameraMetadata::unlock(const camera_metadata_t* buffer) const {
+ if (!mLocked) {
+ ALOGE("%s: Can't unlock a non-locked CameraMetadata!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ if (buffer != mBuffer) {
+ ALOGE("%s: Can't unlock CameraMetadata with wrong pointer!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ mLocked = false;
+ return OK;
+}
+
+camera_metadata_t* CameraMetadata::release() {
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return NULL;
+ }
+ camera_metadata_t* released = mBuffer;
+ mBuffer = NULL;
+ return released;
+}
+
+void CameraMetadata::clear() {
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return;
+ }
+ if (mBuffer) {
+ free_camera_metadata(mBuffer);
+ mBuffer = NULL;
+ }
+}
+
+void CameraMetadata::acquire(camera_metadata_t* buffer) {
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return;
+ }
+ clear();
+ mBuffer = buffer;
+
+ ALOGE_IF(validate_camera_metadata_structure(mBuffer, /*size*/ NULL) != OK,
+ "%s: Failed to validate metadata structure %p", __FUNCTION__, buffer);
+}
+
+void CameraMetadata::acquire(CameraMetadata& other) {
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return;
+ }
+ acquire(other.release());
+}
+
+status_t CameraMetadata::append(const CameraMetadata& other) {
+ return append(other.mBuffer);
+}
+
+status_t CameraMetadata::append(const camera_metadata_t* other) {
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ size_t extraEntries = get_camera_metadata_entry_count(other);
+ size_t extraData = get_camera_metadata_data_count(other);
+ resizeIfNeeded(extraEntries, extraData);
+
+ return append_camera_metadata(mBuffer, other);
+}
+
+size_t CameraMetadata::entryCount() const {
+ return (mBuffer == NULL) ? 0 : get_camera_metadata_entry_count(mBuffer);
+}
+
+bool CameraMetadata::isEmpty() const {
+ return entryCount() == 0;
+}
+
+status_t CameraMetadata::sort() {
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ return sort_camera_metadata(mBuffer);
+}
+
+status_t CameraMetadata::checkType(uint32_t tag, uint8_t expectedType) {
+ int tagType = get_local_camera_metadata_tag_type(tag, mBuffer);
+ if (CC_UNLIKELY(tagType == -1)) {
+ ALOGE("Update metadata entry: Unknown tag %d", tag);
+ return INVALID_OPERATION;
+ }
+ if (CC_UNLIKELY(tagType != expectedType)) {
+ ALOGE("Mismatched tag type when updating entry %s (%d) of type %s; "
+ "got type %s data instead ",
+ get_local_camera_metadata_tag_name(tag, mBuffer), tag,
+ camera_metadata_type_names[tagType], camera_metadata_type_names[expectedType]);
+ return INVALID_OPERATION;
+ }
+ return OK;
+}
+
+status_t CameraMetadata::update(uint32_t tag, const int32_t* data, size_t data_count) {
+ status_t res;
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ if ((res = checkType(tag, TYPE_INT32)) != OK) {
+ return res;
+ }
+ return updateImpl(tag, (const void*)data, data_count);
+}
+
+status_t CameraMetadata::update(uint32_t tag, const uint8_t* data, size_t data_count) {
+ status_t res;
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ if ((res = checkType(tag, TYPE_BYTE)) != OK) {
+ return res;
+ }
+ return updateImpl(tag, (const void*)data, data_count);
+}
+
+status_t CameraMetadata::update(uint32_t tag, const float* data, size_t data_count) {
+ status_t res;
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ if ((res = checkType(tag, TYPE_FLOAT)) != OK) {
+ return res;
+ }
+ return updateImpl(tag, (const void*)data, data_count);
+}
+
+status_t CameraMetadata::update(uint32_t tag, const int64_t* data, size_t data_count) {
+ status_t res;
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ if ((res = checkType(tag, TYPE_INT64)) != OK) {
+ return res;
+ }
+ return updateImpl(tag, (const void*)data, data_count);
+}
+
+status_t CameraMetadata::update(uint32_t tag, const double* data, size_t data_count) {
+ status_t res;
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ if ((res = checkType(tag, TYPE_DOUBLE)) != OK) {
+ return res;
+ }
+ return updateImpl(tag, (const void*)data, data_count);
+}
+
+status_t CameraMetadata::update(uint32_t tag, const camera_metadata_rational_t* data,
+ size_t data_count) {
+ status_t res;
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ if ((res = checkType(tag, TYPE_RATIONAL)) != OK) {
+ return res;
+ }
+ return updateImpl(tag, (const void*)data, data_count);
+}
+
+status_t CameraMetadata::update(uint32_t tag, const String8& string) {
+ status_t res;
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ if ((res = checkType(tag, TYPE_BYTE)) != OK) {
+ return res;
+ }
+ // string.size() doesn't count the null termination character.
+ return updateImpl(tag, (const void*)string.string(), string.size() + 1);
+}
+
+status_t CameraMetadata::update(const camera_metadata_ro_entry& entry) {
+ status_t res;
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ if ((res = checkType(entry.tag, entry.type)) != OK) {
+ return res;
+ }
+ return updateImpl(entry.tag, (const void*)entry.data.u8, entry.count);
+}
+
+status_t CameraMetadata::updateImpl(uint32_t tag, const void* data, size_t data_count) {
+ status_t res;
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ int type = get_local_camera_metadata_tag_type(tag, mBuffer);
+ if (type == -1) {
+ ALOGE("%s: Tag %d not found", __FUNCTION__, tag);
+ return BAD_VALUE;
+ }
+ // Safety check - ensure that data isn't pointing to this metadata, since
+ // that would get invalidated if a resize is needed
+ size_t bufferSize = get_camera_metadata_size(mBuffer);
+ uintptr_t bufAddr = reinterpret_cast<uintptr_t>(mBuffer);
+ uintptr_t dataAddr = reinterpret_cast<uintptr_t>(data);
+ if (dataAddr > bufAddr && dataAddr < (bufAddr + bufferSize)) {
+ ALOGE("%s: Update attempted with data from the same metadata buffer!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ size_t data_size = calculate_camera_metadata_entry_data_size(type, data_count);
+
+ res = resizeIfNeeded(1, data_size);
+
+ if (res == OK) {
+ camera_metadata_entry_t entry;
+ res = find_camera_metadata_entry(mBuffer, tag, &entry);
+ if (res == NAME_NOT_FOUND) {
+ res = add_camera_metadata_entry(mBuffer, tag, data, data_count);
+ } else if (res == OK) {
+ res = update_camera_metadata_entry(mBuffer, entry.index, data, data_count, NULL);
+ }
+ }
+
+ if (res != OK) {
+ ALOGE("%s: Unable to update metadata entry %s.%s (%x): %s (%d)", __FUNCTION__,
+ get_local_camera_metadata_section_name(tag, mBuffer),
+ get_local_camera_metadata_tag_name(tag, mBuffer), tag, strerror(-res), res);
+ }
+
+ IF_ALOGV() {
+ ALOGE_IF(validate_camera_metadata_structure(mBuffer, /*size*/ NULL) != OK,
+ "%s: Failed to validate metadata structure after update %p",
+ __FUNCTION__,
+ mBuffer);
+ }
+
+ return res;
+}
+
+bool CameraMetadata::exists(uint32_t tag) const {
+ camera_metadata_ro_entry entry;
+ return find_camera_metadata_ro_entry(mBuffer, tag, &entry) == 0;
+}
+
+camera_metadata_entry_t CameraMetadata::find(uint32_t tag) {
+ status_t res;
+ camera_metadata_entry entry;
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ entry.count = 0;
+ return entry;
+ }
+ res = find_camera_metadata_entry(mBuffer, tag, &entry);
+ if (CC_UNLIKELY(res != OK)) {
+ entry.count = 0;
+ entry.data.u8 = NULL;
+ }
+ return entry;
+}
+
+camera_metadata_ro_entry_t CameraMetadata::find(uint32_t tag) const {
+ status_t res;
+ camera_metadata_ro_entry entry;
+ res = find_camera_metadata_ro_entry(mBuffer, tag, &entry);
+ if (CC_UNLIKELY(res != OK)) {
+ entry.count = 0;
+ entry.data.u8 = NULL;
+ }
+ return entry;
+}
+
+status_t CameraMetadata::erase(uint32_t tag) {
+ camera_metadata_entry_t entry;
+ status_t res;
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ res = find_camera_metadata_entry(mBuffer, tag, &entry);
+ if (res == NAME_NOT_FOUND) {
+ return OK;
+ } else if (res != OK) {
+ ALOGE("%s: Error looking for entry %s.%s (%x): %s %d", __FUNCTION__,
+ get_local_camera_metadata_section_name(tag, mBuffer),
+ get_local_camera_metadata_tag_name(tag, mBuffer), tag, strerror(-res), res);
+ return res;
+ }
+ res = delete_camera_metadata_entry(mBuffer, entry.index);
+ if (res != OK) {
+ ALOGE("%s: Error deleting entry %s.%s (%x): %s %d", __FUNCTION__,
+ get_local_camera_metadata_section_name(tag, mBuffer),
+ get_local_camera_metadata_tag_name(tag, mBuffer), tag, strerror(-res), res);
+ }
+ return res;
+}
+
+void CameraMetadata::dump(int fd, int verbosity, int indentation) const {
+ dump_indented_camera_metadata(mBuffer, fd, verbosity, indentation);
+}
+
+status_t CameraMetadata::resizeIfNeeded(size_t extraEntries, size_t extraData) {
+ if (mBuffer == NULL) {
+ mBuffer = allocate_camera_metadata(extraEntries * 2, extraData * 2);
+ if (mBuffer == NULL) {
+ ALOGE("%s: Can't allocate larger metadata buffer", __FUNCTION__);
+ return NO_MEMORY;
+ }
+ } else {
+ size_t currentEntryCount = get_camera_metadata_entry_count(mBuffer);
+ size_t currentEntryCap = get_camera_metadata_entry_capacity(mBuffer);
+ size_t newEntryCount = currentEntryCount + extraEntries;
+ newEntryCount = (newEntryCount > currentEntryCap) ? newEntryCount * 2 : currentEntryCap;
+
+ size_t currentDataCount = get_camera_metadata_data_count(mBuffer);
+ size_t currentDataCap = get_camera_metadata_data_capacity(mBuffer);
+ size_t newDataCount = currentDataCount + extraData;
+ newDataCount = (newDataCount > currentDataCap) ? newDataCount * 2 : currentDataCap;
+
+ if (newEntryCount > currentEntryCap || newDataCount > currentDataCap) {
+ camera_metadata_t* oldBuffer = mBuffer;
+ mBuffer = allocate_camera_metadata(newEntryCount, newDataCount);
+ if (mBuffer == NULL) {
+ ALOGE("%s: Can't allocate larger metadata buffer", __FUNCTION__);
+ return NO_MEMORY;
+ }
+ append_camera_metadata(mBuffer, oldBuffer);
+ free_camera_metadata(oldBuffer);
+ }
+ }
+ return OK;
+}
+
+void CameraMetadata::swap(CameraMetadata& other) {
+ if (mLocked) {
+ ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+ return;
+ } else if (other.mLocked) {
+ ALOGE("%s: Other CameraMetadata is locked", __FUNCTION__);
+ return;
+ }
+
+ camera_metadata* thisBuf = mBuffer;
+ camera_metadata* otherBuf = other.mBuffer;
+
+ other.mBuffer = thisBuf;
+ mBuffer = otherBuf;
+}
+
+status_t CameraMetadata::getTagFromName(const char* name, const VendorTagDescriptor* vTags,
+ uint32_t* tag) {
+ if (name == nullptr || tag == nullptr) return BAD_VALUE;
+
+ size_t nameLength = strlen(name);
+
+ const SortedVector<String8>* vendorSections;
+ size_t vendorSectionCount = 0;
+
+ if (vTags != NULL) {
+ vendorSections = vTags->getAllSectionNames();
+ vendorSectionCount = vendorSections->size();
+ }
+
+ // First, find the section by the longest string match
+ const char* section = NULL;
+ size_t sectionIndex = 0;
+ size_t sectionLength = 0;
+ size_t totalSectionCount = ANDROID_SECTION_COUNT + vendorSectionCount;
+ for (size_t i = 0; i < totalSectionCount; ++i) {
+ const char* str = (i < ANDROID_SECTION_COUNT)
+ ? camera_metadata_section_names[i]
+ : (*vendorSections)[i - ANDROID_SECTION_COUNT].string();
+
+ ALOGV("%s: Trying to match against section '%s'", __FUNCTION__, str);
+
+ if (strstr(name, str) == name) { // name begins with the section name
+ size_t strLength = strlen(str);
+
+ ALOGV("%s: Name begins with section name", __FUNCTION__);
+
+ // section name is the longest we've found so far
+ if (section == NULL || sectionLength < strLength) {
+ section = str;
+ sectionIndex = i;
+ sectionLength = strLength;
+
+ ALOGV("%s: Found new best section (%s)", __FUNCTION__, section);
+ }
+ }
+ }
+
+ if (section == NULL) {
+ return NAME_NOT_FOUND;
+ } else {
+ ALOGV("%s: Found matched section '%s' (%zu)", __FUNCTION__, section, sectionIndex);
+ }
+
+ // Get the tag name component of the name
+ const char* nameTagName = name + sectionLength + 1; // x.y.z -> z
+ if (sectionLength + 1 >= nameLength) {
+ return BAD_VALUE;
+ }
+
+ // Match rest of name against the tag names in that section only
+ uint32_t candidateTag = 0;
+ if (sectionIndex < ANDROID_SECTION_COUNT) {
+ // Match built-in tags (typically android.*)
+ uint32_t tagBegin, tagEnd; // [tagBegin, tagEnd)
+ tagBegin = camera_metadata_section_bounds[sectionIndex][0];
+ tagEnd = camera_metadata_section_bounds[sectionIndex][1];
+
+ for (candidateTag = tagBegin; candidateTag < tagEnd; ++candidateTag) {
+ const char* tagName = get_camera_metadata_tag_name(candidateTag);
+
+ if (strcmp(nameTagName, tagName) == 0) {
+ ALOGV("%s: Found matched tag '%s' (%d)", __FUNCTION__, tagName, candidateTag);
+ break;
+ }
+ }
+
+ if (candidateTag == tagEnd) {
+ return NAME_NOT_FOUND;
+ }
+ } else if (vTags != NULL) {
+ // Match vendor tags (typically com.*)
+ const String8 sectionName(section);
+ const String8 tagName(nameTagName);
+
+ status_t res = OK;
+ if ((res = vTags->lookupTag(tagName, sectionName, &candidateTag)) != OK) {
+ return NAME_NOT_FOUND;
+ }
+ }
+
+ *tag = candidateTag;
+ return OK;
+}
+
+} // namespace helper
+} // namespace common
+} // namespace camera
+} // namespace hardware
+} // namespace android
diff --git a/camera/common/default/CameraModule.cpp b/camera/common/default/CameraModule.cpp
new file mode 100644
index 0000000..9960842
--- /dev/null
+++ b/camera/common/default/CameraModule.cpp
@@ -0,0 +1,566 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CamComm1.0-CamModule"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+// #define LOG_NDEBUG 0
+
+#include <utils/Trace.h>
+
+#include "CameraModule.h"
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace common {
+namespace helper {
+
+void CameraModule::deriveCameraCharacteristicsKeys(uint32_t deviceVersion, CameraMetadata& chars) {
+ ATRACE_CALL();
+
+ Vector<int32_t> derivedCharKeys;
+ Vector<int32_t> derivedRequestKeys;
+ Vector<int32_t> derivedResultKeys;
+ // Keys added in HAL3.3
+ if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_3) {
+ Vector<uint8_t> controlModes;
+ uint8_t data = ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE;
+ chars.update(ANDROID_CONTROL_AE_LOCK_AVAILABLE, &data, /*count*/ 1);
+ data = ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE;
+ chars.update(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, &data, /*count*/ 1);
+ controlModes.push(ANDROID_CONTROL_MODE_AUTO);
+ camera_metadata_entry entry = chars.find(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
+ if (entry.count > 1 || entry.data.u8[0] != ANDROID_CONTROL_SCENE_MODE_DISABLED) {
+ controlModes.push(ANDROID_CONTROL_MODE_USE_SCENE_MODE);
+ }
+
+ // Only advertise CONTROL_OFF mode if 3A manual controls are supported.
+ bool isManualAeSupported = false;
+ bool isManualAfSupported = false;
+ bool isManualAwbSupported = false;
+ entry = chars.find(ANDROID_CONTROL_AE_AVAILABLE_MODES);
+ if (entry.count > 0) {
+ for (size_t i = 0; i < entry.count; i++) {
+ if (entry.data.u8[i] == ANDROID_CONTROL_AE_MODE_OFF) {
+ isManualAeSupported = true;
+ break;
+ }
+ }
+ }
+ entry = chars.find(ANDROID_CONTROL_AF_AVAILABLE_MODES);
+ if (entry.count > 0) {
+ for (size_t i = 0; i < entry.count; i++) {
+ if (entry.data.u8[i] == ANDROID_CONTROL_AF_MODE_OFF) {
+ isManualAfSupported = true;
+ break;
+ }
+ }
+ }
+ entry = chars.find(ANDROID_CONTROL_AWB_AVAILABLE_MODES);
+ if (entry.count > 0) {
+ for (size_t i = 0; i < entry.count; i++) {
+ if (entry.data.u8[i] == ANDROID_CONTROL_AWB_MODE_OFF) {
+ isManualAwbSupported = true;
+ break;
+ }
+ }
+ }
+ if (isManualAeSupported && isManualAfSupported && isManualAwbSupported) {
+ controlModes.push(ANDROID_CONTROL_MODE_OFF);
+ }
+
+ chars.update(ANDROID_CONTROL_AVAILABLE_MODES, controlModes);
+
+ entry = chars.find(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS);
+ // HAL3.2 devices passing existing CTS test should all support all LSC modes and LSC map
+ bool lensShadingModeSupported = false;
+ if (entry.count > 0) {
+ for (size_t i = 0; i < entry.count; i++) {
+ if (entry.data.i32[i] == ANDROID_SHADING_MODE) {
+ lensShadingModeSupported = true;
+ break;
+ }
+ }
+ }
+ Vector<uint8_t> lscModes;
+ Vector<uint8_t> lscMapModes;
+ lscModes.push(ANDROID_SHADING_MODE_FAST);
+ lscModes.push(ANDROID_SHADING_MODE_HIGH_QUALITY);
+ lscMapModes.push(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF);
+ if (lensShadingModeSupported) {
+ lscModes.push(ANDROID_SHADING_MODE_OFF);
+ lscMapModes.push(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_ON);
+ }
+ chars.update(ANDROID_SHADING_AVAILABLE_MODES, lscModes);
+ chars.update(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES, lscMapModes);
+
+ derivedCharKeys.push(ANDROID_CONTROL_AE_LOCK_AVAILABLE);
+ derivedCharKeys.push(ANDROID_CONTROL_AWB_LOCK_AVAILABLE);
+ derivedCharKeys.push(ANDROID_CONTROL_AVAILABLE_MODES);
+ derivedCharKeys.push(ANDROID_SHADING_AVAILABLE_MODES);
+ derivedCharKeys.push(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES);
+
+ // Need to update android.control.availableHighSpeedVideoConfigurations since HAL3.3
+ // adds batch size to this array.
+ entry = chars.find(ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS);
+ if (entry.count > 0) {
+ Vector<int32_t> highSpeedConfig;
+ for (size_t i = 0; i < entry.count; i += 4) {
+ highSpeedConfig.add(entry.data.i32[i]); // width
+ highSpeedConfig.add(entry.data.i32[i + 1]); // height
+ highSpeedConfig.add(entry.data.i32[i + 2]); // fps_min
+ highSpeedConfig.add(entry.data.i32[i + 3]); // fps_max
+ highSpeedConfig.add(1); // batchSize_max. default to 1 for HAL3.2
+ }
+ chars.update(ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS,
+ highSpeedConfig);
+ }
+ }
+
+ // Keys added in HAL3.4
+ if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_4) {
+ // Check if HAL supports RAW_OPAQUE output
+ camera_metadata_entry entry = chars.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+ bool supportRawOpaque = false;
+ bool supportAnyRaw = false;
+ const int STREAM_CONFIGURATION_SIZE = 4;
+ const int STREAM_FORMAT_OFFSET = 0;
+ const int STREAM_WIDTH_OFFSET = 1;
+ const int STREAM_HEIGHT_OFFSET = 2;
+ const int STREAM_IS_INPUT_OFFSET = 3;
+ Vector<int32_t> rawOpaqueSizes;
+
+ for (size_t i = 0; i < entry.count; i += STREAM_CONFIGURATION_SIZE) {
+ int32_t format = entry.data.i32[i + STREAM_FORMAT_OFFSET];
+ int32_t width = entry.data.i32[i + STREAM_WIDTH_OFFSET];
+ int32_t height = entry.data.i32[i + STREAM_HEIGHT_OFFSET];
+ int32_t isInput = entry.data.i32[i + STREAM_IS_INPUT_OFFSET];
+ if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+ format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
+ supportRawOpaque = true;
+ rawOpaqueSizes.push(width);
+ rawOpaqueSizes.push(height);
+ // 2 bytes per pixel. This rough estimation is only used when
+ // HAL does not fill in the opaque raw size
+ rawOpaqueSizes.push(width * height * 2);
+ }
+ if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+ (format == HAL_PIXEL_FORMAT_RAW16 || format == HAL_PIXEL_FORMAT_RAW10 ||
+ format == HAL_PIXEL_FORMAT_RAW12 || format == HAL_PIXEL_FORMAT_RAW_OPAQUE)) {
+ supportAnyRaw = true;
+ }
+ }
+
+ if (supportRawOpaque) {
+ entry = chars.find(ANDROID_SENSOR_OPAQUE_RAW_SIZE);
+ if (entry.count == 0) {
+ // Fill in estimated value if HAL does not list it
+ chars.update(ANDROID_SENSOR_OPAQUE_RAW_SIZE, rawOpaqueSizes);
+ derivedCharKeys.push(ANDROID_SENSOR_OPAQUE_RAW_SIZE);
+ }
+ }
+
+ // Check if HAL supports any RAW output, if so, fill in postRawSensitivityBoost range
+ if (supportAnyRaw) {
+ int32_t defaultRange[2] = {100, 100};
+ entry = chars.find(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE);
+ if (entry.count == 0) {
+ // Fill in default value (100, 100)
+ chars.update(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE, defaultRange, 2);
+ derivedCharKeys.push(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE);
+ // Actual request/results will be derived by camera device.
+ derivedRequestKeys.push(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST);
+ derivedResultKeys.push(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST);
+ }
+ }
+ }
+
+ // Add those newly added keys to AVAILABLE_CHARACTERISTICS_KEYS
+ // This has to be done at the end of this function.
+ if (derivedCharKeys.size() > 0) {
+ appendAvailableKeys(chars, ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, derivedCharKeys);
+ }
+ if (derivedRequestKeys.size() > 0) {
+ appendAvailableKeys(chars, ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS, derivedRequestKeys);
+ }
+ if (derivedResultKeys.size() > 0) {
+ appendAvailableKeys(chars, ANDROID_REQUEST_AVAILABLE_RESULT_KEYS, derivedResultKeys);
+ }
+ return;
+}
+
+void CameraModule::appendAvailableKeys(CameraMetadata& chars, int32_t keyTag,
+ const Vector<int32_t>& appendKeys) {
+ camera_metadata_entry entry = chars.find(keyTag);
+ Vector<int32_t> availableKeys;
+ availableKeys.setCapacity(entry.count + appendKeys.size());
+ for (size_t i = 0; i < entry.count; i++) {
+ availableKeys.push(entry.data.i32[i]);
+ }
+ for (size_t i = 0; i < appendKeys.size(); i++) {
+ availableKeys.push(appendKeys[i]);
+ }
+ chars.update(keyTag, availableKeys);
+}
+
+CameraModule::CameraModule(camera_module_t* module) : mNumberOfCameras(0) {
+ if (module == NULL) {
+ ALOGE("%s: camera hardware module must not be null", __FUNCTION__);
+ assert(0);
+ }
+ mModule = module;
+}
+
+CameraModule::~CameraModule() {
+ while (mCameraInfoMap.size() > 0) {
+ camera_info cameraInfo = mCameraInfoMap.editValueAt(0);
+ if (cameraInfo.static_camera_characteristics != NULL) {
+ free_camera_metadata(
+ const_cast<camera_metadata_t*>(cameraInfo.static_camera_characteristics));
+ }
+ mCameraInfoMap.removeItemsAt(0);
+ }
+
+ while (mPhysicalCameraInfoMap.size() > 0) {
+ camera_metadata_t* metadata = mPhysicalCameraInfoMap.editValueAt(0);
+ if (metadata != NULL) {
+ free_camera_metadata(metadata);
+ }
+ mPhysicalCameraInfoMap.removeItemsAt(0);
+ }
+}
+
+int CameraModule::init() {
+ ATRACE_CALL();
+ int res = OK;
+ if (getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_4 && mModule->init != NULL) {
+ ATRACE_BEGIN("camera_module->init");
+ res = mModule->init();
+ ATRACE_END();
+ }
+ mNumberOfCameras = getNumberOfCameras();
+ mCameraInfoMap.setCapacity(mNumberOfCameras);
+ return res;
+}
+
+int CameraModule::getCameraInfo(int cameraId, struct camera_info* info) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mCameraInfoLock);
+ if (cameraId < 0) {
+ ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
+ return -EINVAL;
+ }
+
+ // Only override static_camera_characteristics for API2 devices
+ int apiVersion = mModule->common.module_api_version;
+ if (apiVersion < CAMERA_MODULE_API_VERSION_2_0) {
+ int ret;
+ ATRACE_BEGIN("camera_module->get_camera_info");
+ ret = mModule->get_camera_info(cameraId, info);
+ // Fill this in so CameraService won't be confused by a
+ // possibly 0 device_version
+ info->device_version = CAMERA_DEVICE_API_VERSION_1_0;
+ ATRACE_END();
+ return ret;
+ }
+
+ ssize_t index = mCameraInfoMap.indexOfKey(cameraId);
+ if (index == NAME_NOT_FOUND) {
+ // Get camera info from raw module and cache it
+ camera_info rawInfo, cameraInfo;
+ ATRACE_BEGIN("camera_module->get_camera_info");
+ int ret = mModule->get_camera_info(cameraId, &rawInfo);
+ ATRACE_END();
+ if (ret != 0) {
+ return ret;
+ }
+ int deviceVersion = rawInfo.device_version;
+ if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_0) {
+ // static_camera_characteristics is invalid
+ *info = rawInfo;
+ return ret;
+ }
+ CameraMetadata m;
+ m.append(rawInfo.static_camera_characteristics);
+ deriveCameraCharacteristicsKeys(rawInfo.device_version, m);
+ cameraInfo = rawInfo;
+ cameraInfo.static_camera_characteristics = m.release();
+ index = mCameraInfoMap.add(cameraId, cameraInfo);
+ }
+
+ assert(index != NAME_NOT_FOUND);
+ // return the cached camera info
+ *info = mCameraInfoMap[index];
+ return OK;
+}
+
+int CameraModule::getPhysicalCameraInfo(int physicalCameraId, camera_metadata_t** physicalInfo) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mCameraInfoLock);
+ if (physicalCameraId < mNumberOfCameras) {
+ ALOGE("%s: Invalid physical camera ID %d", __FUNCTION__, physicalCameraId);
+ return -EINVAL;
+ }
+
+ // Only query physical camera info for module version 2.5 or newer
+ int apiVersion = mModule->common.module_api_version;
+ if (apiVersion < CAMERA_MODULE_API_VERSION_2_5) {
+ ALOGE("%s: Module version must be at least 2.5 to handle getPhysicalCameraInfo",
+ __FUNCTION__);
+ return -ENODEV;
+ }
+ if (mModule->get_physical_camera_info == nullptr) {
+ ALOGE("%s: get_physical_camera is NULL for module version 2.5", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ ssize_t index = mPhysicalCameraInfoMap.indexOfKey(physicalCameraId);
+ if (index == NAME_NOT_FOUND) {
+ // Get physical camera characteristics, and cache it
+ camera_metadata_t* info = nullptr;
+ ATRACE_BEGIN("camera_module->get_physical_camera_info");
+ int ret = mModule->get_physical_camera_info(physicalCameraId, &info);
+ ATRACE_END();
+ if (ret != 0) {
+ return ret;
+ }
+
+ // The camera_metadata_t returned by get_physical_camera_info could be using
+ // more memory than necessary due to unused reserved space. Reduce the
+ // size by appending it to a new CameraMetadata object, which internally
+ // calls resizeIfNeeded.
+ CameraMetadata m;
+ m.append(info);
+ camera_metadata_t* derivedMetadata = m.release();
+ index = mPhysicalCameraInfoMap.add(physicalCameraId, derivedMetadata);
+ }
+
+ assert(index != NAME_NOT_FOUND);
+ *physicalInfo = mPhysicalCameraInfoMap[index];
+ return OK;
+}
+
+int CameraModule::getDeviceVersion(int cameraId) {
+ ssize_t index = mDeviceVersionMap.indexOfKey(cameraId);
+ if (index == NAME_NOT_FOUND) {
+ int deviceVersion;
+ if (getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_0) {
+ struct camera_info info;
+ getCameraInfo(cameraId, &info);
+ deviceVersion = info.device_version;
+ } else {
+ deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
+ }
+ index = mDeviceVersionMap.add(cameraId, deviceVersion);
+ }
+ assert(index != NAME_NOT_FOUND);
+ return mDeviceVersionMap[index];
+}
+
+int CameraModule::open(const char* id, struct hw_device_t** device) {
+ int res;
+ ATRACE_BEGIN("camera_module->open");
+ res = filterOpenErrorCode(mModule->common.methods->open(&mModule->common, id, device));
+ ATRACE_END();
+ return res;
+}
+
+bool CameraModule::isOpenLegacyDefined() const {
+ if (getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_3) {
+ return false;
+ }
+ return mModule->open_legacy != NULL;
+}
+
+int CameraModule::openLegacy(const char* id, uint32_t halVersion, struct hw_device_t** device) {
+ int res;
+ ATRACE_BEGIN("camera_module->open_legacy");
+ res = mModule->open_legacy(&mModule->common, id, halVersion, device);
+ ATRACE_END();
+ return res;
+}
+
+int CameraModule::getNumberOfCameras() {
+ int numCameras;
+ ATRACE_BEGIN("camera_module->get_number_of_cameras");
+ numCameras = mModule->get_number_of_cameras();
+ ATRACE_END();
+ return numCameras;
+}
+
+int CameraModule::setCallbacks(const camera_module_callbacks_t* callbacks) {
+ int res = OK;
+ ATRACE_BEGIN("camera_module->set_callbacks");
+ if (getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_1) {
+ res = mModule->set_callbacks(callbacks);
+ }
+ ATRACE_END();
+ return res;
+}
+
+bool CameraModule::isVendorTagDefined() const {
+ return mModule->get_vendor_tag_ops != NULL;
+}
+
+void CameraModule::getVendorTagOps(vendor_tag_ops_t* ops) {
+ if (mModule->get_vendor_tag_ops) {
+ ATRACE_BEGIN("camera_module->get_vendor_tag_ops");
+ mModule->get_vendor_tag_ops(ops);
+ ATRACE_END();
+ }
+}
+
+bool CameraModule::isSetTorchModeSupported() const {
+ if (getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_4) {
+ if (mModule->set_torch_mode == NULL) {
+ ALOGE("%s: Module 2.4 device must support set torch API!", __FUNCTION__);
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
+
+int CameraModule::setTorchMode(const char* camera_id, bool enable) {
+ int res = INVALID_OPERATION;
+ if (mModule->set_torch_mode != NULL) {
+ ATRACE_BEGIN("camera_module->set_torch_mode");
+ res = mModule->set_torch_mode(camera_id, enable);
+ ATRACE_END();
+ }
+ return res;
+}
+
+int CameraModule::isStreamCombinationSupported(int cameraId, camera_stream_combination_t* streams) {
+ int res = INVALID_OPERATION;
+ if (mModule->is_stream_combination_supported != NULL) {
+ ATRACE_BEGIN("camera_module->is_stream_combination_supported");
+ res = mModule->is_stream_combination_supported(cameraId, streams);
+ ATRACE_END();
+ }
+ return res;
+}
+
+void CameraModule::notifyDeviceStateChange(uint64_t deviceState) {
+ if (getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_5 &&
+ mModule->notify_device_state_change != NULL) {
+ ATRACE_BEGIN("camera_module->notify_device_state_change");
+ ALOGI("%s: calling notify_device_state_change with state %" PRId64, __FUNCTION__,
+ deviceState);
+ mModule->notify_device_state_change(deviceState);
+ ATRACE_END();
+ }
+}
+
+bool CameraModule::isLogicalMultiCamera(const common::helper::CameraMetadata& metadata,
+ std::unordered_set<std::string>* physicalCameraIds) {
+ if (physicalCameraIds == nullptr) {
+ ALOGE("%s: physicalCameraIds must not be null", __FUNCTION__);
+ return false;
+ }
+
+ bool isLogicalMultiCamera = false;
+ camera_metadata_ro_entry_t capabilities = metadata.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ for (size_t i = 0; i < capabilities.count; i++) {
+ if (capabilities.data.u8[i] ==
+ ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
+ isLogicalMultiCamera = true;
+ break;
+ }
+ }
+
+ if (isLogicalMultiCamera) {
+ camera_metadata_ro_entry_t entry = metadata.find(ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS);
+ const uint8_t* ids = entry.data.u8;
+ size_t start = 0;
+ for (size_t i = 0; i < entry.count; ++i) {
+ if (ids[i] == '\0') {
+ if (start != i) {
+ const char* physicalId = reinterpret_cast<const char*>(ids + start);
+ physicalCameraIds->emplace(physicalId);
+ }
+ start = i + 1;
+ }
+ }
+ }
+ return isLogicalMultiCamera;
+}
+
+status_t CameraModule::filterOpenErrorCode(status_t err) {
+ switch (err) {
+ case NO_ERROR:
+ case -EBUSY:
+ case -EINVAL:
+ case -EUSERS:
+ return err;
+ default:
+ break;
+ }
+ return -ENODEV;
+}
+
+void CameraModule::removeCamera(int cameraId) {
+ // Skip HAL1 devices, which aren't cached in mCameraInfoMap and don't advertise
+ // static_camera_characteristics
+ if (getDeviceVersion(cameraId) >= CAMERA_DEVICE_API_VERSION_3_0) {
+ std::unordered_set<std::string> physicalIds;
+ camera_metadata_t* metadata = const_cast<camera_metadata_t*>(
+ mCameraInfoMap.valueFor(cameraId).static_camera_characteristics);
+ common::helper::CameraMetadata hidlMetadata(metadata);
+
+ if (isLogicalMultiCamera(hidlMetadata, &physicalIds)) {
+ for (const auto& id : physicalIds) {
+ int idInt = std::stoi(id);
+ if (mPhysicalCameraInfoMap.indexOfKey(idInt) >= 0) {
+ free_camera_metadata(mPhysicalCameraInfoMap[idInt]);
+ mPhysicalCameraInfoMap.removeItem(idInt);
+ } else {
+ ALOGE("%s: Cannot find corresponding static metadata for physical id %s",
+ __FUNCTION__, id.c_str());
+ }
+ }
+ }
+ }
+
+ mCameraInfoMap.removeItem(cameraId);
+ mDeviceVersionMap.removeItem(cameraId);
+}
+
+uint16_t CameraModule::getModuleApiVersion() const {
+ return mModule->common.module_api_version;
+}
+
+const char* CameraModule::getModuleName() const {
+ return mModule->common.name;
+}
+
+uint16_t CameraModule::getHalApiVersion() const {
+ return mModule->common.hal_api_version;
+}
+
+const char* CameraModule::getModuleAuthor() const {
+ return mModule->common.author;
+}
+
+void* CameraModule::getDso() {
+ return mModule->common.dso;
+}
+
+} // namespace helper
+} // namespace common
+} // namespace camera
+} // namespace hardware
+} // namespace android
diff --git a/camera/common/default/CameraParameters.cpp b/camera/common/default/CameraParameters.cpp
new file mode 100644
index 0000000..37e28a2
--- /dev/null
+++ b/camera/common/default/CameraParameters.cpp
@@ -0,0 +1,503 @@
+/*
+**
+** Copyright 2008, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "CameraParams"
+#include <log/log.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <system/graphics.h>
+#include <unistd.h>
+#include "CameraParameters.h"
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace common {
+namespace helper {
+
+// Parameter keys to communicate between camera application and driver.
+const char CameraParameters::KEY_PREVIEW_SIZE[] = "preview-size";
+const char CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES[] = "preview-size-values";
+const char CameraParameters::KEY_PREVIEW_FORMAT[] = "preview-format";
+const char CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS[] = "preview-format-values";
+const char CameraParameters::KEY_PREVIEW_FRAME_RATE[] = "preview-frame-rate";
+const char CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES[] = "preview-frame-rate-values";
+const char CameraParameters::KEY_PREVIEW_FPS_RANGE[] = "preview-fps-range";
+const char CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE[] = "preview-fps-range-values";
+const char CameraParameters::KEY_PICTURE_SIZE[] = "picture-size";
+const char CameraParameters::KEY_SUPPORTED_PICTURE_SIZES[] = "picture-size-values";
+const char CameraParameters::KEY_PICTURE_FORMAT[] = "picture-format";
+const char CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS[] = "picture-format-values";
+const char CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH[] = "jpeg-thumbnail-width";
+const char CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT[] = "jpeg-thumbnail-height";
+const char CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES[] = "jpeg-thumbnail-size-values";
+const char CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY[] = "jpeg-thumbnail-quality";
+const char CameraParameters::KEY_JPEG_QUALITY[] = "jpeg-quality";
+const char CameraParameters::KEY_ROTATION[] = "rotation";
+const char CameraParameters::KEY_GPS_LATITUDE[] = "gps-latitude";
+const char CameraParameters::KEY_GPS_LONGITUDE[] = "gps-longitude";
+const char CameraParameters::KEY_GPS_ALTITUDE[] = "gps-altitude";
+const char CameraParameters::KEY_GPS_TIMESTAMP[] = "gps-timestamp";
+const char CameraParameters::KEY_GPS_PROCESSING_METHOD[] = "gps-processing-method";
+const char CameraParameters::KEY_WHITE_BALANCE[] = "whitebalance";
+const char CameraParameters::KEY_SUPPORTED_WHITE_BALANCE[] = "whitebalance-values";
+const char CameraParameters::KEY_EFFECT[] = "effect";
+const char CameraParameters::KEY_SUPPORTED_EFFECTS[] = "effect-values";
+const char CameraParameters::KEY_ANTIBANDING[] = "antibanding";
+const char CameraParameters::KEY_SUPPORTED_ANTIBANDING[] = "antibanding-values";
+const char CameraParameters::KEY_SCENE_MODE[] = "scene-mode";
+const char CameraParameters::KEY_SUPPORTED_SCENE_MODES[] = "scene-mode-values";
+const char CameraParameters::KEY_FLASH_MODE[] = "flash-mode";
+const char CameraParameters::KEY_SUPPORTED_FLASH_MODES[] = "flash-mode-values";
+const char CameraParameters::KEY_FOCUS_MODE[] = "focus-mode";
+const char CameraParameters::KEY_SUPPORTED_FOCUS_MODES[] = "focus-mode-values";
+const char CameraParameters::KEY_MAX_NUM_FOCUS_AREAS[] = "max-num-focus-areas";
+const char CameraParameters::KEY_FOCUS_AREAS[] = "focus-areas";
+const char CameraParameters::KEY_FOCAL_LENGTH[] = "focal-length";
+const char CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE[] = "horizontal-view-angle";
+const char CameraParameters::KEY_VERTICAL_VIEW_ANGLE[] = "vertical-view-angle";
+const char CameraParameters::KEY_EXPOSURE_COMPENSATION[] = "exposure-compensation";
+const char CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION[] = "max-exposure-compensation";
+const char CameraParameters::KEY_MIN_EXPOSURE_COMPENSATION[] = "min-exposure-compensation";
+const char CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP[] = "exposure-compensation-step";
+const char CameraParameters::KEY_AUTO_EXPOSURE_LOCK[] = "auto-exposure-lock";
+const char CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED[] = "auto-exposure-lock-supported";
+const char CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK[] = "auto-whitebalance-lock";
+const char CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED[] =
+ "auto-whitebalance-lock-supported";
+const char CameraParameters::KEY_MAX_NUM_METERING_AREAS[] = "max-num-metering-areas";
+const char CameraParameters::KEY_METERING_AREAS[] = "metering-areas";
+const char CameraParameters::KEY_ZOOM[] = "zoom";
+const char CameraParameters::KEY_MAX_ZOOM[] = "max-zoom";
+const char CameraParameters::KEY_ZOOM_RATIOS[] = "zoom-ratios";
+const char CameraParameters::KEY_ZOOM_SUPPORTED[] = "zoom-supported";
+const char CameraParameters::KEY_SMOOTH_ZOOM_SUPPORTED[] = "smooth-zoom-supported";
+const char CameraParameters::KEY_FOCUS_DISTANCES[] = "focus-distances";
+const char CameraParameters::KEY_VIDEO_FRAME_FORMAT[] = "video-frame-format";
+const char CameraParameters::KEY_VIDEO_SIZE[] = "video-size";
+const char CameraParameters::KEY_SUPPORTED_VIDEO_SIZES[] = "video-size-values";
+const char CameraParameters::KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO[] =
+ "preferred-preview-size-for-video";
+const char CameraParameters::KEY_MAX_NUM_DETECTED_FACES_HW[] = "max-num-detected-faces-hw";
+const char CameraParameters::KEY_MAX_NUM_DETECTED_FACES_SW[] = "max-num-detected-faces-sw";
+const char CameraParameters::KEY_RECORDING_HINT[] = "recording-hint";
+const char CameraParameters::KEY_VIDEO_SNAPSHOT_SUPPORTED[] = "video-snapshot-supported";
+const char CameraParameters::KEY_VIDEO_STABILIZATION[] = "video-stabilization";
+const char CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED[] = "video-stabilization-supported";
+const char CameraParameters::KEY_LIGHTFX[] = "light-fx";
+
+const char CameraParameters::TRUE[] = "true";
+const char CameraParameters::FALSE[] = "false";
+const char CameraParameters::FOCUS_DISTANCE_INFINITY[] = "Infinity";
+
+// Values for white balance settings.
+const char CameraParameters::WHITE_BALANCE_AUTO[] = "auto";
+const char CameraParameters::WHITE_BALANCE_INCANDESCENT[] = "incandescent";
+const char CameraParameters::WHITE_BALANCE_FLUORESCENT[] = "fluorescent";
+const char CameraParameters::WHITE_BALANCE_WARM_FLUORESCENT[] = "warm-fluorescent";
+const char CameraParameters::WHITE_BALANCE_DAYLIGHT[] = "daylight";
+const char CameraParameters::WHITE_BALANCE_CLOUDY_DAYLIGHT[] = "cloudy-daylight";
+const char CameraParameters::WHITE_BALANCE_TWILIGHT[] = "twilight";
+const char CameraParameters::WHITE_BALANCE_SHADE[] = "shade";
+
+// Values for effect settings.
+const char CameraParameters::EFFECT_NONE[] = "none";
+const char CameraParameters::EFFECT_MONO[] = "mono";
+const char CameraParameters::EFFECT_NEGATIVE[] = "negative";
+const char CameraParameters::EFFECT_SOLARIZE[] = "solarize";
+const char CameraParameters::EFFECT_SEPIA[] = "sepia";
+const char CameraParameters::EFFECT_POSTERIZE[] = "posterize";
+const char CameraParameters::EFFECT_WHITEBOARD[] = "whiteboard";
+const char CameraParameters::EFFECT_BLACKBOARD[] = "blackboard";
+const char CameraParameters::EFFECT_AQUA[] = "aqua";
+
+// Values for antibanding settings.
+const char CameraParameters::ANTIBANDING_AUTO[] = "auto";
+const char CameraParameters::ANTIBANDING_50HZ[] = "50hz";
+const char CameraParameters::ANTIBANDING_60HZ[] = "60hz";
+const char CameraParameters::ANTIBANDING_OFF[] = "off";
+
+// Values for flash mode settings.
+const char CameraParameters::FLASH_MODE_OFF[] = "off";
+const char CameraParameters::FLASH_MODE_AUTO[] = "auto";
+const char CameraParameters::FLASH_MODE_ON[] = "on";
+const char CameraParameters::FLASH_MODE_RED_EYE[] = "red-eye";
+const char CameraParameters::FLASH_MODE_TORCH[] = "torch";
+
+// Values for scene mode settings.
+const char CameraParameters::SCENE_MODE_AUTO[] = "auto";
+const char CameraParameters::SCENE_MODE_ACTION[] = "action";
+const char CameraParameters::SCENE_MODE_PORTRAIT[] = "portrait";
+const char CameraParameters::SCENE_MODE_LANDSCAPE[] = "landscape";
+const char CameraParameters::SCENE_MODE_NIGHT[] = "night";
+const char CameraParameters::SCENE_MODE_NIGHT_PORTRAIT[] = "night-portrait";
+const char CameraParameters::SCENE_MODE_THEATRE[] = "theatre";
+const char CameraParameters::SCENE_MODE_BEACH[] = "beach";
+const char CameraParameters::SCENE_MODE_SNOW[] = "snow";
+const char CameraParameters::SCENE_MODE_SUNSET[] = "sunset";
+const char CameraParameters::SCENE_MODE_STEADYPHOTO[] = "steadyphoto";
+const char CameraParameters::SCENE_MODE_FIREWORKS[] = "fireworks";
+const char CameraParameters::SCENE_MODE_SPORTS[] = "sports";
+const char CameraParameters::SCENE_MODE_PARTY[] = "party";
+const char CameraParameters::SCENE_MODE_CANDLELIGHT[] = "candlelight";
+const char CameraParameters::SCENE_MODE_BARCODE[] = "barcode";
+const char CameraParameters::SCENE_MODE_HDR[] = "hdr";
+
+const char CameraParameters::PIXEL_FORMAT_YUV422SP[] = "yuv422sp";
+const char CameraParameters::PIXEL_FORMAT_YUV420SP[] = "yuv420sp";
+const char CameraParameters::PIXEL_FORMAT_YUV422I[] = "yuv422i-yuyv";
+const char CameraParameters::PIXEL_FORMAT_YUV420P[] = "yuv420p";
+const char CameraParameters::PIXEL_FORMAT_RGB565[] = "rgb565";
+const char CameraParameters::PIXEL_FORMAT_RGBA8888[] = "rgba8888";
+const char CameraParameters::PIXEL_FORMAT_JPEG[] = "jpeg";
+const char CameraParameters::PIXEL_FORMAT_BAYER_RGGB[] = "bayer-rggb";
+const char CameraParameters::PIXEL_FORMAT_ANDROID_OPAQUE[] = "android-opaque";
+
+// Values for focus mode settings.
+const char CameraParameters::FOCUS_MODE_AUTO[] = "auto";
+const char CameraParameters::FOCUS_MODE_INFINITY[] = "infinity";
+const char CameraParameters::FOCUS_MODE_MACRO[] = "macro";
+const char CameraParameters::FOCUS_MODE_FIXED[] = "fixed";
+const char CameraParameters::FOCUS_MODE_EDOF[] = "edof";
+const char CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO[] = "continuous-video";
+const char CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE[] = "continuous-picture";
+
+// Values for light fx settings
+const char CameraParameters::LIGHTFX_LOWLIGHT[] = "low-light";
+const char CameraParameters::LIGHTFX_HDR[] = "high-dynamic-range";
+
+CameraParameters::CameraParameters() : mMap() {}
+
+CameraParameters::~CameraParameters() {}
+
+String8 CameraParameters::flatten() const {
+ String8 flattened("");
+ size_t size = mMap.size();
+
+ for (size_t i = 0; i < size; i++) {
+ String8 k, v;
+ k = mMap.keyAt(i);
+ v = mMap.valueAt(i);
+
+ flattened += k;
+ flattened += "=";
+ flattened += v;
+ if (i != size - 1) flattened += ";";
+ }
+
+ return flattened;
+}
+
+void CameraParameters::unflatten(const String8& params) {
+ const char* a = params.string();
+ const char* b;
+
+ mMap.clear();
+
+ for (;;) {
+ // Find the bounds of the key name.
+ b = strchr(a, '=');
+ if (b == 0) break;
+
+ // Create the key string.
+ String8 k(a, (size_t)(b - a));
+
+ // Find the value.
+ a = b + 1;
+ b = strchr(a, ';');
+ if (b == 0) {
+ // If there's no semicolon, this is the last item.
+ String8 v(a);
+ mMap.add(k, v);
+ break;
+ }
+
+ String8 v(a, (size_t)(b - a));
+ mMap.add(k, v);
+ a = b + 1;
+ }
+}
+
+void CameraParameters::set(const char* key, const char* value) {
+ // I think I can do this with strspn()
+ if (strchr(key, '=') || strchr(key, ';')) {
+ // ALOGE("Key \"%s\"contains invalid character (= or ;)", key);
+ return;
+ }
+
+ if (strchr(value, '=') || strchr(value, ';')) {
+ // ALOGE("Value \"%s\"contains invalid character (= or ;)", value);
+ return;
+ }
+
+ mMap.replaceValueFor(String8(key), String8(value));
+}
+
+void CameraParameters::set(const char* key, int value) {
+ char str[16];
+ sprintf(str, "%d", value);
+ set(key, str);
+}
+
+void CameraParameters::setFloat(const char* key, float value) {
+ char str[16]; // 14 should be enough. We overestimate to be safe.
+ snprintf(str, sizeof(str), "%g", value);
+ set(key, str);
+}
+
+const char* CameraParameters::get(const char* key) const {
+ String8 v = mMap.valueFor(String8(key));
+ if (v.length() == 0) return 0;
+ return v.string();
+}
+
+int CameraParameters::getInt(const char* key) const {
+ const char* v = get(key);
+ if (v == 0) return -1;
+ return strtol(v, 0, 0);
+}
+
+float CameraParameters::getFloat(const char* key) const {
+ const char* v = get(key);
+ if (v == 0) return -1;
+ return strtof(v, 0);
+}
+
+void CameraParameters::remove(const char* key) {
+ mMap.removeItem(String8(key));
+}
+
+// Parse string like "640x480" or "10000,20000"
+static int parse_pair(const char* str, int* first, int* second, char delim, char** endptr = NULL) {
+ // Find the first integer.
+ char* end;
+ int w = (int)strtol(str, &end, 10);
+ // If a delimiter does not immediately follow, give up.
+ if (*end != delim) {
+ ALOGE("Cannot find delimiter (%c) in str=%s", delim, str);
+ return -1;
+ }
+
+ // Find the second integer, immediately after the delimiter.
+ int h = (int)strtol(end + 1, &end, 10);
+
+ *first = w;
+ *second = h;
+
+ if (endptr) {
+ *endptr = end;
+ }
+
+ return 0;
+}
+
+static void parseSizesList(const char* sizesStr, Vector<Size>& sizes) {
+ if (sizesStr == 0) {
+ return;
+ }
+
+ char* sizeStartPtr = (char*)sizesStr;
+
+ while (true) {
+ int width, height;
+ int success = parse_pair(sizeStartPtr, &width, &height, 'x', &sizeStartPtr);
+ if (success == -1 || (*sizeStartPtr != ',' && *sizeStartPtr != '\0')) {
+ ALOGE("Picture sizes string \"%s\" contains invalid character.", sizesStr);
+ return;
+ }
+ sizes.push(Size(width, height));
+
+ if (*sizeStartPtr == '\0') {
+ return;
+ }
+ sizeStartPtr++;
+ }
+}
+
+void CameraParameters::setPreviewSize(int width, int height) {
+ char str[32];
+ sprintf(str, "%dx%d", width, height);
+ set(KEY_PREVIEW_SIZE, str);
+}
+
+void CameraParameters::getPreviewSize(int* width, int* height) const {
+ *width = *height = -1;
+ // Get the current string; if it doesn't exist, leave the -1x-1
+ const char* p = get(KEY_PREVIEW_SIZE);
+ if (p == 0) return;
+ parse_pair(p, width, height, 'x');
+}
+
+void CameraParameters::getPreferredPreviewSizeForVideo(int* width, int* height) const {
+ *width = *height = -1;
+ const char* p = get(KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO);
+ if (p == 0) return;
+ parse_pair(p, width, height, 'x');
+}
+
+void CameraParameters::getSupportedPreviewSizes(Vector<Size>& sizes) const {
+ const char* previewSizesStr = get(KEY_SUPPORTED_PREVIEW_SIZES);
+ parseSizesList(previewSizesStr, sizes);
+}
+
+void CameraParameters::setVideoSize(int width, int height) {
+ char str[32];
+ sprintf(str, "%dx%d", width, height);
+ set(KEY_VIDEO_SIZE, str);
+}
+
+void CameraParameters::getVideoSize(int* width, int* height) const {
+ *width = *height = -1;
+ const char* p = get(KEY_VIDEO_SIZE);
+ if (p == 0) return;
+ parse_pair(p, width, height, 'x');
+}
+
+void CameraParameters::getSupportedVideoSizes(Vector<Size>& sizes) const {
+ const char* videoSizesStr = get(KEY_SUPPORTED_VIDEO_SIZES);
+ parseSizesList(videoSizesStr, sizes);
+}
+
+void CameraParameters::setPreviewFrameRate(int fps) {
+ set(KEY_PREVIEW_FRAME_RATE, fps);
+}
+
+int CameraParameters::getPreviewFrameRate() const {
+ return getInt(KEY_PREVIEW_FRAME_RATE);
+}
+
+void CameraParameters::getPreviewFpsRange(int* min_fps, int* max_fps) const {
+ *min_fps = *max_fps = -1;
+ const char* p = get(KEY_PREVIEW_FPS_RANGE);
+ if (p == 0) return;
+ parse_pair(p, min_fps, max_fps, ',');
+}
+
+void CameraParameters::setPreviewFormat(const char* format) {
+ set(KEY_PREVIEW_FORMAT, format);
+}
+
+const char* CameraParameters::getPreviewFormat() const {
+ return get(KEY_PREVIEW_FORMAT);
+}
+
+void CameraParameters::setPictureSize(int width, int height) {
+ char str[32];
+ sprintf(str, "%dx%d", width, height);
+ set(KEY_PICTURE_SIZE, str);
+}
+
+void CameraParameters::getPictureSize(int* width, int* height) const {
+ *width = *height = -1;
+ // Get the current string; if it doesn't exist, leave the -1x-1
+ const char* p = get(KEY_PICTURE_SIZE);
+ if (p == 0) return;
+ parse_pair(p, width, height, 'x');
+}
+
+void CameraParameters::getSupportedPictureSizes(Vector<Size>& sizes) const {
+ const char* pictureSizesStr = get(KEY_SUPPORTED_PICTURE_SIZES);
+ parseSizesList(pictureSizesStr, sizes);
+}
+
+void CameraParameters::setPictureFormat(const char* format) {
+ set(KEY_PICTURE_FORMAT, format);
+}
+
+const char* CameraParameters::getPictureFormat() const {
+ return get(KEY_PICTURE_FORMAT);
+}
+
+void CameraParameters::dump() const {
+ ALOGD("dump: mMap.size = %zu", mMap.size());
+ for (size_t i = 0; i < mMap.size(); i++) {
+ String8 k, v;
+ k = mMap.keyAt(i);
+ v = mMap.valueAt(i);
+ ALOGD("%s: %s\n", k.string(), v.string());
+ }
+}
+
+status_t CameraParameters::dump(int fd, const Vector<String16>& /*args*/) const {
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+ snprintf(buffer, 255, "CameraParameters::dump: mMap.size = %zu\n", mMap.size());
+ result.append(buffer);
+ for (size_t i = 0; i < mMap.size(); i++) {
+ String8 k, v;
+ k = mMap.keyAt(i);
+ v = mMap.valueAt(i);
+ snprintf(buffer, 255, "\t%s: %s\n", k.string(), v.string());
+ result.append(buffer);
+ }
+ write(fd, result.string(), result.size());
+ return NO_ERROR;
+}
+
+void CameraParameters::getSupportedPreviewFormats(Vector<int>& formats) const {
+ const char* supportedPreviewFormats = get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS);
+
+ if (supportedPreviewFormats == NULL) {
+ ALOGW("%s: No supported preview formats.", __FUNCTION__);
+ return;
+ }
+
+ String8 fmtStr(supportedPreviewFormats);
+ char* prevFmts = fmtStr.lockBuffer(fmtStr.size());
+
+ char* savePtr;
+ char* fmt = strtok_r(prevFmts, ",", &savePtr);
+ while (fmt) {
+ int actual = previewFormatToEnum(fmt);
+ if (actual != -1) {
+ formats.add(actual);
+ }
+ fmt = strtok_r(NULL, ",", &savePtr);
+ }
+ fmtStr.unlockBuffer(fmtStr.size());
+}
+
+int CameraParameters::previewFormatToEnum(const char* format) {
+ return !format ? HAL_PIXEL_FORMAT_YCrCb_420_SP
+ : !strcmp(format, PIXEL_FORMAT_YUV422SP) ? HAL_PIXEL_FORMAT_YCbCr_422_SP
+ : // NV16
+ !strcmp(format, PIXEL_FORMAT_YUV420SP) ? HAL_PIXEL_FORMAT_YCrCb_420_SP
+ : // NV21
+ !strcmp(format, PIXEL_FORMAT_YUV422I) ? HAL_PIXEL_FORMAT_YCbCr_422_I
+ : // YUY2
+ !strcmp(format, PIXEL_FORMAT_YUV420P) ? HAL_PIXEL_FORMAT_YV12
+ : // YV12
+ !strcmp(format, PIXEL_FORMAT_RGB565) ? HAL_PIXEL_FORMAT_RGB_565
+ : // RGB565
+ !strcmp(format, PIXEL_FORMAT_RGBA8888) ? HAL_PIXEL_FORMAT_RGBA_8888
+ : // RGB8888
+ !strcmp(format, PIXEL_FORMAT_BAYER_RGGB) ? HAL_PIXEL_FORMAT_RAW16
+ : // Raw sensor data
+ -1;
+}
+
+bool CameraParameters::isEmpty() const {
+ return mMap.isEmpty();
+}
+
+} // namespace helper
+} // namespace common
+} // namespace camera
+} // namespace hardware
+} // namespace android
diff --git a/camera/common/default/Exif.cpp b/camera/common/default/Exif.cpp
new file mode 100644
index 0000000..f4b2a31
--- /dev/null
+++ b/camera/common/default/Exif.cpp
@@ -0,0 +1,1041 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CamComm1.0-Exif"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+// #define LOG_NDEBUG 0
+
+#include <android/log.h>
+
+#include <inttypes.h>
+#include <math.h>
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+#include "Exif.h"
+
+extern "C" {
+#include <libexif/exif-data.h>
+}
+
+namespace std {
+
+template <>
+struct default_delete<ExifEntry> {
+ inline void operator()(ExifEntry* entry) const { exif_entry_unref(entry); }
+};
+
+} // namespace std
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace common {
+namespace helper {
+
+class ExifUtilsImpl : public ExifUtils {
+ public:
+ ExifUtilsImpl();
+
+ virtual ~ExifUtilsImpl();
+
+ // initialize() can be called multiple times. Each call clears any previously set
+ // Exif tags.
+ virtual bool initialize();
+
+ // set all known fields from a metadata structure
+ virtual bool setFromMetadata(const CameraMetadata& metadata, const size_t imageWidth,
+ const size_t imageHeight);
+
+ // sets the lens aperture.
+ // Returns false if memory allocation fails.
+ virtual bool setAperture(uint32_t numerator, uint32_t denominator);
+
+ // sets the value of brightness.
+ // Returns false if memory allocation fails.
+ virtual bool setBrightness(int32_t numerator, int32_t denominator);
+
+ // sets the color space.
+ // Returns false if memory allocation fails.
+ virtual bool setColorSpace(uint16_t color_space);
+
+ // sets the information to compressed data.
+ // Returns false if memory allocation fails.
+ virtual bool setComponentsConfiguration(const std::string& components_configuration);
+
+ // sets the compression scheme used for the image data.
+ // Returns false if memory allocation fails.
+ virtual bool setCompression(uint16_t compression);
+
+ // sets image contrast.
+ // Returns false if memory allocation fails.
+ virtual bool setContrast(uint16_t contrast);
+
+ // sets the date and time of image last modified. It takes local time. The
+ // name of the tag is DateTime in IFD0.
+ // Returns false if memory allocation fails.
+ virtual bool setDateTime(const struct tm& t);
+
+ // sets the image description.
+ // Returns false if memory allocation fails.
+ virtual bool setDescription(const std::string& description);
+
+ // sets the digital zoom ratio. If the numerator is 0, it means digital zoom
+ // was not used.
+ // Returns false if memory allocation fails.
+ virtual bool setDigitalZoomRatio(uint32_t numerator, uint32_t denominator);
+
+ // sets the exposure bias.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureBias(int32_t numerator, int32_t denominator);
+
+ // sets the exposure mode set when the image was shot.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureMode(uint16_t exposure_mode);
+
+ // sets the program used by the camera to set exposure when the picture is
+ // taken.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureProgram(uint16_t exposure_program);
+
+ // sets the exposure time, given in seconds.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureTime(uint32_t numerator, uint32_t denominator);
+
+ // sets the status of flash.
+ // Returns false if memory allocation fails.
+ virtual bool setFlash(uint16_t flash);
+
+ // sets the F number.
+ // Returns false if memory allocation fails.
+ virtual bool setFNumber(uint32_t numerator, uint32_t denominator);
+
+ // sets the focal length of lens used to take the image in millimeters.
+ // Returns false if memory allocation fails.
+ virtual bool setFocalLength(uint32_t numerator, uint32_t denominator);
+
+ // sets the degree of overall image gain adjustment.
+ // Returns false if memory allocation fails.
+ virtual bool setGainControl(uint16_t gain_control);
+
+ // sets the altitude in meters.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsAltitude(double altitude);
+
+ // sets the latitude with degrees minutes seconds format.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsLatitude(double latitude);
+
+ // sets the longitude with degrees minutes seconds format.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsLongitude(double longitude);
+
+ // sets GPS processing method.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsProcessingMethod(const std::string& method);
+
+ // sets GPS date stamp and time stamp (atomic clock). It takes UTC time.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsTimestamp(const struct tm& t);
+
+ // sets the length (number of rows) of the main image.
+ // Returns false if memory allocation fails.
+ virtual bool setImageHeight(uint32_t length);
+
+ // sets the width (number of columns) of the main image.
+ // Returns false if memory allocation fails.
+ virtual bool setImageWidth(uint32_t width);
+
+ // sets the ISO speed.
+ // Returns false if memory allocation fails.
+ virtual bool setIsoSpeedRating(uint16_t iso_speed_ratings);
+
+ // sets the kind of light source.
+ // Returns false if memory allocation fails.
+ virtual bool setLightSource(uint16_t light_source);
+
+ // sets the smallest F number of the lens.
+ // Returns false if memory allocation fails.
+ virtual bool setMaxAperture(uint32_t numerator, uint32_t denominator);
+
+ // sets the metering mode.
+ // Returns false if memory allocation fails.
+ virtual bool setMeteringMode(uint16_t metering_mode);
+
+ // sets image orientation.
+ // Returns false if memory allocation fails.
+ virtual bool setOrientation(uint16_t orientation);
+
+ // sets the unit for measuring XResolution and YResolution.
+ // Returns false if memory allocation fails.
+ virtual bool setResolutionUnit(uint16_t resolution_unit);
+
+ // sets image saturation.
+ // Returns false if memory allocation fails.
+ virtual bool setSaturation(uint16_t saturation);
+
+ // sets the type of scene that was shot.
+ // Returns false if memory allocation fails.
+ virtual bool setSceneCaptureType(uint16_t type);
+
+ // sets image sharpness.
+ // Returns false if memory allocation fails.
+ virtual bool setSharpness(uint16_t sharpness);
+
+ // sets the shutter speed.
+ // Returns false if memory allocation fails.
+ virtual bool setShutterSpeed(int32_t numerator, int32_t denominator);
+
+ // sets the distance to the subject, given in meters.
+ // Returns false if memory allocation fails.
+ virtual bool setSubjectDistance(uint32_t numerator, uint32_t denominator);
+
+ // sets the fractions of seconds for the <DateTime> tag.
+ // Returns false if memory allocation fails.
+ virtual bool setSubsecTime(const std::string& subsec_time);
+
+ // sets the white balance mode set when the image was shot.
+ // Returns false if memory allocation fails.
+ virtual bool setWhiteBalance(uint16_t white_balance);
+
+ // sets the number of pixels per resolution unit in the image width.
+ // Returns false if memory allocation fails.
+ virtual bool setXResolution(uint32_t numerator, uint32_t denominator);
+
+ // sets the position of chrominance components in relation to the luminance
+ // component.
+ // Returns false if memory allocation fails.
+ virtual bool setYCbCrPositioning(uint16_t ycbcr_positioning);
+
+ // sets the number of pixels per resolution unit in the image length.
+ // Returns false if memory allocation fails.
+ virtual bool setYResolution(uint32_t numerator, uint32_t denominator);
+
+ // sets the manufacturer of camera.
+ // Returns false if memory allocation fails.
+ virtual bool setMake(const std::string& make);
+
+ // sets the model number of camera.
+ // Returns false if memory allocation fails.
+ virtual bool setModel(const std::string& model);
+
+ // Generates APP1 segment.
+ // Returns false if generating APP1 segment fails.
+ virtual bool generateApp1(const void* thumbnail_buffer, uint32_t size);
+
+ // Gets the buffer of the APP1 segment. This method must be called only after
+ // generateApp1() has succeeded.
+ virtual const uint8_t* getApp1Buffer();
+
+ // Gets the length of the APP1 segment. This method must be called only after
+ // generateApp1() has succeeded.
+ virtual unsigned int getApp1Length();
+
+ protected:
+ // sets the version of this standard supported.
+ // Returns false if memory allocation fails.
+ virtual bool setExifVersion(const std::string& exif_version);
+
+ // Resets the pointers and memories.
+ virtual void reset();
+
+ // Adds a variable length tag to |exif_data_|. It will remove the original one
+ // if the tag exists.
+ // Returns the entry of the tag. The reference count of returned ExifEntry is
+ // two.
+ virtual std::unique_ptr<ExifEntry> addVariableLengthEntry(ExifIfd ifd, ExifTag tag,
+ ExifFormat format,
+ uint64_t components,
+ unsigned int size);
+
+ // Adds an entry of |tag| in |exif_data_|. It won't remove the original one if
+ // the tag exists.
+ // Returns the entry of the tag. It adds one reference count to returned
+ // ExifEntry.
+ virtual std::unique_ptr<ExifEntry> addEntry(ExifIfd ifd, ExifTag tag);
+
+ // Helper functions to add exif data of different types.
+ virtual bool setShort(ExifIfd ifd, ExifTag tag, uint16_t value, const std::string& msg);
+
+ virtual bool setLong(ExifIfd ifd, ExifTag tag, uint32_t value, const std::string& msg);
+
+ virtual bool setRational(ExifIfd ifd, ExifTag tag, uint32_t numerator, uint32_t denominator,
+ const std::string& msg);
+
+ virtual bool setSRational(ExifIfd ifd, ExifTag tag, int32_t numerator, int32_t denominator,
+ const std::string& msg);
+
+ virtual bool setString(ExifIfd ifd, ExifTag tag, ExifFormat format, const std::string& buffer,
+ const std::string& msg);
+
+ // Destroys the buffer of APP1 segment if exists.
+ virtual void destroyApp1();
+
+ // The Exif data (APP1). Owned by this class.
+ ExifData* exif_data_;
+ // The raw data of APP1 segment. It's allocated by ExifMem in |exif_data_| but
+ // owned by this class.
+ uint8_t* app1_buffer_;
+ // The length of |app1_buffer_|.
+ unsigned int app1_length_;
+};
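+
+// Illustrative usage sketch (not part of the interface; variable names are placeholders and
+// error handling is elided):
+//
+//   std::unique_ptr<ExifUtils> utils(ExifUtils::create());
+//   utils->initialize();
+//   utils->setFromMetadata(resultMetadata, imageWidth, imageHeight);
+//   utils->generateApp1(thumbnailJpeg, thumbnailSize);
+//   const uint8_t* app1 = utils->getApp1Buffer();
+//   unsigned int app1Length = utils->getApp1Length();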
+
+#define SET_SHORT(ifd, tag, value) \
+ do { \
+ if (setShort(ifd, tag, value, #tag) == false) return false; \
+ } while (0);
+
+#define SET_LONG(ifd, tag, value) \
+ do { \
+ if (setLong(ifd, tag, value, #tag) == false) return false; \
+ } while (0);
+
+#define SET_RATIONAL(ifd, tag, numerator, denominator) \
+ do { \
+ if (setRational(ifd, tag, numerator, denominator, #tag) == false) return false; \
+ } while (0);
+
+#define SET_SRATIONAL(ifd, tag, numerator, denominator) \
+ do { \
+ if (setSRational(ifd, tag, numerator, denominator, #tag) == false) return false; \
+ } while (0);
+
+#define SET_STRING(ifd, tag, format, buffer) \
+ do { \
+ if (setString(ifd, tag, format, buffer, #tag) == false) return false; \
+ } while (0);
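+
+// Each SET_* macro forwards to the matching set*() helper and makes the enclosing setter
+// return false on failure; for example, SET_SHORT(EXIF_IFD_0, EXIF_TAG_ORIENTATION, value)
+// expands to a guarded call to setShort(EXIF_IFD_0, EXIF_TAG_ORIENTATION, value,
+// "EXIF_TAG_ORIENTATION").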
+
+// This comes from the Exif Version 2.2 standard table 6.
+const char gExifAsciiPrefix[] = {0x41, 0x53, 0x43, 0x49, 0x49, 0x0, 0x0, 0x0};
+
+static void setLatitudeOrLongitudeData(unsigned char* data, double num) {
+ // Take the integer part of |num|.
+ ExifLong degrees = static_cast<ExifLong>(num);
+ ExifLong minutes = static_cast<ExifLong>(60 * (num - degrees));
+ ExifLong microseconds = static_cast<ExifLong>(3600000000u * (num - degrees - minutes / 60.0));
+ exif_set_rational(data, EXIF_BYTE_ORDER_INTEL, {degrees, 1});
+ exif_set_rational(data + sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL, {minutes, 1});
+ exif_set_rational(data + 2 * sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL,
+ {microseconds, 1000000});
+}
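+
+// Worked example (approximate): num = 37.7749 gives degrees = 37, minutes = 46 and a
+// microseconds value of about 29640000, i.e. the rationals {37, 1}, {46, 1} and
+// {29640000, 1000000}, which read back as roughly 37 deg 46' 29.64".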
+
+ExifUtils* ExifUtils::create() {
+ return new ExifUtilsImpl();
+}
+
+ExifUtils::~ExifUtils() {}
+
+ExifUtilsImpl::ExifUtilsImpl() : exif_data_(nullptr), app1_buffer_(nullptr), app1_length_(0) {}
+
+ExifUtilsImpl::~ExifUtilsImpl() {
+ reset();
+}
+
+bool ExifUtilsImpl::initialize() {
+ reset();
+ exif_data_ = exif_data_new();
+ if (exif_data_ == nullptr) {
+ ALOGE("%s: allocate memory for exif_data_ failed", __FUNCTION__);
+ return false;
+ }
+ // set the image options.
+ exif_data_set_option(exif_data_, EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
+ exif_data_set_data_type(exif_data_, EXIF_DATA_TYPE_COMPRESSED);
+ exif_data_set_byte_order(exif_data_, EXIF_BYTE_ORDER_INTEL);
+
+ // set exif version to 2.2.
+ if (!setExifVersion("0220")) {
+ return false;
+ }
+
+ return true;
+}
+
+bool ExifUtilsImpl::setAperture(uint32_t numerator, uint32_t denominator) {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_APERTURE_VALUE, numerator, denominator);
+ return true;
+}
+
+bool ExifUtilsImpl::setBrightness(int32_t numerator, int32_t denominator) {
+ SET_SRATIONAL(EXIF_IFD_EXIF, EXIF_TAG_BRIGHTNESS_VALUE, numerator, denominator);
+ return true;
+}
+
+bool ExifUtilsImpl::setColorSpace(uint16_t color_space) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_COLOR_SPACE, color_space);
+ return true;
+}
+
+bool ExifUtilsImpl::setComponentsConfiguration(const std::string& components_configuration) {
+ SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_COMPONENTS_CONFIGURATION, EXIF_FORMAT_UNDEFINED,
+ components_configuration);
+ return true;
+}
+
+bool ExifUtilsImpl::setCompression(uint16_t compression) {
+ SET_SHORT(EXIF_IFD_0, EXIF_TAG_COMPRESSION, compression);
+ return true;
+}
+
+bool ExifUtilsImpl::setContrast(uint16_t contrast) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_CONTRAST, contrast);
+ return true;
+}
+
+bool ExifUtilsImpl::setDateTime(const struct tm& t) {
+ // The Exif standard specifies a length of 20 bytes, including the NUL terminator.
+ char str[20];
+ int result = snprintf(str, sizeof(str), "%04i:%02i:%02i %02i:%02i:%02i", t.tm_year + 1900,
+ t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec);
+ if (result != sizeof(str) - 1) {
+ ALOGW("%s: Input time is invalid", __FUNCTION__);
+ return false;
+ }
+ std::string buffer(str);
+ SET_STRING(EXIF_IFD_0, EXIF_TAG_DATE_TIME, EXIF_FORMAT_ASCII, buffer);
+ SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_DATE_TIME_ORIGINAL, EXIF_FORMAT_ASCII, buffer);
+ SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_DATE_TIME_DIGITIZED, EXIF_FORMAT_ASCII, buffer);
+ return true;
+}
+
+bool ExifUtilsImpl::setDescription(const std::string& description) {
+ SET_STRING(EXIF_IFD_0, EXIF_TAG_IMAGE_DESCRIPTION, EXIF_FORMAT_ASCII, description);
+ return true;
+}
+
+bool ExifUtilsImpl::setDigitalZoomRatio(uint32_t numerator, uint32_t denominator) {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_DIGITAL_ZOOM_RATIO, numerator, denominator);
+ return true;
+}
+
+bool ExifUtilsImpl::setExposureBias(int32_t numerator, int32_t denominator) {
+ SET_SRATIONAL(EXIF_IFD_EXIF, EXIF_TAG_EXPOSURE_BIAS_VALUE, numerator, denominator);
+ return true;
+}
+
+bool ExifUtilsImpl::setExposureMode(uint16_t exposure_mode) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_EXPOSURE_MODE, exposure_mode);
+ return true;
+}
+
+bool ExifUtilsImpl::setExposureProgram(uint16_t exposure_program) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_EXPOSURE_PROGRAM, exposure_program);
+ return true;
+}
+
+bool ExifUtilsImpl::setExposureTime(uint32_t numerator, uint32_t denominator) {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_EXPOSURE_TIME, numerator, denominator);
+ return true;
+}
+
+bool ExifUtilsImpl::setFlash(uint16_t flash) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_FLASH, flash);
+ return true;
+}
+
+bool ExifUtilsImpl::setFNumber(uint32_t numerator, uint32_t denominator) {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_FNUMBER, numerator, denominator);
+ return true;
+}
+
+bool ExifUtilsImpl::setFocalLength(uint32_t numerator, uint32_t denominator) {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_FOCAL_LENGTH, numerator, denominator);
+ return true;
+}
+
+bool ExifUtilsImpl::setGainControl(uint16_t gain_control) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_GAIN_CONTROL, gain_control);
+ return true;
+}
+
+bool ExifUtilsImpl::setGpsAltitude(double altitude) {
+ ExifTag refTag = static_cast<ExifTag>(EXIF_TAG_GPS_ALTITUDE_REF);
+ std::unique_ptr<ExifEntry> refEntry =
+ addVariableLengthEntry(EXIF_IFD_GPS, refTag, EXIF_FORMAT_BYTE, 1, 1);
+ if (!refEntry) {
+ ALOGE("%s: Adding GPSAltitudeRef exif entry failed", __FUNCTION__);
+ return false;
+ }
+ if (altitude >= 0) {
+ *refEntry->data = 0;
+ } else {
+ *refEntry->data = 1;
+ altitude *= -1;
+ }
+
+ ExifTag tag = static_cast<ExifTag>(EXIF_TAG_GPS_ALTITUDE);
+ std::unique_ptr<ExifEntry> entry = addVariableLengthEntry(
+ EXIF_IFD_GPS, tag, EXIF_FORMAT_RATIONAL, 1, sizeof(ExifRational));
+ if (!entry) {
+ exif_content_remove_entry(exif_data_->ifd[EXIF_IFD_GPS], refEntry.get());
+ ALOGE("%s: Adding GPSAltitude exif entry failed", __FUNCTION__);
+ return false;
+ }
+ exif_set_rational(entry->data, EXIF_BYTE_ORDER_INTEL,
+ {static_cast<ExifLong>(altitude * 1000), 1000});
+
+ return true;
+}
+
+bool ExifUtilsImpl::setGpsLatitude(double latitude) {
+ const ExifTag refTag = static_cast<ExifTag>(EXIF_TAG_GPS_LATITUDE_REF);
+ std::unique_ptr<ExifEntry> refEntry =
+ addVariableLengthEntry(EXIF_IFD_GPS, refTag, EXIF_FORMAT_ASCII, 2, 2);
+ if (!refEntry) {
+ ALOGE("%s: Adding GPSLatitudeRef exif entry failed", __FUNCTION__);
+ return false;
+ }
+ if (latitude >= 0) {
+ memcpy(refEntry->data, "N", sizeof("N"));
+ } else {
+ memcpy(refEntry->data, "S", sizeof("S"));
+ latitude *= -1;
+ }
+
+ const ExifTag tag = static_cast<ExifTag>(EXIF_TAG_GPS_LATITUDE);
+ std::unique_ptr<ExifEntry> entry = addVariableLengthEntry(
+ EXIF_IFD_GPS, tag, EXIF_FORMAT_RATIONAL, 3, 3 * sizeof(ExifRational));
+ if (!entry) {
+ exif_content_remove_entry(exif_data_->ifd[EXIF_IFD_GPS], refEntry.get());
+ ALOGE("%s: Adding GPSLatitude exif entry failed", __FUNCTION__);
+ return false;
+ }
+ setLatitudeOrLongitudeData(entry->data, latitude);
+
+ return true;
+}
+
+bool ExifUtilsImpl::setGpsLongitude(double longitude) {
+ ExifTag refTag = static_cast<ExifTag>(EXIF_TAG_GPS_LONGITUDE_REF);
+ std::unique_ptr<ExifEntry> refEntry =
+ addVariableLengthEntry(EXIF_IFD_GPS, refTag, EXIF_FORMAT_ASCII, 2, 2);
+ if (!refEntry) {
+ ALOGE("%s: Adding GPSLongitudeRef exif entry failed", __FUNCTION__);
+ return false;
+ }
+ if (longitude >= 0) {
+ memcpy(refEntry->data, "E", sizeof("E"));
+ } else {
+ memcpy(refEntry->data, "W", sizeof("W"));
+ longitude *= -1;
+ }
+
+ ExifTag tag = static_cast<ExifTag>(EXIF_TAG_GPS_LONGITUDE);
+ std::unique_ptr<ExifEntry> entry = addVariableLengthEntry(
+ EXIF_IFD_GPS, tag, EXIF_FORMAT_RATIONAL, 3, 3 * sizeof(ExifRational));
+ if (!entry) {
+ exif_content_remove_entry(exif_data_->ifd[EXIF_IFD_GPS], refEntry.get());
+ ALOGE("%s: Adding GPSLongitude exif entry failed", __FUNCTION__);
+ return false;
+ }
+ setLatitudeOrLongitudeData(entry->data, longitude);
+
+ return true;
+}
+
+bool ExifUtilsImpl::setGpsProcessingMethod(const std::string& method) {
+ std::string buffer = std::string(gExifAsciiPrefix, sizeof(gExifAsciiPrefix)) + method;
+ SET_STRING(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_PROCESSING_METHOD),
+ EXIF_FORMAT_UNDEFINED, buffer);
+ return true;
+}
+
+bool ExifUtilsImpl::setGpsTimestamp(const struct tm& t) {
+ const ExifTag dateTag = static_cast<ExifTag>(EXIF_TAG_GPS_DATE_STAMP);
+ const size_t kGpsDateStampSize = 11;
+ std::unique_ptr<ExifEntry> entry = addVariableLengthEntry(
+ EXIF_IFD_GPS, dateTag, EXIF_FORMAT_ASCII, kGpsDateStampSize, kGpsDateStampSize);
+ if (!entry) {
+ ALOGE("%s: Adding GPSDateStamp exif entry failed", __FUNCTION__);
+ return false;
+ }
+ int result = snprintf(reinterpret_cast<char*>(entry->data), kGpsDateStampSize, "%04i:%02i:%02i",
+ t.tm_year + 1900, t.tm_mon + 1, t.tm_mday);
+ if (result != kGpsDateStampSize - 1) {
+ ALOGW("%s: Input time is invalid", __FUNCTION__);
+ return false;
+ }
+
+ const ExifTag timeTag = static_cast<ExifTag>(EXIF_TAG_GPS_TIME_STAMP);
+ entry = addVariableLengthEntry(EXIF_IFD_GPS, timeTag, EXIF_FORMAT_RATIONAL, 3,
+ 3 * sizeof(ExifRational));
+ if (!entry) {
+ ALOGE("%s: Adding GPSTimeStamp exif entry failed", __FUNCTION__);
+ return false;
+ }
+ exif_set_rational(entry->data, EXIF_BYTE_ORDER_INTEL, {static_cast<ExifLong>(t.tm_hour), 1});
+ exif_set_rational(entry->data + sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL,
+ {static_cast<ExifLong>(t.tm_min), 1});
+ exif_set_rational(entry->data + 2 * sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL,
+ {static_cast<ExifLong>(t.tm_sec), 1});
+
+ return true;
+}
+
+bool ExifUtilsImpl::setImageHeight(uint32_t length) {
+ SET_SHORT(EXIF_IFD_0, EXIF_TAG_IMAGE_LENGTH, length);
+ SET_LONG(EXIF_IFD_EXIF, EXIF_TAG_PIXEL_Y_DIMENSION, length);
+ return true;
+}
+
+bool ExifUtilsImpl::setImageWidth(uint32_t width) {
+ SET_SHORT(EXIF_IFD_0, EXIF_TAG_IMAGE_WIDTH, width);
+ SET_LONG(EXIF_IFD_EXIF, EXIF_TAG_PIXEL_X_DIMENSION, width);
+ return true;
+}
+
+bool ExifUtilsImpl::setIsoSpeedRating(uint16_t iso_speed_ratings) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_ISO_SPEED_RATINGS, iso_speed_ratings);
+ return true;
+}
+
+bool ExifUtilsImpl::setLightSource(uint16_t light_source) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_LIGHT_SOURCE, light_source);
+ return true;
+}
+
+bool ExifUtilsImpl::setMaxAperture(uint32_t numerator, uint32_t denominator) {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_MAX_APERTURE_VALUE, numerator, denominator);
+ return true;
+}
+
+bool ExifUtilsImpl::setMeteringMode(uint16_t metering_mode) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_METERING_MODE, metering_mode);
+ return true;
+}
+
+bool ExifUtilsImpl::setOrientation(uint16_t orientation) {
+ /*
+ * Orientation value:
+ * 1 2 3 4 5 6 7 8
+ *
+ * 888888 888888 88 88 8888888888 88 88 8888888888
+ * 88 88 88 88 88 88 88 88 88 88 88 88
+ * 8888 8888 8888 8888 88 8888888888 8888888888 88
+ * 88 88 88 88
+ * 88 88 888888 888888
+ */
+ int value = 1;
+ switch (orientation) {
+ case 90:
+ value = 6;
+ break;
+ case 180:
+ value = 3;
+ break;
+ case 270:
+ value = 8;
+ break;
+ default:
+ break;
+ }
+ SET_SHORT(EXIF_IFD_0, EXIF_TAG_ORIENTATION, value);
+ return true;
+}
+
+bool ExifUtilsImpl::setResolutionUnit(uint16_t resolution_unit) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_RESOLUTION_UNIT, resolution_unit);
+ return true;
+}
+
+bool ExifUtilsImpl::setSaturation(uint16_t saturation) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_SATURATION, saturation);
+ return true;
+}
+
+bool ExifUtilsImpl::setSceneCaptureType(uint16_t type) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_SCENE_CAPTURE_TYPE, type);
+ return true;
+}
+
+bool ExifUtilsImpl::setSharpness(uint16_t sharpness) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_SHARPNESS, sharpness);
+ return true;
+}
+
+bool ExifUtilsImpl::setShutterSpeed(int32_t numerator, int32_t denominator) {
+ SET_SRATIONAL(EXIF_IFD_EXIF, EXIF_TAG_SHUTTER_SPEED_VALUE, numerator, denominator);
+ return true;
+}
+
+bool ExifUtilsImpl::setSubjectDistance(uint32_t numerator, uint32_t denominator) {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_SUBJECT_DISTANCE, numerator, denominator);
+ return true;
+}
+
+bool ExifUtilsImpl::setSubsecTime(const std::string& subsec_time) {
+ SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME, EXIF_FORMAT_ASCII, subsec_time);
+ SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME_ORIGINAL, EXIF_FORMAT_ASCII, subsec_time);
+ SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME_DIGITIZED, EXIF_FORMAT_ASCII, subsec_time);
+ return true;
+}
+
+bool ExifUtilsImpl::setWhiteBalance(uint16_t white_balance) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_WHITE_BALANCE, white_balance);
+ return true;
+}
+
+bool ExifUtilsImpl::setXResolution(uint32_t numerator, uint32_t denominator) {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_X_RESOLUTION, numerator, denominator);
+ return true;
+}
+
+bool ExifUtilsImpl::setYCbCrPositioning(uint16_t ycbcr_positioning) {
+ SET_SHORT(EXIF_IFD_0, EXIF_TAG_YCBCR_POSITIONING, ycbcr_positioning);
+ return true;
+}
+
+bool ExifUtilsImpl::setYResolution(uint32_t numerator, uint32_t denominator) {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_Y_RESOLUTION, numerator, denominator);
+ return true;
+}
+
+bool ExifUtilsImpl::generateApp1(const void* thumbnail_buffer, uint32_t size) {
+ destroyApp1();
+ exif_data_->data = const_cast<uint8_t*>(static_cast<const uint8_t*>(thumbnail_buffer));
+ exif_data_->size = size;
+ // Save the result into |app1_buffer_|.
+ exif_data_save_data(exif_data_, &app1_buffer_, &app1_length_);
+ if (!app1_length_) {
+ ALOGE("%s: Allocate memory for app1_buffer_ failed", __FUNCTION__);
+ return false;
+ }
+ /*
+ * The JPEG segment size field is 16 bits. The APP1 segment must not exceed
+ * 65533 bytes because two bytes are reserved for the segment size field.
+ */
+ if (app1_length_ > 65533) {
+ destroyApp1();
+ ALOGE("%s: The size of APP1 segment is too large", __FUNCTION__);
+ return false;
+ }
+ return true;
+}
+
+const uint8_t* ExifUtilsImpl::getApp1Buffer() {
+ return app1_buffer_;
+}
+
+unsigned int ExifUtilsImpl::getApp1Length() {
+ return app1_length_;
+}
+
+bool ExifUtilsImpl::setExifVersion(const std::string& exif_version) {
+ SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_EXIF_VERSION, EXIF_FORMAT_UNDEFINED, exif_version);
+ return true;
+}
+
+bool ExifUtilsImpl::setMake(const std::string& make) {
+ SET_STRING(EXIF_IFD_0, EXIF_TAG_MAKE, EXIF_FORMAT_ASCII, make);
+ return true;
+}
+
+bool ExifUtilsImpl::setModel(const std::string& model) {
+ SET_STRING(EXIF_IFD_0, EXIF_TAG_MODEL, EXIF_FORMAT_ASCII, model);
+ return true;
+}
+
+void ExifUtilsImpl::reset() {
+ destroyApp1();
+ if (exif_data_) {
+ /*
+ * Since we decided to ignore the original APP1, we are sure that there is
+ * no thumbnail allocated by libexif. |exif_data_->data| is actually
+ * allocated by JpegCompressor. Set |exif_data_->data| to nullptr to
+ * prevent exif_data_unref() from destroying it incorrectly.
+ */
+ exif_data_->data = nullptr;
+ exif_data_->size = 0;
+ exif_data_unref(exif_data_);
+ exif_data_ = nullptr;
+ }
+}
+
+std::unique_ptr<ExifEntry> ExifUtilsImpl::addVariableLengthEntry(ExifIfd ifd, ExifTag tag,
+ ExifFormat format,
+ uint64_t components,
+ unsigned int size) {
+ // Remove old entry if exists.
+ exif_content_remove_entry(exif_data_->ifd[ifd],
+ exif_content_get_entry(exif_data_->ifd[ifd], tag));
+ ExifMem* mem = exif_mem_new_default();
+ if (!mem) {
+ ALOGE("%s: Allocate memory for exif entry failed", __FUNCTION__);
+ return nullptr;
+ }
+ std::unique_ptr<ExifEntry> entry(exif_entry_new_mem(mem));
+ if (!entry) {
+ ALOGE("%s: Allocate memory for exif entry failed", __FUNCTION__);
+ exif_mem_unref(mem);
+ return nullptr;
+ }
+ void* tmpBuffer = exif_mem_alloc(mem, size);
+ if (!tmpBuffer) {
+ ALOGE("%s: Allocate memory for exif entry failed", __FUNCTION__);
+ exif_mem_unref(mem);
+ return nullptr;
+ }
+
+ entry->data = static_cast<unsigned char*>(tmpBuffer);
+ entry->tag = tag;
+ entry->format = format;
+ entry->components = components;
+ entry->size = size;
+
+ exif_content_add_entry(exif_data_->ifd[ifd], entry.get());
+ exif_mem_unref(mem);
+
+ return entry;
+}
+
+std::unique_ptr<ExifEntry> ExifUtilsImpl::addEntry(ExifIfd ifd, ExifTag tag) {
+ std::unique_ptr<ExifEntry> entry(exif_content_get_entry(exif_data_->ifd[ifd], tag));
+ if (entry) {
+ // exif_content_get_entry() won't ref the entry, so we ref here.
+ exif_entry_ref(entry.get());
+ return entry;
+ }
+ entry.reset(exif_entry_new());
+ if (!entry) {
+ ALOGE("%s: Allocate memory for exif entry failed", __FUNCTION__);
+ return nullptr;
+ }
+ entry->tag = tag;
+ exif_content_add_entry(exif_data_->ifd[ifd], entry.get());
+ exif_entry_initialize(entry.get(), tag);
+ return entry;
+}
+
+bool ExifUtilsImpl::setShort(ExifIfd ifd, ExifTag tag, uint16_t value, const std::string& msg) {
+ std::unique_ptr<ExifEntry> entry = addEntry(ifd, tag);
+ if (!entry) {
+ ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+ return false;
+ }
+ exif_set_short(entry->data, EXIF_BYTE_ORDER_INTEL, value);
+ return true;
+}
+
+bool ExifUtilsImpl::setLong(ExifIfd ifd, ExifTag tag, uint32_t value, const std::string& msg) {
+ std::unique_ptr<ExifEntry> entry = addEntry(ifd, tag);
+ if (!entry) {
+ ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+ return false;
+ }
+ exif_set_long(entry->data, EXIF_BYTE_ORDER_INTEL, value);
+ return true;
+}
+
+bool ExifUtilsImpl::setRational(ExifIfd ifd, ExifTag tag, uint32_t numerator, uint32_t denominator,
+ const std::string& msg) {
+ std::unique_ptr<ExifEntry> entry = addEntry(ifd, tag);
+ if (!entry) {
+ ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+ return false;
+ }
+ exif_set_rational(entry->data, EXIF_BYTE_ORDER_INTEL, {numerator, denominator});
+ return true;
+}
+
+bool ExifUtilsImpl::setSRational(ExifIfd ifd, ExifTag tag, int32_t numerator, int32_t denominator,
+ const std::string& msg) {
+ std::unique_ptr<ExifEntry> entry = addEntry(ifd, tag);
+ if (!entry) {
+ ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+ return false;
+ }
+ exif_set_srational(entry->data, EXIF_BYTE_ORDER_INTEL, {numerator, denominator});
+ return true;
+}
+
+bool ExifUtilsImpl::setString(ExifIfd ifd, ExifTag tag, ExifFormat format,
+ const std::string& buffer, const std::string& msg) {
+ size_t entry_size = buffer.length();
+ // NUL termination is only needed for ASCII entries; EXIF_FORMAT_UNDEFINED data is stored as raw bytes.
+ if (format == EXIF_FORMAT_ASCII) {
+ entry_size++;
+ }
+ std::unique_ptr<ExifEntry> entry =
+ addVariableLengthEntry(ifd, tag, format, entry_size, entry_size);
+ if (!entry) {
+ ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+ return false;
+ }
+ memcpy(entry->data, buffer.c_str(), entry_size);
+ return true;
+}
+
+void ExifUtilsImpl::destroyApp1() {
+ /*
+ * Since there is no API to access ExifMem in ExifData->priv, we use free
+ * here, which is the default free function in libexif. See
+ * exif_data_save_data() for detail.
+ */
+ free(app1_buffer_);
+ app1_buffer_ = nullptr;
+ app1_length_ = 0;
+}
+
+bool ExifUtilsImpl::setFromMetadata(const CameraMetadata& metadata, const size_t imageWidth,
+ const size_t imageHeight) {
+ // Precision of the float-to-rational conversion for EXIF tags.
+ constexpr int kRationalPrecision = 10000;
+ if (!setImageWidth(imageWidth) || !setImageHeight(imageHeight)) {
+ ALOGE("%s: setting image resolution failed.", __FUNCTION__);
+ return false;
+ }
+
+ struct timespec tp;
+ struct tm time_info;
+ bool time_available = clock_gettime(CLOCK_REALTIME, &tp) != -1;
+ localtime_r(&tp.tv_sec, &time_info);
+ if (!setDateTime(time_info)) {
+ ALOGE("%s: setting data time failed.", __FUNCTION__);
+ return false;
+ }
+
+ float focal_length;
+ camera_metadata_ro_entry entry = metadata.find(ANDROID_LENS_FOCAL_LENGTH);
+ if (entry.count) {
+ focal_length = entry.data.f[0];
+
+ if (!setFocalLength(static_cast<uint32_t>(focal_length * kRationalPrecision),
+ kRationalPrecision)) {
+ ALOGE("%s: setting focal length failed.", __FUNCTION__);
+ return false;
+ }
+ } else {
+ ALOGV("%s: Cannot find focal length in metadata.", __FUNCTION__);
+ }
+
+ if (metadata.exists(ANDROID_JPEG_GPS_COORDINATES)) {
+ entry = metadata.find(ANDROID_JPEG_GPS_COORDINATES);
+ if (entry.count < 3) {
+ ALOGE("%s: Gps coordinates in metadata is not complete.", __FUNCTION__);
+ return false;
+ }
+ if (!setGpsLatitude(entry.data.d[0])) {
+ ALOGE("%s: setting gps latitude failed.", __FUNCTION__);
+ return false;
+ }
+ if (!setGpsLongitude(entry.data.d[1])) {
+ ALOGE("%s: setting gps longitude failed.", __FUNCTION__);
+ return false;
+ }
+ if (!setGpsAltitude(entry.data.d[2])) {
+ ALOGE("%s: setting gps altitude failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_JPEG_GPS_PROCESSING_METHOD)) {
+ entry = metadata.find(ANDROID_JPEG_GPS_PROCESSING_METHOD);
+ std::string method_str(reinterpret_cast<const char*>(entry.data.u8));
+ if (!setGpsProcessingMethod(method_str)) {
+ ALOGE("%s: setting gps processing method failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (time_available && metadata.exists(ANDROID_JPEG_GPS_TIMESTAMP)) {
+ entry = metadata.find(ANDROID_JPEG_GPS_TIMESTAMP);
+ time_t timestamp = static_cast<time_t>(entry.data.i64[0]);
+ if (gmtime_r(&timestamp, &time_info)) {
+ if (!setGpsTimestamp(time_info)) {
+ ALOGE("%s: setting gps timestamp failed.", __FUNCTION__);
+ return false;
+ }
+ } else {
+ ALOGE("%s: Time tranformation failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_JPEG_ORIENTATION)) {
+ entry = metadata.find(ANDROID_JPEG_ORIENTATION);
+ if (!setOrientation(entry.data.i32[0])) {
+ ALOGE("%s: setting orientation failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_SENSOR_EXPOSURE_TIME)) {
+ entry = metadata.find(ANDROID_SENSOR_EXPOSURE_TIME);
+ // int64_t of nanoseconds
+ if (!setExposureTime(entry.data.i64[0], 1000000000u)) {
+ ALOGE("%s: setting exposure time failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_LENS_APERTURE)) {
+ const int kAperturePrecision = 10000;
+ entry = metadata.find(ANDROID_LENS_APERTURE);
+ if (!setFNumber(entry.data.f[0] * kAperturePrecision, kAperturePrecision)) {
+ ALOGE("%s: setting F number failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_FLASH_INFO_AVAILABLE)) {
+ entry = metadata.find(ANDROID_FLASH_INFO_AVAILABLE);
+ if (entry.data.u8[0] == ANDROID_FLASH_INFO_AVAILABLE_FALSE) {
+ const uint32_t kNoFlashFunction = 0x20;
+ if (!setFlash(kNoFlashFunction)) {
+ ALOGE("%s: setting flash failed.", __FUNCTION__);
+ return false;
+ }
+ } else {
+ ALOGE("%s: Unsupported flash info: %d", __FUNCTION__, entry.data.u8[0]);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_CONTROL_AWB_MODE)) {
+ entry = metadata.find(ANDROID_CONTROL_AWB_MODE);
+ if (entry.data.u8[0] == ANDROID_CONTROL_AWB_MODE_AUTO) {
+ const uint16_t kAutoWhiteBalance = 0;
+ if (!setWhiteBalance(kAutoWhiteBalance)) {
+ ALOGE("%s: setting white balance failed.", __FUNCTION__);
+ return false;
+ }
+ } else {
+ ALOGE("%s: Unsupported awb mode: %d", __FUNCTION__, entry.data.u8[0]);
+ return false;
+ }
+ }
+
+ if (time_available) {
+ char str[4];
+ if (snprintf(str, sizeof(str), "%03ld", tp.tv_nsec / 1000000) < 0) {
+ ALOGE("%s: Subsec is invalid: %ld", __FUNCTION__, tp.tv_nsec);
+ return false;
+ }
+ if (!setSubsecTime(std::string(str))) {
+ ALOGE("%s: setting subsec time failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // namespace helper
+} // namespace common
+} // namespace camera
+} // namespace hardware
+} // namespace android
diff --git a/camera/common/default/HandleImporter.cpp b/camera/common/default/HandleImporter.cpp
new file mode 100644
index 0000000..1145baa
--- /dev/null
+++ b/camera/common/default/HandleImporter.cpp
@@ -0,0 +1,530 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "HandleImporter"
+#include "HandleImporter.h"
+
+#include <gralloctypes/Gralloc4.h>
+#include <log/log.h>
+#include "aidl/android/hardware/graphics/common/Smpte2086.h"
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace common {
+namespace helper {
+
+using aidl::android::hardware::graphics::common::PlaneLayout;
+using aidl::android::hardware::graphics::common::PlaneLayoutComponent;
+using aidl::android::hardware::graphics::common::PlaneLayoutComponentType;
+using aidl::android::hardware::graphics::common::Smpte2086;
+using MetadataType = android::hardware::graphics::mapper::V4_0::IMapper::MetadataType;
+using MapperErrorV2 = android::hardware::graphics::mapper::V2_0::Error;
+using MapperErrorV3 = android::hardware::graphics::mapper::V3_0::Error;
+using MapperErrorV4 = android::hardware::graphics::mapper::V4_0::Error;
+using IMapperV3 = android::hardware::graphics::mapper::V3_0::IMapper;
+using IMapperV4 = android::hardware::graphics::mapper::V4_0::IMapper;
+
+HandleImporter::HandleImporter() : mInitialized(false) {}
+
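+// Probes the graphics mapper HALs in order of preference (4.0, then 3.0, then 2.0) and keeps
+// the first one available; later operations dispatch on whichever mapper was found.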
+void HandleImporter::initializeLocked() {
+ if (mInitialized) {
+ return;
+ }
+
+ mMapperV4 = IMapperV4::getService();
+ if (mMapperV4 != nullptr) {
+ mInitialized = true;
+ return;
+ }
+
+ mMapperV3 = IMapperV3::getService();
+ if (mMapperV3 != nullptr) {
+ mInitialized = true;
+ return;
+ }
+
+ mMapperV2 = IMapper::getService();
+ if (mMapperV2 == nullptr) {
+ ALOGE("%s: cannnot acccess graphics mapper HAL!", __FUNCTION__);
+ return;
+ }
+
+ mInitialized = true;
+ return;
+}
+
+void HandleImporter::cleanup() {
+ mMapperV4.clear();
+ mMapperV3.clear();
+ mMapperV2.clear();
+ mInitialized = false;
+}
+
+template <class M, class E>
+bool HandleImporter::importBufferInternal(const sp<M> mapper, buffer_handle_t& handle) {
+ E error;
+ buffer_handle_t importedHandle;
+ auto ret = mapper->importBuffer(
+ hidl_handle(handle), [&](const auto& tmpError, const auto& tmpBufferHandle) {
+ error = tmpError;
+ importedHandle = static_cast<buffer_handle_t>(tmpBufferHandle);
+ });
+
+ if (!ret.isOk()) {
+ ALOGE("%s: mapper importBuffer failed: %s", __FUNCTION__, ret.description().c_str());
+ return false;
+ }
+
+ if (error != E::NONE) {
+ return false;
+ }
+
+ handle = importedHandle;
+ return true;
+}
+
+template <class M, class E>
+YCbCrLayout HandleImporter::lockYCbCrInternal(const sp<M> mapper, buffer_handle_t& buf,
+ uint64_t cpuUsage,
+ const IMapper::Rect& accessRegion) {
+ hidl_handle acquireFenceHandle;
+ auto buffer = const_cast<native_handle_t*>(buf);
+ YCbCrLayout layout = {};
+
+ typename M::Rect accessRegionCopy = {accessRegion.left, accessRegion.top, accessRegion.width,
+ accessRegion.height};
+ mapper->lockYCbCr(buffer, cpuUsage, accessRegionCopy, acquireFenceHandle,
+ [&](const auto& tmpError, const auto& tmpLayout) {
+ if (tmpError == E::NONE) {
+ // Member by member copy from different versions of YCbCrLayout.
+ layout.y = tmpLayout.y;
+ layout.cb = tmpLayout.cb;
+ layout.cr = tmpLayout.cr;
+ layout.yStride = tmpLayout.yStride;
+ layout.cStride = tmpLayout.cStride;
+ layout.chromaStep = tmpLayout.chromaStep;
+ } else {
+ ALOGE("%s: failed to lockYCbCr error %d!", __FUNCTION__, tmpError);
+ }
+ });
+ return layout;
+}
+
+bool isMetadataPesent(const sp<IMapperV4> mapper, const buffer_handle_t& buf,
+ MetadataType metadataType) {
+ auto buffer = const_cast<native_handle_t*>(buf);
+ bool ret = false;
+ hidl_vec<uint8_t> vec;
+ mapper->get(buffer, metadataType, [&](const auto& tmpError, const auto& tmpMetadata) {
+ if (tmpError == MapperErrorV4::NONE) {
+ vec = tmpMetadata;
+ } else {
+ ALOGE("%s: failed to get metadata %d!", __FUNCTION__, tmpError);
+ }
+ });
+
+ if (vec.size() > 0) {
+ if (metadataType == gralloc4::MetadataType_Smpte2086) {
+ std::optional<Smpte2086> realSmpte2086;
+ gralloc4::decodeSmpte2086(vec, &realSmpte2086);
+ ret = realSmpte2086.has_value();
+ } else if (metadataType == gralloc4::MetadataType_Smpte2094_10) {
+ std::optional<std::vector<uint8_t>> realSmpte2094_10;
+ gralloc4::decodeSmpte2094_10(vec, &realSmpte2094_10);
+ ret = realSmpte2094_10.has_value();
+ } else if (metadataType == gralloc4::MetadataType_Smpte2094_40) {
+ std::optional<std::vector<uint8_t>> realSmpte2094_40;
+ gralloc4::decodeSmpte2094_40(vec, &realSmpte2094_40);
+ ret = realSmpte2094_40.has_value();
+ } else {
+ ALOGE("%s: Unknown metadata type!", __FUNCTION__);
+ }
+ }
+
+ return ret;
+}
+
+std::vector<PlaneLayout> getPlaneLayouts(const sp<IMapperV4> mapper, buffer_handle_t& buf) {
+ auto buffer = const_cast<native_handle_t*>(buf);
+ std::vector<PlaneLayout> planeLayouts;
+ hidl_vec<uint8_t> encodedPlaneLayouts;
+ mapper->get(buffer, gralloc4::MetadataType_PlaneLayouts,
+ [&](const auto& tmpError, const auto& tmpEncodedPlaneLayouts) {
+ if (tmpError == MapperErrorV4::NONE) {
+ encodedPlaneLayouts = tmpEncodedPlaneLayouts;
+ } else {
+ ALOGE("%s: failed to get plane layouts %d!", __FUNCTION__, tmpError);
+ }
+ });
+
+ gralloc4::decodePlaneLayouts(encodedPlaneLayouts, &planeLayouts);
+
+ return planeLayouts;
+}
+
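+// Specialization for mapper 4.0: the buffer is locked with lock() and the YCbCr layout is
+// reconstructed from the gralloc4 plane-layout metadata rather than via lockYCbCr().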
+template <>
+YCbCrLayout HandleImporter::lockYCbCrInternal<IMapperV4, MapperErrorV4>(
+ const sp<IMapperV4> mapper, buffer_handle_t& buf, uint64_t cpuUsage,
+ const IMapper::Rect& accessRegion) {
+ hidl_handle acquireFenceHandle;
+ auto buffer = const_cast<native_handle_t*>(buf);
+ YCbCrLayout layout = {};
+ void* mapped = nullptr;
+
+ typename IMapperV4::Rect accessRegionV4 = {accessRegion.left, accessRegion.top,
+ accessRegion.width, accessRegion.height};
+ mapper->lock(buffer, cpuUsage, accessRegionV4, acquireFenceHandle,
+ [&](const auto& tmpError, const auto& tmpPtr) {
+ if (tmpError == MapperErrorV4::NONE) {
+ mapped = tmpPtr;
+ } else {
+ ALOGE("%s: failed to lock error %d!", __FUNCTION__, tmpError);
+ }
+ });
+
+ if (mapped == nullptr) {
+ return layout;
+ }
+
+ std::vector<PlaneLayout> planeLayouts = getPlaneLayouts(mapper, buf);
+ for (const auto& planeLayout : planeLayouts) {
+ for (const auto& planeLayoutComponent : planeLayout.components) {
+ const auto& type = planeLayoutComponent.type;
+
+ if (!gralloc4::isStandardPlaneLayoutComponentType(type)) {
+ continue;
+ }
+
+ uint8_t* data = reinterpret_cast<uint8_t*>(mapped);
+ data += planeLayout.offsetInBytes;
+ data += planeLayoutComponent.offsetInBits / 8;
+
+ switch (static_cast<PlaneLayoutComponentType>(type.value)) {
+ case PlaneLayoutComponentType::Y:
+ layout.y = data;
+ layout.yStride = planeLayout.strideInBytes;
+ break;
+ case PlaneLayoutComponentType::CB:
+ layout.cb = data;
+ layout.cStride = planeLayout.strideInBytes;
+ layout.chromaStep = planeLayout.sampleIncrementInBits / 8;
+ break;
+ case PlaneLayoutComponentType::CR:
+ layout.cr = data;
+ layout.cStride = planeLayout.strideInBytes;
+ layout.chromaStep = planeLayout.sampleIncrementInBits / 8;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ return layout;
+}
+
+template <class M, class E>
+int HandleImporter::unlockInternal(const sp<M> mapper, buffer_handle_t& buf) {
+ int releaseFence = -1;
+ auto buffer = const_cast<native_handle_t*>(buf);
+
+ mapper->unlock(buffer, [&](const auto& tmpError, const auto& tmpReleaseFence) {
+ if (tmpError == E::NONE) {
+ auto fenceHandle = tmpReleaseFence.getNativeHandle();
+ if (fenceHandle) {
+ if (fenceHandle->numInts != 0 || fenceHandle->numFds != 1) {
+ ALOGE("%s: bad release fence numInts %d numFds %d", __FUNCTION__,
+ fenceHandle->numInts, fenceHandle->numFds);
+ return;
+ }
+ releaseFence = dup(fenceHandle->data[0]);
+ if (releaseFence < 0) {
+ ALOGE("%s: bad release fence FD %d", __FUNCTION__, releaseFence);
+ }
+ }
+ } else {
+ ALOGE("%s: failed to unlock error %d!", __FUNCTION__, tmpError);
+ }
+ });
+ return releaseFence;
+}
+
+// Any buffer_handle_t is owned by the caller, so this function imports (clones) it
+// through the gralloc mapper and translates an empty handle to nullptr. Both are
+// done in-place.
+bool HandleImporter::importBuffer(buffer_handle_t& handle) {
+ if (!handle->numFds && !handle->numInts) {
+ handle = nullptr;
+ return true;
+ }
+
+ Mutex::Autolock lock(mLock);
+ if (!mInitialized) {
+ initializeLocked();
+ }
+
+ if (mMapperV4 != nullptr) {
+ return importBufferInternal<IMapperV4, MapperErrorV4>(mMapperV4, handle);
+ }
+
+ if (mMapperV3 != nullptr) {
+ return importBufferInternal<IMapperV3, MapperErrorV3>(mMapperV3, handle);
+ }
+
+ if (mMapperV2 != nullptr) {
+ return importBufferInternal<IMapper, MapperErrorV2>(mMapperV2, handle);
+ }
+
+ ALOGE("%s: mMapperV4, mMapperV3 and mMapperV2 are all null!", __FUNCTION__);
+ return false;
+}
+
+void HandleImporter::freeBuffer(buffer_handle_t handle) {
+ if (!handle) {
+ return;
+ }
+
+ Mutex::Autolock lock(mLock);
+ if (!mInitialized) {
+ initializeLocked();
+ }
+
+ if (mMapperV4 != nullptr) {
+ auto ret = mMapperV4->freeBuffer(const_cast<native_handle_t*>(handle));
+ if (!ret.isOk()) {
+ ALOGE("%s: mapper freeBuffer failed: %s", __FUNCTION__, ret.description().c_str());
+ }
+ } else if (mMapperV3 != nullptr) {
+ auto ret = mMapperV3->freeBuffer(const_cast<native_handle_t*>(handle));
+ if (!ret.isOk()) {
+ ALOGE("%s: mapper freeBuffer failed: %s", __FUNCTION__, ret.description().c_str());
+ }
+ } else {
+ auto ret = mMapperV2->freeBuffer(const_cast<native_handle_t*>(handle));
+ if (!ret.isOk()) {
+ ALOGE("%s: mapper freeBuffer failed: %s", __FUNCTION__, ret.description().c_str());
+ }
+ }
+}
+
+bool HandleImporter::importFence(const native_handle_t* handle, int& fd) const {
+ if (handle == nullptr || handle->numFds == 0) {
+ fd = -1;
+ } else if (handle->numFds == 1) {
+ fd = dup(handle->data[0]);
+ if (fd < 0) {
+ ALOGE("failed to dup fence fd %d", handle->data[0]);
+ return false;
+ }
+ } else {
+ ALOGE("invalid fence handle with %d file descriptors", handle->numFds);
+ return false;
+ }
+
+ return true;
+}
+
+void HandleImporter::closeFence(int fd) const {
+ if (fd >= 0) {
+ close(fd);
+ }
+}
+
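+// Convenience overload: treats the buffer as a single row of |size| bytes (width == size,
+// height == 1) and forwards to the Rect-based lock() below.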
+void* HandleImporter::lock(buffer_handle_t& buf, uint64_t cpuUsage, size_t size) {
+ IMapper::Rect accessRegion{0, 0, static_cast<int>(size), 1};
+ return lock(buf, cpuUsage, accessRegion);
+}
+
+void* HandleImporter::lock(buffer_handle_t& buf, uint64_t cpuUsage,
+ const IMapper::Rect& accessRegion) {
+ Mutex::Autolock lock(mLock);
+
+ if (!mInitialized) {
+ initializeLocked();
+ }
+
+ void* ret = nullptr;
+
+ if (mMapperV4 == nullptr && mMapperV3 == nullptr && mMapperV2 == nullptr) {
+ ALOGE("%s: mMapperV4, mMapperV3 and mMapperV2 are all null!", __FUNCTION__);
+ return ret;
+ }
+
+ hidl_handle acquireFenceHandle;
+ auto buffer = const_cast<native_handle_t*>(buf);
+ if (mMapperV4 != nullptr) {
+ IMapperV4::Rect accessRegionV4{accessRegion.left, accessRegion.top, accessRegion.width,
+ accessRegion.height};
+
+ mMapperV4->lock(buffer, cpuUsage, accessRegionV4, acquireFenceHandle,
+ [&](const auto& tmpError, const auto& tmpPtr) {
+ if (tmpError == MapperErrorV4::NONE) {
+ ret = tmpPtr;
+ } else {
+ ALOGE("%s: failed to lock error %d!", __FUNCTION__, tmpError);
+ }
+ });
+ } else if (mMapperV3 != nullptr) {
+ IMapperV3::Rect accessRegionV3{accessRegion.left, accessRegion.top, accessRegion.width,
+ accessRegion.height};
+
+ mMapperV3->lock(buffer, cpuUsage, accessRegionV3, acquireFenceHandle,
+ [&](const auto& tmpError, const auto& tmpPtr, const auto& /*bytesPerPixel*/,
+ const auto& /*bytesPerStride*/) {
+ if (tmpError == MapperErrorV3::NONE) {
+ ret = tmpPtr;
+ } else {
+ ALOGE("%s: failed to lock error %d!", __FUNCTION__, tmpError);
+ }
+ });
+ } else {
+ mMapperV2->lock(buffer, cpuUsage, accessRegion, acquireFenceHandle,
+ [&](const auto& tmpError, const auto& tmpPtr) {
+ if (tmpError == MapperErrorV2::NONE) {
+ ret = tmpPtr;
+ } else {
+ ALOGE("%s: failed to lock error %d!", __FUNCTION__, tmpError);
+ }
+ });
+ }
+
+ ALOGV("%s: ptr %p accessRegion.top: %d accessRegion.left: %d accessRegion.width: %d "
+ "accessRegion.height: %d",
+ __FUNCTION__, ret, accessRegion.top, accessRegion.left, accessRegion.width,
+ accessRegion.height);
+ return ret;
+}
+
+YCbCrLayout HandleImporter::lockYCbCr(buffer_handle_t& buf, uint64_t cpuUsage,
+ const IMapper::Rect& accessRegion) {
+ Mutex::Autolock lock(mLock);
+
+ if (!mInitialized) {
+ initializeLocked();
+ }
+
+ if (mMapperV4 != nullptr) {
+ return lockYCbCrInternal<IMapperV4, MapperErrorV4>(mMapperV4, buf, cpuUsage, accessRegion);
+ }
+
+ if (mMapperV3 != nullptr) {
+ return lockYCbCrInternal<IMapperV3, MapperErrorV3>(mMapperV3, buf, cpuUsage, accessRegion);
+ }
+
+ if (mMapperV2 != nullptr) {
+ return lockYCbCrInternal<IMapper, MapperErrorV2>(mMapperV2, buf, cpuUsage, accessRegion);
+ }
+
+ ALOGE("%s: mMapperV4, mMapperV3 and mMapperV2 are all null!", __FUNCTION__);
+ return {};
+}
+
+status_t HandleImporter::getMonoPlanarStrideBytes(buffer_handle_t& buf, uint32_t* stride /*out*/) {
+ if (stride == nullptr) {
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock lock(mLock);
+
+ if (!mInitialized) {
+ initializeLocked();
+ }
+
+ if (mMapperV4 != nullptr) {
+ std::vector<PlaneLayout> planeLayouts = getPlaneLayouts(mMapperV4, buf);
+ if (planeLayouts.size() != 1) {
+ ALOGE("%s: Unexpected number of planes %zu!", __FUNCTION__, planeLayouts.size());
+ return BAD_VALUE;
+ }
+
+ *stride = planeLayouts[0].strideInBytes;
+ } else {
+ ALOGE("%s: mMapperV4 is null! Query not supported!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ return OK;
+}
+
+int HandleImporter::unlock(buffer_handle_t& buf) {
+ if (mMapperV4 != nullptr) {
+ return unlockInternal<IMapperV4, MapperErrorV4>(mMapperV4, buf);
+ }
+ if (mMapperV3 != nullptr) {
+ return unlockInternal<IMapperV3, MapperErrorV3>(mMapperV3, buf);
+ }
+ if (mMapperV2 != nullptr) {
+ return unlockInternal<IMapper, MapperErrorV2>(mMapperV2, buf);
+ }
+
+ ALOGE("%s: mMapperV4, mMapperV3 and mMapperV2 are all null!", __FUNCTION__);
+ return -1;
+}
+
+bool HandleImporter::isSmpte2086Present(const buffer_handle_t& buf) {
+ Mutex::Autolock lock(mLock);
+
+ if (!mInitialized) {
+ initializeLocked();
+ }
+
+ if (mMapperV4 != nullptr) {
+ return isMetadataPesent(mMapperV4, buf, gralloc4::MetadataType_Smpte2086);
+ } else {
+ ALOGE("%s: mMapperV4 is null! Query not supported!", __FUNCTION__);
+ }
+
+ return false;
+}
+
+bool HandleImporter::isSmpte2094_10Present(const buffer_handle_t& buf) {
+ Mutex::Autolock lock(mLock);
+
+ if (!mInitialized) {
+ initializeLocked();
+ }
+
+ if (mMapperV4 != nullptr) {
+ return isMetadataPesent(mMapperV4, buf, gralloc4::MetadataType_Smpte2094_10);
+ } else {
+ ALOGE("%s: mMapperV4 is null! Query not supported!", __FUNCTION__);
+ }
+
+ return false;
+}
+
+bool HandleImporter::isSmpte2094_40Present(const buffer_handle_t& buf) {
+ Mutex::Autolock lock(mLock);
+
+ if (!mInitialized) {
+ initializeLocked();
+ }
+
+ if (mMapperV4 != nullptr) {
+ return isMetadataPesent(mMapperV4, buf, gralloc4::MetadataType_Smpte2094_40);
+ } else {
+ ALOGE("%s: mMapperV4 is null! Query not supported!", __FUNCTION__);
+ }
+
+ return false;
+}
+
+} // namespace helper
+} // namespace common
+} // namespace camera
+} // namespace hardware
+} // namespace android
diff --git a/camera/common/default/OWNERS b/camera/common/default/OWNERS
new file mode 100644
index 0000000..f48a95c
--- /dev/null
+++ b/camera/common/default/OWNERS
@@ -0,0 +1 @@
+include platform/frameworks/av:/camera/OWNERS
diff --git a/camera/common/default/SimpleThread.cpp b/camera/common/default/SimpleThread.cpp
new file mode 100644
index 0000000..46e89ba
--- /dev/null
+++ b/camera/common/default/SimpleThread.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SimpleThread.h"
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace common {
+namespace helper {
+
+SimpleThread::SimpleThread() : mDone(true), mThread() {}
+SimpleThread::~SimpleThread() {
+ // Safe to call requestExitAndWait() from the destructor because requestExitAndWait() ensures
+ // that the thread is joinable before joining on it. This is different from how
+ // android::Thread worked.
+ requestExitAndWait();
+}
+
+void SimpleThread::run() {
+ requestExitAndWait(); // Exit current execution, if any.
+
+ // start thread
+ mDone.store(false, std::memory_order_release);
+ mThread = std::thread(&SimpleThread::runLoop, this);
+}
+
+void SimpleThread::requestExitAndWait() {
+ // Signal thread to stop
+ mDone.store(true, std::memory_order_release);
+
+ // Wait for the thread to exit if needed. This should take no more than one
+ // iteration of threadLoop().
+ if (mThread.joinable()) {
+ mThread.join();
+ }
+ mThread = std::thread();
+}
+
+void SimpleThread::runLoop() {
+ while (!exitPending()) {
+ if (!threadLoop()) {
+ break;
+ }
+ }
+}
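+
+// Illustrative usage sketch (assumes the header declares threadLoop() and exitPending(), as
+// their use above implies; names are placeholders):
+//
+//   class Worker : public SimpleThread {
+//       bool threadLoop() override { /* do one unit of work */ return true; }
+//   };
+//   Worker worker;
+//   worker.run();                 // starts the loop on a new thread
+//   worker.requestExitAndWait();  // signals mDone and joins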
+
+} // namespace helper
+} // namespace common
+} // namespace camera
+} // namespace hardware
+} // namespace android
\ No newline at end of file
diff --git a/camera/common/default/VendorTagDescriptor.cpp b/camera/common/default/VendorTagDescriptor.cpp
new file mode 100644
index 0000000..1282bd0
--- /dev/null
+++ b/camera/common/default/VendorTagDescriptor.cpp
@@ -0,0 +1,531 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CamComm1.0-VTDesc"
+
+#include <camera_metadata_hidden.h>
+#include <log/log.h>
+#include <system/camera_metadata.h>
+#include <utils/Errors.h>
+#include <utils/Mutex.h>
+#include <utils/SortedVector.h>
+#include <utils/Vector.h>
+
+#include "VendorTagDescriptor.h"
+
+#include <stdio.h>
+#include <string.h>
+
+namespace android {
+namespace hardware {
+namespace camera2 {
+namespace params {
+
+VendorTagDescriptor::~VendorTagDescriptor() {
+ size_t len = mReverseMapping.size();
+ for (size_t i = 0; i < len; ++i) {
+ delete mReverseMapping[i];
+ }
+}
+
+VendorTagDescriptor::VendorTagDescriptor() : mTagCount(0), mVendorOps() {}
+
+VendorTagDescriptor::VendorTagDescriptor(const VendorTagDescriptor& src) {
+ copyFrom(src);
+}
+
+VendorTagDescriptor& VendorTagDescriptor::operator=(const VendorTagDescriptor& rhs) {
+ copyFrom(rhs);
+ return *this;
+}
+
+void VendorTagDescriptor::copyFrom(const VendorTagDescriptor& src) {
+ if (this == &src) return;
+
+ size_t len = mReverseMapping.size();
+ for (size_t i = 0; i < len; ++i) {
+ delete mReverseMapping[i];
+ }
+ mReverseMapping.clear();
+
+ len = src.mReverseMapping.size();
+ // Have to copy KeyedVectors inside mReverseMapping
+ for (size_t i = 0; i < len; ++i) {
+ KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
+ *nameMapper = *(src.mReverseMapping.valueAt(i));
+ mReverseMapping.add(src.mReverseMapping.keyAt(i), nameMapper);
+ }
+ // Everything else is simple
+ mTagToNameMap = src.mTagToNameMap;
+ mTagToSectionMap = src.mTagToSectionMap;
+ mTagToTypeMap = src.mTagToTypeMap;
+ mSections = src.mSections;
+ mTagCount = src.mTagCount;
+ mVendorOps = src.mVendorOps;
+}
+
+int VendorTagDescriptor::getTagCount() const {
+ size_t size = mTagToNameMap.size();
+ if (size == 0) {
+ return VENDOR_TAG_COUNT_ERR;
+ }
+ return size;
+}
+
+void VendorTagDescriptor::getTagArray(uint32_t* tagArray) const {
+ size_t size = mTagToNameMap.size();
+ for (size_t i = 0; i < size; ++i) {
+ tagArray[i] = mTagToNameMap.keyAt(i);
+ }
+}
+
+const char* VendorTagDescriptor::getSectionName(uint32_t tag) const {
+ ssize_t index = mTagToSectionMap.indexOfKey(tag);
+ if (index < 0) {
+ return VENDOR_SECTION_NAME_ERR;
+ }
+ return mSections[mTagToSectionMap.valueAt(index)].string();
+}
+
+ssize_t VendorTagDescriptor::getSectionIndex(uint32_t tag) const {
+ return mTagToSectionMap.valueFor(tag);
+}
+
+const char* VendorTagDescriptor::getTagName(uint32_t tag) const {
+ ssize_t index = mTagToNameMap.indexOfKey(tag);
+ if (index < 0) {
+ return VENDOR_TAG_NAME_ERR;
+ }
+ return mTagToNameMap.valueAt(index).string();
+}
+
+int VendorTagDescriptor::getTagType(uint32_t tag) const {
+ auto iter = mTagToTypeMap.find(tag);
+ if (iter == mTagToTypeMap.end()) {
+ return VENDOR_TAG_TYPE_ERR;
+ }
+ return iter->second;
+}
+
+const SortedVector<String8>* VendorTagDescriptor::getAllSectionNames() const {
+ return &mSections;
+}
+
+status_t VendorTagDescriptor::lookupTag(const String8& name, const String8& section,
+ /*out*/ uint32_t* tag) const {
+ ssize_t index = mReverseMapping.indexOfKey(section);
+ if (index < 0) {
+ ALOGE("%s: Section '%s' does not exist.", __FUNCTION__, section.string());
+ return BAD_VALUE;
+ }
+
+ ssize_t nameIndex = mReverseMapping[index]->indexOfKey(name);
+ if (nameIndex < 0) {
+ ALOGE("%s: Tag name '%s' does not exist.", __FUNCTION__, name.string());
+ return BAD_VALUE;
+ }
+
+ if (tag != NULL) {
+ *tag = mReverseMapping[index]->valueAt(nameIndex);
+ }
+ return OK;
+}
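+
+// Example (illustrative, placeholder names): lookupTag(String8("exampleTag"),
+// String8("com.example.section"), &tag) returns OK and fills |tag| only if that
+// section/name pair exists in this descriptor.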
+
+void VendorTagDescriptor::dump(int fd, int verbosity, int indentation) const {
+ size_t size = mTagToNameMap.size();
+ if (size == 0) {
+ dprintf(fd, "%*sDumping configured vendor tag descriptors: None set\n", indentation, "");
+ return;
+ }
+
+ dprintf(fd, "%*sDumping configured vendor tag descriptors: %zu entries\n", indentation, "",
+ size);
+ for (size_t i = 0; i < size; ++i) {
+ uint32_t tag = mTagToNameMap.keyAt(i);
+
+ if (verbosity < 1) {
+ dprintf(fd, "%*s0x%x\n", indentation + 2, "", tag);
+ continue;
+ }
+ String8 name = mTagToNameMap.valueAt(i);
+ uint32_t sectionId = mTagToSectionMap.valueFor(tag);
+ String8 sectionName = mSections[sectionId];
+ int type = mTagToTypeMap.at(tag);
+ const char* typeName =
+ (type >= 0 && type < NUM_TYPES) ? camera_metadata_type_names[type] : "UNKNOWN";
+ dprintf(fd, "%*s0x%x (%s) with type %d (%s) defined in section %s\n", indentation + 2, "",
+ tag, name.string(), type, typeName, sectionName.string());
+ }
+}
+
+int VendorTagDescriptorCache::getTagCount(metadata_vendor_id_t id) const {
+ int ret = 0;
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ ret = desc->second->getTagCount();
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+
+ return ret;
+}
+
+void VendorTagDescriptorCache::getTagArray(uint32_t* tagArray, metadata_vendor_id_t id) const {
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ desc->second->getTagArray(tagArray);
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+}
+
+const char* VendorTagDescriptorCache::getSectionName(uint32_t tag, metadata_vendor_id_t id) const {
+ const char* ret = nullptr;
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ ret = desc->second->getSectionName(tag);
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+
+ return ret;
+}
+
+const char* VendorTagDescriptorCache::getTagName(uint32_t tag, metadata_vendor_id_t id) const {
+ const char* ret = nullptr;
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ ret = desc->second->getTagName(tag);
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+
+ return ret;
+}
+
+int VendorTagDescriptorCache::getTagType(uint32_t tag, metadata_vendor_id_t id) const {
+ int ret = 0;
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ ret = desc->second->getTagType(tag);
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+
+ return ret;
+}
+
+void VendorTagDescriptorCache::dump(int fd, int verbosity, int indentation) const {
+ for (const auto& desc : mVendorMap) {
+ desc.second->dump(fd, verbosity, indentation);
+ }
+}
+
+int32_t VendorTagDescriptorCache::addVendorDescriptor(
+ metadata_vendor_id_t id, sp<hardware::camera::common::helper::VendorTagDescriptor> desc) {
+ auto entry = mVendorMap.find(id);
+ if (entry != mVendorMap.end()) {
+ ALOGE("%s: Vendor descriptor with same id already present!", __func__);
+ return BAD_VALUE;
+ }
+
+ mVendorMap.emplace(id, desc);
+ return NO_ERROR;
+}
+
+int32_t VendorTagDescriptorCache::getVendorTagDescriptor(
+ metadata_vendor_id_t id,
+ sp<hardware::camera::common::helper::VendorTagDescriptor>* desc /*out*/) {
+ auto entry = mVendorMap.find(id);
+ if (entry == mVendorMap.end()) {
+ return NAME_NOT_FOUND;
+ }
+
+ *desc = entry->second;
+
+ return NO_ERROR;
+}
+} // namespace params
+} // namespace camera2
+
+namespace camera {
+namespace common {
+namespace helper {
+
+extern "C" {
+
+static int vendor_tag_descriptor_get_tag_count(const vendor_tag_ops_t* v);
+static void vendor_tag_descriptor_get_all_tags(const vendor_tag_ops_t* v, uint32_t* tagArray);
+static const char* vendor_tag_descriptor_get_section_name(const vendor_tag_ops_t* v, uint32_t tag);
+static const char* vendor_tag_descriptor_get_tag_name(const vendor_tag_ops_t* v, uint32_t tag);
+static int vendor_tag_descriptor_get_tag_type(const vendor_tag_ops_t* v, uint32_t tag);
+
+static int vendor_tag_descriptor_cache_get_tag_count(metadata_vendor_id_t id);
+static void vendor_tag_descriptor_cache_get_all_tags(uint32_t* tagArray, metadata_vendor_id_t id);
+static const char* vendor_tag_descriptor_cache_get_section_name(uint32_t tag,
+ metadata_vendor_id_t id);
+static const char* vendor_tag_descriptor_cache_get_tag_name(uint32_t tag, metadata_vendor_id_t id);
+static int vendor_tag_descriptor_cache_get_tag_type(uint32_t tag, metadata_vendor_id_t id);
+} /* extern "C" */
+
+static Mutex sLock;
+static sp<VendorTagDescriptor> sGlobalVendorTagDescriptor;
+static sp<VendorTagDescriptorCache> sGlobalVendorTagDescriptorCache;
+
+status_t VendorTagDescriptor::createDescriptorFromOps(const vendor_tag_ops_t* vOps,
+ /*out*/
+ sp<VendorTagDescriptor>& descriptor) {
+ if (vOps == NULL) {
+ ALOGE("%s: vendor_tag_ops argument was NULL.", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ int tagCount = vOps->get_tag_count(vOps);
+ if (tagCount < 0 || tagCount > INT32_MAX) {
+ ALOGE("%s: tag count %d from vendor ops is invalid.", __FUNCTION__, tagCount);
+ return BAD_VALUE;
+ }
+
+ Vector<uint32_t> tagArray;
+ LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
+ "%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);
+
+ vOps->get_all_tags(vOps, /*out*/ tagArray.editArray());
+
+ sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
+ desc->mTagCount = tagCount;
+
+ SortedVector<String8> sections;
+ KeyedVector<uint32_t, String8> tagToSectionMap;
+
+ for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
+ uint32_t tag = tagArray[i];
+ if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
+ ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
+ return BAD_VALUE;
+ }
+ const char* tagName = vOps->get_tag_name(vOps, tag);
+ if (tagName == NULL) {
+ ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
+ return BAD_VALUE;
+ }
+ desc->mTagToNameMap.add(tag, String8(tagName));
+ const char* sectionName = vOps->get_section_name(vOps, tag);
+ if (sectionName == NULL) {
+ ALOGE("%s: no section name defined for vendor tag %d.", __FUNCTION__, tag);
+ return BAD_VALUE;
+ }
+
+ String8 sectionString(sectionName);
+
+ sections.add(sectionString);
+ tagToSectionMap.add(tag, sectionString);
+
+ int tagType = vOps->get_tag_type(vOps, tag);
+ if (tagType < 0 || tagType >= NUM_TYPES) {
+ ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
+ return BAD_VALUE;
+ }
+ desc->mTagToTypeMap.insert(std::make_pair(tag, tagType));
+ }
+
+ desc->mSections = sections;
+
+ for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
+ uint32_t tag = tagArray[i];
+ const String8& sectionString = tagToSectionMap.valueFor(tag);
+
+ // Set up tag to section index map
+ ssize_t index = sections.indexOf(sectionString);
+ LOG_ALWAYS_FATAL_IF(index < 0, "index %zd must be non-negative", index);
+ desc->mTagToSectionMap.add(tag, static_cast<uint32_t>(index));
+
+ // Set up reverse mapping
+ ssize_t reverseIndex = -1;
+ if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
+ KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
+ reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
+ }
+ desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
+ }
+
+ descriptor = desc;
+ return OK;
+}
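+
+// Illustrative call sequence (a sketch, not part of this file's API): assuming
+// 'ops' is a vendor_tag_ops_t filled in by the camera HAL,
+//
+//   sp<VendorTagDescriptor> desc;
+//   if (VendorTagDescriptor::createDescriptorFromOps(&ops, desc) == OK) {
+//       VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
+//   }
+//
+// builds a descriptor and registers the callback shims defined below with the
+// camera_metadata library.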
+
+status_t VendorTagDescriptor::setAsGlobalVendorTagDescriptor(const sp<VendorTagDescriptor>& desc) {
+ status_t res = OK;
+ Mutex::Autolock al(sLock);
+ sGlobalVendorTagDescriptor = desc;
+
+ vendor_tag_ops_t* opsPtr = NULL;
+ if (desc != NULL) {
+ opsPtr = &(desc->mVendorOps);
+ opsPtr->get_tag_count = vendor_tag_descriptor_get_tag_count;
+ opsPtr->get_all_tags = vendor_tag_descriptor_get_all_tags;
+ opsPtr->get_section_name = vendor_tag_descriptor_get_section_name;
+ opsPtr->get_tag_name = vendor_tag_descriptor_get_tag_name;
+ opsPtr->get_tag_type = vendor_tag_descriptor_get_tag_type;
+ }
+ if ((res = set_camera_metadata_vendor_ops(opsPtr)) != OK) {
+ ALOGE("%s: Could not set vendor tag descriptor, received error %s (%d).", __FUNCTION__,
+ strerror(-res), res);
+ }
+ return res;
+}
+
+void VendorTagDescriptor::clearGlobalVendorTagDescriptor() {
+ Mutex::Autolock al(sLock);
+ set_camera_metadata_vendor_ops(NULL);
+ sGlobalVendorTagDescriptor.clear();
+}
+
+sp<VendorTagDescriptor> VendorTagDescriptor::getGlobalVendorTagDescriptor() {
+ Mutex::Autolock al(sLock);
+ return sGlobalVendorTagDescriptor;
+}
+
+status_t VendorTagDescriptorCache::setAsGlobalVendorTagCache(
+ const sp<VendorTagDescriptorCache>& cache) {
+ status_t res = OK;
+ Mutex::Autolock al(sLock);
+ sGlobalVendorTagDescriptorCache = cache;
+
+ struct vendor_tag_cache_ops* opsPtr = NULL;
+ if (cache != NULL) {
+ opsPtr = &(cache->mVendorCacheOps);
+ opsPtr->get_tag_count = vendor_tag_descriptor_cache_get_tag_count;
+ opsPtr->get_all_tags = vendor_tag_descriptor_cache_get_all_tags;
+ opsPtr->get_section_name = vendor_tag_descriptor_cache_get_section_name;
+ opsPtr->get_tag_name = vendor_tag_descriptor_cache_get_tag_name;
+ opsPtr->get_tag_type = vendor_tag_descriptor_cache_get_tag_type;
+ }
+ if ((res = set_camera_metadata_vendor_cache_ops(opsPtr)) != OK) {
+ ALOGE("%s: Could not set vendor tag cache, received error %s (%d).", __FUNCTION__,
+ strerror(-res), res);
+ }
+ return res;
+}
+
+void VendorTagDescriptorCache::clearGlobalVendorTagCache() {
+ Mutex::Autolock al(sLock);
+ set_camera_metadata_vendor_cache_ops(NULL);
+ sGlobalVendorTagDescriptorCache.clear();
+}
+
+sp<VendorTagDescriptorCache> VendorTagDescriptorCache::getGlobalVendorTagCache() {
+ Mutex::Autolock al(sLock);
+ return sGlobalVendorTagDescriptorCache;
+}
+
+extern "C" {
+
+int vendor_tag_descriptor_get_tag_count(const vendor_tag_ops_t* /*v*/) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptor == NULL) {
+ ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
+ return VENDOR_TAG_COUNT_ERR;
+ }
+ return sGlobalVendorTagDescriptor->getTagCount();
+}
+
+void vendor_tag_descriptor_get_all_tags(const vendor_tag_ops_t* /*v*/, uint32_t* tagArray) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptor == NULL) {
+ ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
+ return;
+ }
+ sGlobalVendorTagDescriptor->getTagArray(tagArray);
+}
+
+const char* vendor_tag_descriptor_get_section_name(const vendor_tag_ops_t* /*v*/, uint32_t tag) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptor == NULL) {
+ ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
+ return VENDOR_SECTION_NAME_ERR;
+ }
+ return sGlobalVendorTagDescriptor->getSectionName(tag);
+}
+
+const char* vendor_tag_descriptor_get_tag_name(const vendor_tag_ops_t* /*v*/, uint32_t tag) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptor == NULL) {
+ ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
+ return VENDOR_TAG_NAME_ERR;
+ }
+ return sGlobalVendorTagDescriptor->getTagName(tag);
+}
+
+int vendor_tag_descriptor_get_tag_type(const vendor_tag_ops_t* /*v*/, uint32_t tag) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptor == NULL) {
+ ALOGE("%s: Vendor tag descriptor not initialized.", __FUNCTION__);
+ return VENDOR_TAG_TYPE_ERR;
+ }
+ return sGlobalVendorTagDescriptor->getTagType(tag);
+}
+
+int vendor_tag_descriptor_cache_get_tag_count(metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ return VENDOR_TAG_COUNT_ERR;
+ }
+ return sGlobalVendorTagDescriptorCache->getTagCount(id);
+}
+
+void vendor_tag_descriptor_cache_get_all_tags(uint32_t* tagArray, metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ }
+ sGlobalVendorTagDescriptorCache->getTagArray(tagArray, id);
+}
+
+const char* vendor_tag_descriptor_cache_get_section_name(uint32_t tag, metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ return VENDOR_SECTION_NAME_ERR;
+ }
+ return sGlobalVendorTagDescriptorCache->getSectionName(tag, id);
+}
+
+const char* vendor_tag_descriptor_cache_get_tag_name(uint32_t tag, metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ return VENDOR_TAG_NAME_ERR;
+ }
+ return sGlobalVendorTagDescriptorCache->getTagName(tag, id);
+}
+
+int vendor_tag_descriptor_cache_get_tag_type(uint32_t tag, metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ return VENDOR_TAG_TYPE_ERR;
+ }
+ return sGlobalVendorTagDescriptorCache->getTagType(tag, id);
+}
+
+} /* extern "C" */
+
+} // namespace helper
+} // namespace common
+} // namespace camera
+} // namespace hardware
+} // namespace android
diff --git a/camera/common/default/include/CameraMetadata.h b/camera/common/default/include/CameraMetadata.h
new file mode 100644
index 0000000..b67914e
--- /dev/null
+++ b/camera/common/default/include/CameraMetadata.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CAMERA_COMMON_1_0_CAMERAMETADATA_H
+#define CAMERA_COMMON_1_0_CAMERAMETADATA_H
+
+#include "system/camera_metadata.h"
+
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace common {
+namespace helper {
+
+class VendorTagDescriptor;
+
+/**
+ * A convenience wrapper around the C-based camera_metadata_t library.
+ */
+class CameraMetadata {
+ public:
+ /** Creates an empty object; best used when expecting to acquire contents
+ * from elsewhere */
+ CameraMetadata();
+ /** Creates an object with space for entryCapacity entries, with
+ * dataCapacity extra storage */
+ CameraMetadata(size_t entryCapacity, size_t dataCapacity = 10);
+
+ ~CameraMetadata();
+
+ /** Takes ownership of passed-in buffer */
+ CameraMetadata(camera_metadata_t* buffer);
+ /** Clones the metadata */
+ CameraMetadata(const CameraMetadata& other);
+
+ /**
+ * Assignment clones metadata buffer.
+ */
+ CameraMetadata& operator=(const CameraMetadata& other);
+ CameraMetadata& operator=(const camera_metadata_t* buffer);
+
+ /**
+ * Get reference to the underlying metadata buffer. Ownership remains with
+ * the CameraMetadata object, but non-const CameraMetadata methods will not
+ * work until unlock() is called. Note that the lock has nothing to do with
+ * thread-safety, it simply prevents the camera_metadata_t pointer returned
+ * here from being accidentally invalidated by CameraMetadata operations.
+ */
+ const camera_metadata_t* getAndLock() const;
+
+ /**
+ * Unlock the CameraMetadata for use again. After this unlock, the pointer
+ * given from getAndLock() may no longer be used. The pointer passed out
+ * from getAndLock must be provided to guarantee that the right object is
+ * being unlocked.
+ */
+ status_t unlock(const camera_metadata_t* buffer) const;
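+
+ // A minimal usage sketch of the getAndLock()/unlock() pairing (illustrative;
+ // get_camera_metadata_entry_count() comes from the camera_metadata C library):
+ //   const camera_metadata_t* raw = metadata.getAndLock();
+ //   size_t entries = get_camera_metadata_entry_count(raw);  // read-only use
+ //   metadata.unlock(raw);  // pass back the same pointer to re-enable mutation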
+
+ /**
+ * Release a raw metadata buffer to the caller. After this call,
+ * CameraMetadata no longer references the buffer, and the caller takes
+ * responsibility for freeing the raw metadata buffer (using
+ * free_camera_metadata()), or for handing it to another CameraMetadata
+ * instance.
+ */
+ camera_metadata_t* release();
+
+ /**
+ * Clear the metadata buffer and free all storage used by it
+ */
+ void clear();
+
+ /**
+ * Acquire a raw metadata buffer from the caller. After this call,
+ * the caller no longer owns the raw buffer, and must not free or manipulate it.
+ * If CameraMetadata already contains metadata, it is freed.
+ */
+ void acquire(camera_metadata_t* buffer);
+
+ /**
+ * Acquires raw buffer from other CameraMetadata object. After the call, the argument
+ * object no longer has any metadata.
+ */
+ void acquire(CameraMetadata& other);
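+
+ // Ownership sketch (illustrative only): release() hands the raw buffer to the
+ // caller, while acquire() takes ownership without copying.
+ //   camera_metadata_t* raw = meta.release();  // 'meta' no longer references it
+ //   CameraMetadata other;
+ //   other.acquire(raw);                       // 'other' now owns 'raw'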
+
+ /**
+ * Append metadata from another CameraMetadata object.
+ */
+ status_t append(const CameraMetadata& other);
+
+ /**
+ * Append metadata from a raw camera_metadata buffer
+ */
+ status_t append(const camera_metadata* other);
+
+ /**
+ * Number of metadata entries.
+ */
+ size_t entryCount() const;
+
+ /**
+ * Is the buffer empty (no entries)
+ */
+ bool isEmpty() const;
+
+ /**
+ * Sort metadata buffer for faster find
+ */
+ status_t sort();
+
+ /**
+ * Update metadata entry. Will create entry if it doesn't exist already, and
+ * will reallocate the buffer if insufficient space exists. Overloaded for
+ * the various types of valid data.
+ */
+ status_t update(uint32_t tag, const uint8_t* data, size_t data_count);
+ status_t update(uint32_t tag, const int32_t* data, size_t data_count);
+ status_t update(uint32_t tag, const float* data, size_t data_count);
+ status_t update(uint32_t tag, const int64_t* data, size_t data_count);
+ status_t update(uint32_t tag, const double* data, size_t data_count);
+ status_t update(uint32_t tag, const camera_metadata_rational_t* data, size_t data_count);
+ status_t update(uint32_t tag, const String8& string);
+ status_t update(const camera_metadata_ro_entry& entry);
+
+ template <typename T>
+ status_t update(uint32_t tag, Vector<T> data) {
+ return update(tag, data.array(), data.size());
+ }
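+
+ // Typical update()/find() usage (a sketch; ANDROID_CONTROL_MODE and
+ // ANDROID_CONTROL_MODE_AUTO are standard tags from camera_metadata_tags.h):
+ //   CameraMetadata meta(/*entryCapacity*/ 8, /*dataCapacity*/ 32);
+ //   uint8_t mode = ANDROID_CONTROL_MODE_AUTO;
+ //   meta.update(ANDROID_CONTROL_MODE, &mode, 1);
+ //   camera_metadata_entry entry = meta.find(ANDROID_CONTROL_MODE);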
+
+ /**
+ * Check if a metadata entry exists for a given tag id
+ *
+ */
+ bool exists(uint32_t tag) const;
+
+ /**
+ * Get metadata entry by tag id
+ */
+ camera_metadata_entry find(uint32_t tag);
+
+ /**
+ * Get metadata entry by tag id, with no editing
+ */
+ camera_metadata_ro_entry find(uint32_t tag) const;
+
+ /**
+ * Delete metadata entry by tag
+ */
+ status_t erase(uint32_t tag);
+
+ /**
+ * Swap the underlying camera metadata between this and the other
+ * metadata object.
+ */
+ void swap(CameraMetadata& other);
+
+ /**
+ * Dump contents into FD for debugging. The verbosity levels are
+ * 0: Tag entry information only, no data values
+ * 1: Level 0 plus at most 16 data values per entry
+ * 2: All information
+ *
+ * The indentation parameter sets the number of spaces to add to the start
+ * of each line of output.
+ */
+ void dump(int fd, int verbosity = 1, int indentation = 0) const;
+
+ /**
+ * Find tag id for a given tag name, also checking vendor tags if available.
+ * On success, returns OK and writes the tag id into tag.
+ *
+ * This is a slow method.
+ */
+ static status_t getTagFromName(const char* name, const VendorTagDescriptor* vTags,
+ uint32_t* tag);
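+
+ // Sketch (illustrative; the tag name is hypothetical): resolving a vendor tag
+ // by name, where 'vTags' is a descriptor built via createDescriptorFromOps().
+ //   uint32_t tag = 0;
+ //   if (CameraMetadata::getTagFromName("com.vendor.example.mode", vTags, &tag) == OK) {
+ //       // 'tag' now holds the numeric tag id
+ //   }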
+
+ private:
+ camera_metadata_t* mBuffer;
+ mutable bool mLocked;
+
+ /**
+ * Check if tag has a given type
+ */
+ status_t checkType(uint32_t tag, uint8_t expectedType);
+
+ /**
+ * Base update entry method
+ */
+ status_t updateImpl(uint32_t tag, const void* data, size_t data_count);
+
+ /**
+ * Resize metadata buffer if needed by reallocating it and copying it over.
+ */
+ status_t resizeIfNeeded(size_t extraEntries, size_t extraData);
+};
+
+} // namespace helper
+
+// NOTE: Deprecated namespace. This namespace should no longer be used.
+namespace V1_0::helper {
+// Export symbols to the old namespace to preserve compatibility
+typedef android::hardware::camera::common::helper::CameraMetadata CameraMetadata;
+} // namespace V1_0::helper
+
+} // namespace common
+} // namespace camera
+} // namespace hardware
+} // namespace android
+
+#endif
diff --git a/camera/common/default/include/CameraModule.h b/camera/common/default/include/CameraModule.h
new file mode 100644
index 0000000..5c1f8ec
--- /dev/null
+++ b/camera/common/default/include/CameraModule.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CAMERA_COMMON_1_0_CAMERAMODULE_H
+#define CAMERA_COMMON_1_0_CAMERAMODULE_H
+
+#include <string>
+#include <unordered_set>
+
+#include <hardware/camera.h>
+#include <utils/KeyedVector.h>
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+
+#include "CameraMetadata.h"
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace common {
+namespace helper {
+/**
+ * A wrapper class for HAL camera module.
+ *
+ * This class wraps the camera_module_t returned from the HAL and provides a
+ * wrapped get_camera_info implementation through which CameraService can derive
+ * camera characteristics keys defined in newer HAL versions from an older HAL.
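+ *
+ * Illustrative lifecycle sketch (not a normative sequence; 'rawModule' is
+ * assumed to come from hw_get_module()):
+ *   sp<CameraModule> module = new CameraModule(rawModule);
+ *   if (module->init() != OK) return;  // init() must follow construction
+ *   int numCameras = module->getNumberOfCameras();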
+ */
+class CameraModule : public RefBase {
+ public:
+ explicit CameraModule(camera_module_t* module);
+ virtual ~CameraModule();
+
+ // Must be called after construction
+ // Returns OK on success, NO_INIT on failure
+ int init();
+
+ int getCameraInfo(int cameraId, struct camera_info* info);
+ int getDeviceVersion(int cameraId);
+ int getNumberOfCameras(void);
+ int open(const char* id, struct hw_device_t** device);
+ bool isOpenLegacyDefined() const;
+ int openLegacy(const char* id, uint32_t halVersion, struct hw_device_t** device);
+ int setCallbacks(const camera_module_callbacks_t* callbacks);
+ bool isVendorTagDefined() const;
+ void getVendorTagOps(vendor_tag_ops_t* ops);
+ bool isSetTorchModeSupported() const;
+ int setTorchMode(const char* camera_id, bool enable);
+ uint16_t getModuleApiVersion() const;
+ const char* getModuleName() const;
+ uint16_t getHalApiVersion() const;
+ const char* getModuleAuthor() const;
+ // Only used by CameraModuleFixture native test. Do NOT use elsewhere.
+ void* getDso();
+ // Only used by CameraProvider
+ void removeCamera(int cameraId);
+ int getPhysicalCameraInfo(int physicalCameraId, camera_metadata_t** physicalInfo);
+ int isStreamCombinationSupported(int cameraId, camera_stream_combination_t* streams);
+ void notifyDeviceStateChange(uint64_t deviceState);
+
+ static bool isLogicalMultiCamera(const common::helper::CameraMetadata& metadata,
+ std::unordered_set<std::string>* physicalCameraIds);
+
+ private:
+ // Derive camera characteristics keys defined after HAL device version
+ static void deriveCameraCharacteristicsKeys(uint32_t deviceVersion, CameraMetadata& chars);
+ // Helper function to append available[request|result|chars]Keys
+ static void appendAvailableKeys(CameraMetadata& chars, int32_t keyTag,
+ const Vector<int32_t>& appendKeys);
+ status_t filterOpenErrorCode(status_t err);
+ camera_module_t* mModule;
+ int mNumberOfCameras;
+ KeyedVector<int, camera_info> mCameraInfoMap;
+ KeyedVector<int, int> mDeviceVersionMap;
+ KeyedVector<int, camera_metadata_t*> mPhysicalCameraInfoMap;
+ Mutex mCameraInfoLock;
+};
+
+} // namespace helper
+
+// NOTE: Deprecated namespace. This namespace should no longer be used for the following symbols
+namespace V1_0::helper {
+// Export symbols to the old namespace to preserve compatibility
+typedef android::hardware::camera::common::helper::CameraModule CameraModule;
+} // namespace V1_0::helper
+
+} // namespace common
+} // namespace camera
+} // namespace hardware
+} // namespace android
+
+#endif
diff --git a/camera/common/default/include/CameraParameters.h b/camera/common/default/include/CameraParameters.h
new file mode 100644
index 0000000..d2b5075
--- /dev/null
+++ b/camera/common/default/include/CameraParameters.h
@@ -0,0 +1,714 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CAMERA_PARAMETERS_H
+#define ANDROID_HARDWARE_CAMERA_PARAMETERS_H
+
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace common {
+namespace helper {
+
+struct Size {
+ int width;
+ int height;
+
+ Size() {
+ width = 0;
+ height = 0;
+ }
+
+ Size(int w, int h) {
+ width = w;
+ height = h;
+ }
+};
+
+class CameraParameters {
+ public:
+ CameraParameters();
+ CameraParameters(const String8& params) { unflatten(params); }
+ ~CameraParameters();
+
+ String8 flatten() const;
+ void unflatten(const String8& params);
+
+ void set(const char* key, const char* value);
+ void set(const char* key, int value);
+ void setFloat(const char* key, float value);
+ const char* get(const char* key) const;
+ int getInt(const char* key) const;
+ float getFloat(const char* key) const;
+
+ void remove(const char* key);
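+
+ // Illustrative flatten()/unflatten() round trip (a sketch; the flattened form
+ // is a ';'-separated list of key=value pairs using the KEY_* constants below):
+ //   CameraParameters p;
+ //   p.set(CameraParameters::KEY_PREVIEW_SIZE, "640x480");
+ //   String8 flat = p.flatten();
+ //   CameraParameters q(flat);  // equivalent to calling q.unflatten(flat)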
+
+ void setPreviewSize(int width, int height);
+ void getPreviewSize(int* width, int* height) const;
+ void getSupportedPreviewSizes(Vector<Size>& sizes) const;
+
+ // Set the dimensions in pixels to the given width and height
+ // for video frames. The given width and height must be one
+ // of the supported dimensions returned from
+ // getSupportedVideoSizes(). Must not be called if
+ // getSupportedVideoSizes() returns an empty Vector of Size.
+ void setVideoSize(int width, int height);
+ // Retrieve the current dimensions (width and height)
+ // in pixels for video frames, which must be one of the
+ // supported dimensions returned from getSupportedVideoSizes().
+ // Must not be called if getSupportedVideoSizes() returns an
+ // empty Vector of Size.
+ void getVideoSize(int* width, int* height) const;
+ // Retrieve a Vector of supported dimensions (width and height)
+ // in pixels for video frames. If the returned Vector of sizes is
+ // empty, the camera does not support calls to setVideoSize()
+ // or getVideoSize(). In addition, it indicates that the camera
+ // has only a single output and does not have separate outputs
+ // for video frames and preview frames.
+ void getSupportedVideoSizes(Vector<Size>& sizes) const;
+ // Retrieve the preferred preview size (width and height) in pixels
+ // for video recording. The given width and height must be one of
+ // supported preview sizes returned from getSupportedPreviewSizes().
+ // Must not be called if getSupportedVideoSizes() returns an empty
+ // Vector of Size; in that case, the width and height returned from
+ // this method are invalid ("-1x-1").
+ void getPreferredPreviewSizeForVideo(int* width, int* height) const;
+
+ void setPreviewFrameRate(int fps);
+ int getPreviewFrameRate() const;
+ void getPreviewFpsRange(int* min_fps, int* max_fps) const;
+ void setPreviewFormat(const char* format);
+ const char* getPreviewFormat() const;
+ void setPictureSize(int width, int height);
+ void getPictureSize(int* width, int* height) const;
+ void getSupportedPictureSizes(Vector<Size>& sizes) const;
+ void setPictureFormat(const char* format);
+ const char* getPictureFormat() const;
+
+ void dump() const;
+ status_t dump(int fd, const Vector<String16>& args) const;
+
+ /**
+ * Returns a Vector containing the supported preview formats
+ * as enums given in graphics.h.
+ */
+ void getSupportedPreviewFormats(Vector<int>& formats) const;
+
+ // Returns true if no keys are present
+ bool isEmpty() const;
+
+ // Parameter keys to communicate between camera application and driver.
+ // The access (read/write, read only, or write only) is viewed from the
+ // perspective of applications, not driver.
+
+ // Preview frame size in pixels (width x height).
+ // Example value: "480x320". Read/Write.
+ static const char KEY_PREVIEW_SIZE[];
+ // Supported preview frame sizes in pixels.
+ // Example value: "800x600,480x320". Read only.
+ static const char KEY_SUPPORTED_PREVIEW_SIZES[];
+ // The current minimum and maximum preview fps. This controls the rate of
+ // preview frames received (CAMERA_MSG_PREVIEW_FRAME). The minimum and
+ // maximum fps must be one of the elements from
+ // KEY_SUPPORTED_PREVIEW_FPS_RANGE parameter.
+ // Example value: "10500,26623"
+ static const char KEY_PREVIEW_FPS_RANGE[];
+ // The supported preview fps (frame-per-second) ranges. Each range contains
+ // a minimum fps and maximum fps. If minimum fps equals to maximum fps, the
+ // camera outputs frames in fixed frame rate. If not, the camera outputs
+ // frames in auto frame rate. The actual frame rate fluctuates between the
+ // minimum and the maximum. The list has at least one element. The list is
+ // sorted from small to large (first by maximum fps and then minimum fps).
+ // Example value: "(10500,26623),(15000,26623),(30000,30000)"
+ static const char KEY_SUPPORTED_PREVIEW_FPS_RANGE[];
+ // The image format for preview frames. See CAMERA_MSG_PREVIEW_FRAME in
+ // frameworks/av/include/camera/Camera.h. The default is
+ // PIXEL_FORMAT_YUV420SP. Example value: "yuv420sp" or PIXEL_FORMAT_XXX
+ // constants. Read/write.
+ static const char KEY_PREVIEW_FORMAT[];
+ // Supported image formats for preview frames.
+ // Example value: "yuv420sp,yuv422i-yuyv". Read only.
+ static const char KEY_SUPPORTED_PREVIEW_FORMATS[];
+ // Number of preview frames per second. This is the target frame rate. The
+ // actual frame rate depends on the driver.
+ // Example value: "15". Read/write.
+ static const char KEY_PREVIEW_FRAME_RATE[];
+ // Supported number of preview frames per second.
+ // Example value: "24,15,10". Read.
+ static const char KEY_SUPPORTED_PREVIEW_FRAME_RATES[];
+ // The dimensions for captured pictures in pixels (width x height).
+ // Example value: "1024x768". Read/write.
+ static const char KEY_PICTURE_SIZE[];
+ // Supported dimensions for captured pictures in pixels.
+ // Example value: "2048x1536,1024x768". Read only.
+ static const char KEY_SUPPORTED_PICTURE_SIZES[];
+ // The image format for captured pictures. See CAMERA_MSG_COMPRESSED_IMAGE
+ // in frameworks/base/include/camera/Camera.h.
+ // Example value: "jpeg" or PIXEL_FORMAT_XXX constants. Read/write.
+ static const char KEY_PICTURE_FORMAT[];
+ // Supported image formats for captured pictures.
+ // Example value: "jpeg,rgb565". Read only.
+ static const char KEY_SUPPORTED_PICTURE_FORMATS[];
+ // The width (in pixels) of EXIF thumbnail in Jpeg picture.
+ // Example value: "512". Read/write.
+ static const char KEY_JPEG_THUMBNAIL_WIDTH[];
+ // The height (in pixels) of EXIF thumbnail in Jpeg picture.
+ // Example value: "384". Read/write.
+ static const char KEY_JPEG_THUMBNAIL_HEIGHT[];
+ // Supported EXIF thumbnail sizes (width x height). 0x0 means no thumbnail
+ // in EXIF.
+ // Example value: "512x384,320x240,0x0". Read only.
+ static const char KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES[];
+ // The quality of the EXIF thumbnail in Jpeg picture. The range is 1 to 100,
+ // with 100 being the best.
+ // Example value: "90". Read/write.
+ static const char KEY_JPEG_THUMBNAIL_QUALITY[];
+ // Jpeg quality of captured picture. The range is 1 to 100, with 100 being
+ // the best.
+ // Example value: "90". Read/write.
+ static const char KEY_JPEG_QUALITY[];
+ // The rotation angle in degrees relative to the orientation of the camera.
+ // This affects the pictures returned from CAMERA_MSG_COMPRESSED_IMAGE. The
+ // camera driver may set orientation in the EXIF header without rotating the
+ // picture. Or the driver may rotate the picture and the EXIF thumbnail. If
+ // the Jpeg picture is rotated, the orientation in the EXIF header will be
+ // missing or 1 (row #0 is top and column #0 is left side).
+ //
+ // Note that the JPEG pictures of front-facing cameras are not mirrored
+ // as in preview display.
+ //
+ // For example, suppose the natural orientation of the device is portrait.
+ // The device is rotated 270 degrees clockwise, so the device orientation is
+ // 270. Suppose a back-facing camera sensor is mounted in landscape and the
+ // top side of the camera sensor is aligned with the right edge of the
+ // display in natural orientation. So the camera orientation is 90. The
+ // rotation should be set to 0 (270 + 90).
+ //
+ // Example value: "0" or "90" or "180" or "270". Write only.
+ static const char KEY_ROTATION[];
+ // GPS latitude coordinate. GPSLatitude and GPSLatitudeRef will be stored in
+ // JPEG EXIF header.
+ // Example value: "25.032146" or "-33.462809". Write only.
+ static const char KEY_GPS_LATITUDE[];
+ // GPS longitude coordinate. GPSLongitude and GPSLongitudeRef will be stored
+ // in JPEG EXIF header.
+ // Example value: "121.564448" or "-70.660286". Write only.
+ static const char KEY_GPS_LONGITUDE[];
+ // GPS altitude. GPSAltitude and GPSAltitudeRef will be stored in JPEG EXIF
+ // header.
+ // Example value: "21.0" or "-5". Write only.
+ static const char KEY_GPS_ALTITUDE[];
+ // GPS timestamp (UTC in seconds since January 1, 1970). This should be
+ // stored in JPEG EXIF header.
+ // Example value: "1251192757". Write only.
+ static const char KEY_GPS_TIMESTAMP[];
+ // GPS Processing Method
+ // Example value: "GPS" or "NETWORK". Write only.
+ static const char KEY_GPS_PROCESSING_METHOD[];
+ // Current white balance setting.
+ // Example value: "auto" or WHITE_BALANCE_XXX constants. Read/write.
+ static const char KEY_WHITE_BALANCE[];
+ // Supported white balance settings.
+ // Example value: "auto,incandescent,daylight". Read only.
+ static const char KEY_SUPPORTED_WHITE_BALANCE[];
+ // Current color effect setting.
+ // Example value: "none" or EFFECT_XXX constants. Read/write.
+ static const char KEY_EFFECT[];
+ // Supported color effect settings.
+ // Example value: "none,mono,sepia". Read only.
+ static const char KEY_SUPPORTED_EFFECTS[];
+ // Current antibanding setting.
+ // Example value: "auto" or ANTIBANDING_XXX constants. Read/write.
+ static const char KEY_ANTIBANDING[];
+ // Supported antibanding settings.
+ // Example value: "auto,50hz,60hz,off". Read only.
+ static const char KEY_SUPPORTED_ANTIBANDING[];
+ // Current scene mode.
+ // Example value: "auto" or SCENE_MODE_XXX constants. Read/write.
+ static const char KEY_SCENE_MODE[];
+ // Supported scene mode settings.
+ // Example value: "auto,night,fireworks". Read only.
+ static const char KEY_SUPPORTED_SCENE_MODES[];
+ // Current flash mode.
+ // Example value: "auto" or FLASH_MODE_XXX constants. Read/write.
+ static const char KEY_FLASH_MODE[];
+ // Supported flash modes.
+ // Example value: "auto,on,off". Read only.
+ static const char KEY_SUPPORTED_FLASH_MODES[];
+ // Current focus mode. This will not be empty. Applications should call
+ // CameraHardwareInterface.autoFocus to start the focus if focus mode is
+ // FOCUS_MODE_AUTO or FOCUS_MODE_MACRO.
+ // Example value: "auto" or FOCUS_MODE_XXX constants. Read/write.
+ static const char KEY_FOCUS_MODE[];
+ // Supported focus modes.
+ // Example value: "auto,macro,fixed". Read only.
+ static const char KEY_SUPPORTED_FOCUS_MODES[];
+ // The maximum number of focus areas supported. This is the maximum length
+ // of KEY_FOCUS_AREAS.
+ // Example value: "0" or "2". Read only.
+ static const char KEY_MAX_NUM_FOCUS_AREAS[];
+ // Current focus areas.
+ //
+ // Before accessing this parameter, apps should check
+ // KEY_MAX_NUM_FOCUS_AREAS first to know the maximum number of focus
+ // areas. If the value is 0, focus areas are not supported.
+ //
+ // Each focus area is a five-element int array. The first four elements are
+ // the rectangle of the area (left, top, right, bottom). The direction is
+ // relative to the sensor orientation, that is, what the sensor sees. The
+ // direction is not affected by the rotation or mirroring of
+ // CAMERA_CMD_SET_DISPLAY_ORIENTATION. Coordinates range from -1000 to 1000.
+ // (-1000,-1000) is the upper left point. (1000, 1000) is the lower right
+ // point. The width and height of focus areas cannot be 0 or negative.
+ //
+ // The fifth element is the weight. Values for weight must range from 1 to
+ // 1000. The weight should be interpreted as a per-pixel weight - all
+ // pixels in the area have the specified weight. This means a small area
+ // with the same weight as a larger area will have less influence on the
+ // focusing than the larger area. Focus areas can partially overlap and the
+ // driver will add the weights in the overlap region.
+ //
+ // A special case of single focus area (0,0,0,0,0) means driver to decide
+ // the focus area. For example, the driver may use more signals to decide
+ // focus areas and change them dynamically. Apps can set (0,0,0,0,0) if they
+ // want the driver to decide focus areas.
+ //
+ // Focus areas are relative to the current field of view (KEY_ZOOM). No
+ // matter what the zoom level is, (-1000,-1000) represents the top of the
+ // currently visible camera frame. The focus area cannot be set to be
+ // outside the current field of view, even when using zoom.
+ //
+ // Focus area only has effect if the current focus mode is FOCUS_MODE_AUTO,
+ // FOCUS_MODE_MACRO, FOCUS_MODE_CONTINUOUS_VIDEO, or
+ // FOCUS_MODE_CONTINUOUS_PICTURE.
+ // Example value: "(-10,-10,0,0,300),(0,0,10,10,700)". Read/write.
+ static const char KEY_FOCUS_AREAS[];
+ // Focal length in millimeter.
+ // Example value: "4.31". Read only.
+ static const char KEY_FOCAL_LENGTH[];
+ // Horizontal angle of view in degrees.
+ // Example value: "54.8". Read only.
+ static const char KEY_HORIZONTAL_VIEW_ANGLE[];
+ // Vertical angle of view in degrees.
+ // Example value: "42.5". Read only.
+ static const char KEY_VERTICAL_VIEW_ANGLE[];
+ // Exposure compensation index. 0 means exposure is not adjusted.
+ // Example value: "-5" or "5". Read/write.
+ static const char KEY_EXPOSURE_COMPENSATION[];
+ // The maximum exposure compensation index (>=0).
+ // Example value: "6". Read only.
+ static const char KEY_MAX_EXPOSURE_COMPENSATION[];
+ // The minimum exposure compensation index (<=0).
+ // Example value: "-6". Read only.
+ static const char KEY_MIN_EXPOSURE_COMPENSATION[];
+ // The exposure compensation step. The exposure compensation index multiplied
+ // by the step equals the EV. Ex: if the exposure compensation index is -6 and step is
+ // 0.3333, EV is -2.
+ // Example value: "0.333333333" or "0.5". Read only.
+ static const char KEY_EXPOSURE_COMPENSATION_STEP[];
+ // The state of the auto-exposure lock. "true" means that
+ // auto-exposure is locked to its current value and will not
+ // change. "false" means the auto-exposure routine is free to
+ // change exposure values. If auto-exposure is already locked,
+ // setting this to true again has no effect (the driver will not
+ // recalculate exposure values). Changing exposure compensation
+ // settings will still affect the exposure settings while
+ // auto-exposure is locked. Stopping preview or taking a still
+ // image will not change the lock. In conjunction with
+ // exposure compensation, this allows for capturing multi-exposure
+ // brackets with known relative exposure values. Locking
+ // auto-exposure after open but before the first call to
+ // startPreview may result in severely over- or under-exposed
+ // images. The driver will not change the AE lock after
+ // auto-focus completes.
+ static const char KEY_AUTO_EXPOSURE_LOCK[];
+ // Whether locking the auto-exposure is supported. "true" means it is, and
+ // "false" or this key not existing means it is not supported.
+ static const char KEY_AUTO_EXPOSURE_LOCK_SUPPORTED[];
+ // The state of the auto-white balance lock. "true" means that
+ // auto-white balance is locked to its current value and will not
+ // change. "false" means the auto-white balance routine is free to
+ // change white balance values. If auto-white balance is already
+ // locked, setting this to true again has no effect (the driver
+ // will not recalculate white balance values). Stopping preview or
+ // taking a still image will not change the lock. In conjunction
+ // with exposure compensation, this allows for capturing
+ // multi-exposure brackets with fixed white balance. Locking
+ // auto-white balance after open but before the first call to
+ // startPreview may result in severely incorrect color. The
+ // driver will not change the AWB lock after auto-focus
+ // completes.
+ static const char KEY_AUTO_WHITEBALANCE_LOCK[];
+ // Whether locking the auto-white balance is supported. "true"
+ // means it is, and "false" or this key not existing means it is
+ // not supported.
+ static const char KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED[];
+
+ // The maximum number of metering areas supported. This is the maximum
+ // length of KEY_METERING_AREAS.
+ // Example value: "0" or "2". Read only.
+ static const char KEY_MAX_NUM_METERING_AREAS[];
+ // Current metering areas. Camera driver uses these areas to decide
+ // exposure.
+ //
+ // Before accessing this parameter, apps should check
+ // KEY_MAX_NUM_METERING_AREAS first to know the maximum number of metering
+ // areas. If the value is 0, metering areas are not supported.
+ //
+ // Each metering area is a rectangle with specified weight. The direction is
+ // relative to the sensor orientation, that is, what the sensor sees. The
+ // direction is not affected by the rotation or mirroring of
+ // CAMERA_CMD_SET_DISPLAY_ORIENTATION. Coordinates of the rectangle range
+ // from -1000 to 1000. (-1000, -1000) is the upper left point. (1000, 1000)
+ // is the lower right point. The width and height of metering areas cannot
+ // be 0 or negative.
+ //
+ // The fifth element is the weight. Values for weight must range from 1 to
+ // 1000. The weight should be interpreted as a per-pixel weight - all
+ // pixels in the area have the specified weight. This means a small area
+ // with the same weight as a larger area will have less influence on the
+ // metering than the larger area. Metering areas can partially overlap and
+ // the driver will add the weights in the overlap region.
+ //
+ // A special case of all-zero single metering area means driver to decide
+ // the metering area. For example, the driver may use more signals to decide
+ // metering areas and change them dynamically. Apps can set all-zero if they
+ // want the driver to decide metering areas.
+ //
+ // Metering areas are relative to the current field of view (KEY_ZOOM).
+ // No matter what the zoom level is, (-1000,-1000) represents the top of the
+ // currently visible camera frame. The metering area cannot be set to be
+ // outside the current field of view, even when using zoom.
+ //
+ // No matter what the metering areas are, the final exposure is compensated
+ // by KEY_EXPOSURE_COMPENSATION.
+ // Example value: "(-10,-10,0,0,300),(0,0,10,10,700)". Read/write.
+ static const char KEY_METERING_AREAS[];
+ // Current zoom value.
+ // Example value: "0" or "6". Read/write.
+ static const char KEY_ZOOM[];
+ // Maximum zoom value.
+ // Example value: "6". Read only.
+ static const char KEY_MAX_ZOOM[];
+ // The zoom ratios of all zoom values. The zoom ratio is in 1/100
+ // increments. Ex: a zoom of 3.2x is returned as 320. The number of list
+ // elements is KEY_MAX_ZOOM + 1. The first element is always 100. The last
+ // element is the zoom ratio of zoom value KEY_MAX_ZOOM.
+ // Example value: "100,150,200,250,300,350,400". Read only.
+ static const char KEY_ZOOM_RATIOS[];
+ // Whether zoom is supported. Zoom is supported if the value is "true". Zoom
+ // is not supported if the value is not "true" or the key does not exist.
+ // Example value: "true". Read only.
+ static const char KEY_ZOOM_SUPPORTED[];
+ // Whether smooth zoom is supported. Smooth zoom is supported if the
+ // value is "true". It is not supported if the value is not "true" or the
+ // key does not exist.
+ // See CAMERA_CMD_START_SMOOTH_ZOOM, CAMERA_CMD_STOP_SMOOTH_ZOOM, and
+ // CAMERA_MSG_ZOOM in frameworks/base/include/camera/Camera.h.
+ // Example value: "true". Read only.
+ static const char KEY_SMOOTH_ZOOM_SUPPORTED[];
+
+ // The distances (in meters) from the camera to where an object appears to
+ // be in focus. The object is sharpest at the optimal focus distance. The
+ // depth of field is the far focus distance minus near focus distance.
+ //
+ // Focus distances may change after starting auto focus, canceling auto
+ // focus, or starting the preview. Applications can read this anytime to get
+ // the latest focus distances. If the focus mode is FOCUS_MODE_CONTINUOUS,
+ // focus distances may change from time to time.
+ //
+ // This is intended to estimate the distance between the camera and the
+ // subject. After autofocus, the subject distance may be within near and far
+ // focus distance. However, the precision depends on the camera hardware,
+ // autofocus algorithm, the focus area, and the scene. The error can be
+ // large and it should be only used as a reference.
+ //
+ // Far focus distance > optimal focus distance > near focus distance. If
+ // the far focus distance is infinity, the value should be "Infinity" (case
+ // sensitive). The format is three float values separated by commas. The
+ // first is near focus distance. The second is optimal focus distance. The
+ // third is far focus distance.
+ // Example value: "0.95,1.9,Infinity" or "0.049,0.05,0.051". Read only.
+ static const char KEY_FOCUS_DISTANCES[];
+
+ // The current dimensions in pixels (width x height) for video frames.
+ // The width and height must be one of the supported sizes retrieved
+ // via KEY_SUPPORTED_VIDEO_SIZES.
+ // Example value: "1280x720". Read/write.
+ static const char KEY_VIDEO_SIZE[];
+ // A list of the supported dimensions in pixels (width x height)
+ // for video frames. See CAMERA_MSG_VIDEO_FRAME for details in
+ // frameworks/base/include/camera/Camera.h.
+ // Example: "176x144,1280x720". Read only.
+ static const char KEY_SUPPORTED_VIDEO_SIZES[];
+
+ // The maximum number of detected faces supported by hardware face
+ // detection. If the value is 0, hardware face detection is not supported.
+ // Example: "5". Read only
+ static const char KEY_MAX_NUM_DETECTED_FACES_HW[];
+
+ // The maximum number of detected faces supported by software face
+ // detection. If the value is 0, software face detection is not supported.
+ // Example: "5". Read only
+ static const char KEY_MAX_NUM_DETECTED_FACES_SW[];
+
+ // Preferred preview frame size in pixels for video recording.
+ // The width and height must be one of the supported sizes retrieved
+ // via KEY_SUPPORTED_PREVIEW_SIZES. This key can be used only when
+ // getSupportedVideoSizes() does not return an empty Vector of Size.
+ // Camcorder applications are recommended to set the preview size
+ // to a value that is not larger than the preferred preview size.
+ // In other words, the product of the width and height of the
+ // preview size should not be larger than that of the preferred
+ // preview size. In addition, we recommend choosing a preview size
+ // that has the same aspect ratio as the resolution of video to be
+ // recorded.
+ // Example value: "800x600". Read only.
+ static const char KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO[];
+
+ // The image format for video frames. See CAMERA_MSG_VIDEO_FRAME in
+ // frameworks/base/include/camera/Camera.h.
+ // Example value: "yuv420sp" or PIXEL_FORMAT_XXX constants. Read only.
+ static const char KEY_VIDEO_FRAME_FORMAT[];
+
+ // Sets the hint of the recording mode. If this is true, MediaRecorder.start
+ // may be faster or has less glitches. This should be called before starting
+ // the preview for the best result. But it is allowed to change the hint
+ // while the preview is active. The default value is false.
+ //
+ // The apps can still call Camera.takePicture when the hint is true. The
+ // apps can call MediaRecorder.start when the hint is false. But the
+ // performance may be worse.
+ // Example value: "true" or "false". Read/write.
+ static const char KEY_RECORDING_HINT[];
+
+ // Returns true if video snapshot is supported. That is, applications
+ // can call Camera.takePicture during recording. Applications do not need to
+ // call Camera.startPreview after taking a picture. The preview will be
+ // still active. Other than that, taking a picture during recording is
+ // identical to taking a picture normally. All settings and methods related
+ // to takePicture work identically. Ex: KEY_PICTURE_SIZE,
+ // KEY_SUPPORTED_PICTURE_SIZES, KEY_JPEG_QUALITY, KEY_ROTATION, etc.
+ // The picture will have an EXIF header. FLASH_MODE_AUTO and FLASH_MODE_ON
+ // also still work, but the video will record the flash.
+ //
+ // Applications can set shutter callback as null to avoid the shutter
+ // sound. It is also recommended to set raw picture and post view callbacks
+ // to null to avoid the interrupt of preview display.
+ //
+ // Field-of-view of the recorded video may be different from that of the
+ // captured pictures.
+ // Example value: "true" or "false". Read only.
+ static const char KEY_VIDEO_SNAPSHOT_SUPPORTED[];
+
+ // The state of the video stabilization. If set to true, both the
+ // preview stream and the recorded video stream are stabilized by
+ // the camera. Only valid to set if KEY_VIDEO_STABILIZATION_SUPPORTED is
+ // set to true.
+ //
+ // The value of this key can be changed any time the camera is
+ // open. If preview or recording is active, it is acceptable for
+ // there to be a slight video glitch when video stabilization is
+ // toggled on and off.
+ //
+ // This only stabilizes video streams (between-frames stabilization), and
+ // has no effect on still image capture.
+ static const char KEY_VIDEO_STABILIZATION[];
+
+ // Returns true if video stabilization is supported. That is, applications
+ // can set KEY_VIDEO_STABILIZATION to true and have a stabilized preview
+ // stream and record stabilized videos.
+ static const char KEY_VIDEO_STABILIZATION_SUPPORTED[];
+
+ // Supported modes for special effects with light.
+ // Example values: "lowlight,hdr".
+ static const char KEY_LIGHTFX[];
+
+ // Value for KEY_ZOOM_SUPPORTED or KEY_SMOOTH_ZOOM_SUPPORTED.
+ static const char TRUE[];
+ static const char FALSE[];
+
+ // Value for KEY_FOCUS_DISTANCES.
+ static const char FOCUS_DISTANCE_INFINITY[];
+
+ // Values for white balance settings.
+ static const char WHITE_BALANCE_AUTO[];
+ static const char WHITE_BALANCE_INCANDESCENT[];
+ static const char WHITE_BALANCE_FLUORESCENT[];
+ static const char WHITE_BALANCE_WARM_FLUORESCENT[];
+ static const char WHITE_BALANCE_DAYLIGHT[];
+ static const char WHITE_BALANCE_CLOUDY_DAYLIGHT[];
+ static const char WHITE_BALANCE_TWILIGHT[];
+ static const char WHITE_BALANCE_SHADE[];
+
+ // Values for effect settings.
+ static const char EFFECT_NONE[];
+ static const char EFFECT_MONO[];
+ static const char EFFECT_NEGATIVE[];
+ static const char EFFECT_SOLARIZE[];
+ static const char EFFECT_SEPIA[];
+ static const char EFFECT_POSTERIZE[];
+ static const char EFFECT_WHITEBOARD[];
+ static const char EFFECT_BLACKBOARD[];
+ static const char EFFECT_AQUA[];
+
+ // Values for antibanding settings.
+ static const char ANTIBANDING_AUTO[];
+ static const char ANTIBANDING_50HZ[];
+ static const char ANTIBANDING_60HZ[];
+ static const char ANTIBANDING_OFF[];
+
+ // Values for flash mode settings.
+ // Flash will not be fired.
+ static const char FLASH_MODE_OFF[];
+ // Flash will be fired automatically when required. The flash may be fired
+ // during preview, auto-focus, or snapshot depending on the driver.
+ static const char FLASH_MODE_AUTO[];
+ // Flash will always be fired during snapshot. The flash may also be
+ // fired during preview or auto-focus depending on the driver.
+ static const char FLASH_MODE_ON[];
+ // Flash will be fired in red-eye reduction mode.
+ static const char FLASH_MODE_RED_EYE[];
+ // Constant emission of light during preview, auto-focus and snapshot.
+ // This can also be used for video recording.
+ static const char FLASH_MODE_TORCH[];
+
+ // Values for scene mode settings.
+ static const char SCENE_MODE_AUTO[];
+ static const char SCENE_MODE_ACTION[];
+ static const char SCENE_MODE_PORTRAIT[];
+ static const char SCENE_MODE_LANDSCAPE[];
+ static const char SCENE_MODE_NIGHT[];
+ static const char SCENE_MODE_NIGHT_PORTRAIT[];
+ static const char SCENE_MODE_THEATRE[];
+ static const char SCENE_MODE_BEACH[];
+ static const char SCENE_MODE_SNOW[];
+ static const char SCENE_MODE_SUNSET[];
+ static const char SCENE_MODE_STEADYPHOTO[];
+ static const char SCENE_MODE_FIREWORKS[];
+ static const char SCENE_MODE_SPORTS[];
+ static const char SCENE_MODE_PARTY[];
+ static const char SCENE_MODE_CANDLELIGHT[];
+ // Applications are looking for a barcode. Camera driver will be optimized
+ // for barcode reading.
+ static const char SCENE_MODE_BARCODE[];
+ // A high-dynamic range mode. In this mode, the HAL module will use a
+ // capture strategy that extends the dynamic range of the captured
+ // image in some fashion. Only the final image is returned.
+ static const char SCENE_MODE_HDR[];
+
+ // Pixel color formats for KEY_PREVIEW_FORMAT, KEY_PICTURE_FORMAT,
+ // and KEY_VIDEO_FRAME_FORMAT
+ static const char PIXEL_FORMAT_YUV422SP[];
+ static const char PIXEL_FORMAT_YUV420SP[]; // NV21
+ static const char PIXEL_FORMAT_YUV422I[]; // YUY2
+ static const char PIXEL_FORMAT_YUV420P[]; // YV12
+ static const char PIXEL_FORMAT_RGB565[];
+ static const char PIXEL_FORMAT_RGBA8888[];
+ static const char PIXEL_FORMAT_JPEG[];
+ // Raw bayer format used for images, which is 10 bit precision samples
+ // stored in 16 bit words. The filter pattern is RGGB.
+ static const char PIXEL_FORMAT_BAYER_RGGB[];
+ // Pixel format is not known to the framework
+ static const char PIXEL_FORMAT_ANDROID_OPAQUE[];
+
+ // Values for focus mode settings.
+ // Auto-focus mode. Applications should call
+ // CameraHardwareInterface.autoFocus to start the focus in this mode.
+ static const char FOCUS_MODE_AUTO[];
+ // Focus is set at infinity. Applications should not call
+ // CameraHardwareInterface.autoFocus in this mode.
+ static const char FOCUS_MODE_INFINITY[];
+ // Macro (close-up) focus mode. Applications should call
+ // CameraHardwareInterface.autoFocus to start the focus in this mode.
+ static const char FOCUS_MODE_MACRO[];
+ // Focus is fixed. The camera is always in this mode if the focus is not
+ // adjustable. If the camera has auto-focus, this mode can fix the
+ // focus, which is usually at hyperfocal distance. Applications should
+ // not call CameraHardwareInterface.autoFocus in this mode.
+ static const char FOCUS_MODE_FIXED[];
+ // Extended depth of field (EDOF). Focusing is done digitally and
+ // continuously. Applications should not call
+ // CameraHardwareInterface.autoFocus in this mode.
+ static const char FOCUS_MODE_EDOF[];
+ // Continuous auto focus mode intended for video recording. The camera
+ // continuously tries to focus. This is the best choice for video
+ // recording because the focus changes smoothly. Applications can still
+ // call CameraHardwareInterface.takePicture in this mode but the subject may
+ // not be in focus. Auto focus starts when the parameter is set.
+ //
+ // Applications can call CameraHardwareInterface.autoFocus in this mode. The
+ // focus callback will immediately return with a boolean that indicates
+ // whether the focus is sharp or not. The focus position is locked after
+ // autoFocus call. If applications want to resume the continuous focus,
+ // cancelAutoFocus must be called. Restarting the preview will not resume
+ // the continuous autofocus. To stop continuous focus, applications should
+ // change the focus mode to other modes.
+ static const char FOCUS_MODE_CONTINUOUS_VIDEO[];
+ // Continuous auto focus mode intended for taking pictures. The camera
+ // continuously tries to focus. The speed of focus change is more aggressive
+ // than FOCUS_MODE_CONTINUOUS_VIDEO. Auto focus starts when the parameter is
+ // set.
+ //
+ // Applications can call CameraHardwareInterface.autoFocus in this mode. If
+ // the autofocus is in the middle of scanning, the focus callback will
+ // return when it completes. If the autofocus is not scanning, focus
+ // callback will immediately return with a boolean that indicates whether
+ // the focus is sharp or not. The apps can then decide if they want to take
+ // a picture immediately or to change the focus mode to auto, and run a full
+ // autofocus cycle. The focus position is locked after the autoFocus call. If
+ // applications want to resume the continuous focus, cancelAutoFocus must be
+ // called. Restarting the preview will not resume the continuous autofocus.
+ // To stop continuous focus, applications should change the focus mode to
+ // other modes.
+ static const char FOCUS_MODE_CONTINUOUS_PICTURE[];
+
+ // Values for light special effects
+ // Low-light enhancement mode
+ static const char LIGHTFX_LOWLIGHT[];
+ // High-dynamic range mode
+ static const char LIGHTFX_HDR[];
+
+ /**
+ * Returns the supported preview format as an enum given in graphics.h
+ * corresponding to the format given in the input string, or -1 if no such
+ * conversion exists.
+ */
+ static int previewFormatToEnum(const char* format);
+
+ private:
+ DefaultKeyedVector<String8, String8> mMap;
+};
+
+}; // namespace helper
+
+// NOTE: Deprecated namespace. This namespace should no longer be used for the following symbols
+namespace V1_0::helper {
+// Export symbols to the old namespace to preserve compatibility
+typedef android::hardware::camera::common::helper::CameraParameters CameraParameters;
+typedef android::hardware::camera::common::helper::Size Size;
+} // namespace V1_0::helper
+
+}; // namespace common
+}; // namespace camera
+}; // namespace hardware
+}; // namespace android
+
+#endif
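
The compatibility typedefs above keep code that still spells the old V1_0::helper namespace compiling unchanged. A minimal compile-time sketch of that guarantee, assuming the header is reachable as <CameraParameters.h> through the module's exported include directory:

    // Sketch only: the deprecated V1_0::helper alias and the new helper
    // namespace must name the same CameraParameters type.
    #include <CameraParameters.h>  // assumed include path via export_include_dirs

    #include <type_traits>

    static_assert(std::is_same_v<
                          ::android::hardware::camera::common::V1_0::helper::CameraParameters,
                          ::android::hardware::camera::common::helper::CameraParameters>,
                  "old-namespace alias should resolve to the new helper type");
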
diff --git a/camera/common/default/include/Exif.h b/camera/common/default/include/Exif.h
new file mode 100644
index 0000000..6974b8e
--- /dev/null
+++ b/camera/common/default/include/Exif.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_CAMERA_COMMON_1_0_EXIF_H
+#define ANDROID_HARDWARE_INTERFACES_CAMERA_COMMON_1_0_EXIF_H
+
+#include "CameraMetadata.h"
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace common {
+namespace helper {
+
+// This is based on the original ChromeOS ARC implementation of a V4L2 HAL
+
+ // ExifUtils can generate an APP1 segment with the tags the caller has set.
+ // ExifUtils can also add a thumbnail to the APP1 segment if a thumbnail size
+ // is specified. ExifUtils can be reused for different images by calling
+ // initialize().
+//
+ // Example of using this class:
+ // std::unique_ptr<ExifUtils> utils(ExifUtils::create());
+ // utils->initialize();
+ // ...
+ // // Call ExifUtils functions to set Exif tags.
+ // ...
+ // utils->generateApp1(thumbnail_buffer, thumbnail_size);
+ // unsigned int app1Length = utils->getApp1Length();
+ // uint8_t* app1Buffer = new uint8_t[app1Length];
+ // memcpy(app1Buffer, utils->getApp1Buffer(), app1Length);
+class ExifUtils {
+ public:
+ virtual ~ExifUtils();
+
+ static ExifUtils* create();
+
+ // initialize() can be called multiple times; each call clears any previously
+ // set Exif tags.
+ virtual bool initialize() = 0;
+
+ // Set all known fields from a metadata structure
+ virtual bool setFromMetadata(const CameraMetadata& metadata, const size_t imageWidth,
+ const size_t imageHeight) = 0;
+
+ // Sets the lens aperture.
+ // Returns false if memory allocation fails.
+ virtual bool setAperture(uint32_t numerator, uint32_t denominator) = 0;
+
+ // Sets the value of brightness.
+ // Returns false if memory allocation fails.
+ virtual bool setBrightness(int32_t numerator, int32_t denominator) = 0;
+
+ // Sets the color space.
+ // Returns false if memory allocation fails.
+ virtual bool setColorSpace(uint16_t color_space) = 0;
+
+ // Sets the information to compressed data.
+ // Returns false if memory allocation fails.
+ virtual bool setComponentsConfiguration(const std::string& components_configuration) = 0;
+
+ // Sets the compression scheme used for the image data.
+ // Returns false if memory allocation fails.
+ virtual bool setCompression(uint16_t compression) = 0;
+
+ // Sets image contrast.
+ // Returns false if memory allocation fails.
+ virtual bool setContrast(uint16_t contrast) = 0;
+
+ // Sets the date and time the image was last modified. It takes local time.
+ // The name of the tag is DateTime in IFD0.
+ // Returns false if memory allocation fails.
+ virtual bool setDateTime(const struct tm& t) = 0;
+
+ // Sets the image description.
+ // Returns false if memory allocation fails.
+ virtual bool setDescription(const std::string& description) = 0;
+
+ // Sets the digital zoom ratio. If the numerator is 0, it means digital zoom
+ // was not used.
+ // Returns false if memory allocation fails.
+ virtual bool setDigitalZoomRatio(uint32_t numerator, uint32_t denominator) = 0;
+
+ // Sets the exposure bias.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureBias(int32_t numerator, int32_t denominator) = 0;
+
+ // Sets the exposure mode set when the image was shot.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureMode(uint16_t exposure_mode) = 0;
+
+ // Sets the program used by the camera to set exposure when the picture is
+ // taken.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureProgram(uint16_t exposure_program) = 0;
+
+ // Sets the exposure time, given in seconds.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureTime(uint32_t numerator, uint32_t denominator) = 0;
+
+ // Sets the status of flash.
+ // Returns false if memory allocation fails.
+ virtual bool setFlash(uint16_t flash) = 0;
+
+ // Sets the F number.
+ // Returns false if memory allocation fails.
+ virtual bool setFNumber(uint32_t numerator, uint32_t denominator) = 0;
+
+ // Sets the focal length of lens used to take the image in millimeters.
+ // Returns false if memory allocation fails.
+ virtual bool setFocalLength(uint32_t numerator, uint32_t denominator) = 0;
+
+ // Sets the degree of overall image gain adjustment.
+ // Returns false if memory allocation fails.
+ virtual bool setGainControl(uint16_t gain_control) = 0;
+
+ // Sets the altitude in meters.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsAltitude(double altitude) = 0;
+
+ // Sets the latitude with degrees minutes seconds format.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsLatitude(double latitude) = 0;
+
+ // Sets the longitude with degrees minutes seconds format.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsLongitude(double longitude) = 0;
+
+ // Sets GPS processing method.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsProcessingMethod(const std::string& method) = 0;
+
+ // Sets GPS date stamp and time stamp (atomic clock). It takes UTC time.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsTimestamp(const struct tm& t) = 0;
+
+ // Sets the height (number of rows) of the main image.
+ // Returns false if memory allocation fails.
+ virtual bool setImageHeight(uint32_t length) = 0;
+
+ // Sets the width (number of columns) of the main image.
+ // Returns false if memory allocation fails.
+ virtual bool setImageWidth(uint32_t width) = 0;
+
+ // Sets the ISO speed.
+ // Returns false if memory allocation fails.
+ virtual bool setIsoSpeedRating(uint16_t iso_speed_ratings) = 0;
+
+ // Sets the kind of light source.
+ // Returns false if memory allocation fails.
+ virtual bool setLightSource(uint16_t light_source) = 0;
+
+ // Sets the smallest F number of the lens.
+ // Returns false if memory allocation fails.
+ virtual bool setMaxAperture(uint32_t numerator, uint32_t denominator) = 0;
+
+ // Sets the metering mode.
+ // Returns false if memory allocation fails.
+ virtual bool setMeteringMode(uint16_t metering_mode) = 0;
+
+ // Sets image orientation.
+ // Returns false if memory allocation fails.
+ virtual bool setOrientation(uint16_t orientation) = 0;
+
+ // Sets the unit for measuring XResolution and YResolution.
+ // Returns false if memory allocation fails.
+ virtual bool setResolutionUnit(uint16_t resolution_unit) = 0;
+
+ // Sets image saturation.
+ // Returns false if memory allocation fails.
+ virtual bool setSaturation(uint16_t saturation) = 0;
+
+ // Sets the type of scene that was shot.
+ // Returns false if memory allocation fails.
+ virtual bool setSceneCaptureType(uint16_t type) = 0;
+
+ // Sets image sharpness.
+ // Returns false if memory allocation fails.
+ virtual bool setSharpness(uint16_t sharpness) = 0;
+
+ // Sets the shutter speed.
+ // Returns false if memory allocation fails.
+ virtual bool setShutterSpeed(int32_t numerator, int32_t denominator) = 0;
+
+ // Sets the distance to the subject, given in meters.
+ // Returns false if memory allocation fails.
+ virtual bool setSubjectDistance(uint32_t numerator, uint32_t denominator) = 0;
+
+ // Sets the fractional seconds for the <DateTime> tag.
+ // Returns false if memory allocation fails.
+ virtual bool setSubsecTime(const std::string& subsec_time) = 0;
+
+ // Sets the white balance mode set when the image was shot.
+ // Returns false if memory allocation fails.
+ virtual bool setWhiteBalance(uint16_t white_balance) = 0;
+
+ // Sets the number of pixels per resolution unit in the image width.
+ // Returns false if memory allocation fails.
+ virtual bool setXResolution(uint32_t numerator, uint32_t denominator) = 0;
+
+ // Sets the position of chrominance components in relation to the luminance
+ // component.
+ // Returns false if memory allocation fails.
+ virtual bool setYCbCrPositioning(uint16_t ycbcr_positioning) = 0;
+
+ // Sets the number of pixels per resolution unit in the image length.
+ // Returns false if memory allocation fails.
+ virtual bool setYResolution(uint32_t numerator, uint32_t denominator) = 0;
+
+ // Sets the manufacturer of the camera.
+ // Returns false if memory allocation fails.
+ virtual bool setMake(const std::string& make) = 0;
+
+ // Sets the model number of the camera.
+ // Returns false if memory allocation fails.
+ virtual bool setModel(const std::string& model) = 0;
+
+ // Generates APP1 segment.
+ // Returns false if generating APP1 segment fails.
+ virtual bool generateApp1(const void* thumbnail_buffer, uint32_t size) = 0;
+
+ // Gets the buffer of the APP1 segment. This method must be called only after
+ // calling generateApp1().
+ virtual const uint8_t* getApp1Buffer() = 0;
+
+ // Gets the length of the APP1 segment. This method must be called only after
+ // calling generateApp1().
+ virtual unsigned int getApp1Length() = 0;
+};
+
+} // namespace helper
+
+// NOTE: Deprecated namespace. This namespace should no longer be used for the following symbols
+namespace V1_0::helper {
+// Export symbols to the old namespace to preserve compatibility
+typedef android::hardware::camera::common::helper::ExifUtils ExifUtils;
+} // namespace V1_0::helper
+
+} // namespace common
+} // namespace camera
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_INTERFACES_CAMERA_COMMON_1_0_EXIF_H
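
A minimal end-to-end sketch of the call sequence described in the class comment, assuming the caller already has a populated CameraMetadata and an optional thumbnail buffer:

    // Sketch only: builds an APP1 (Exif) segment from camera metadata using
    // the methods declared above. Inputs (metadata, thumbnail) are assumed.
    #include <Exif.h>

    #include <memory>
    #include <vector>

    using android::hardware::camera::common::helper::CameraMetadata;
    using android::hardware::camera::common::helper::ExifUtils;

    std::vector<uint8_t> buildApp1(const CameraMetadata& metadata, size_t imageWidth,
                                   size_t imageHeight, const void* thumbnail,
                                   uint32_t thumbnailSize) {
        std::unique_ptr<ExifUtils> utils(ExifUtils::create());
        if (utils == nullptr || !utils->initialize()) return {};
        if (!utils->setFromMetadata(metadata, imageWidth, imageHeight)) return {};
        if (!utils->generateApp1(thumbnail, thumbnailSize)) return {};
        const uint8_t* app1 = utils->getApp1Buffer();
        return std::vector<uint8_t>(app1, app1 + utils->getApp1Length());
    }
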
diff --git a/camera/common/default/include/HandleImporter.h b/camera/common/default/include/HandleImporter.h
new file mode 100644
index 0000000..5408ba9
--- /dev/null
+++ b/camera/common/default/include/HandleImporter.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CAMERA_COMMON_1_0_HANDLEIMPORTED_H
+#define CAMERA_COMMON_1_0_HANDLEIMPORTED_H
+
+#include <android/hardware/graphics/mapper/2.0/IMapper.h>
+#include <android/hardware/graphics/mapper/3.0/IMapper.h>
+#include <android/hardware/graphics/mapper/4.0/IMapper.h>
+#include <cutils/native_handle.h>
+#include <utils/Mutex.h>
+
+using android::hardware::graphics::mapper::V2_0::IMapper;
+using android::hardware::graphics::mapper::V2_0::YCbCrLayout;
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace common {
+namespace helper {
+
+// Borrowed from graphics HAL. Use this until gralloc mapper HAL is working
+class HandleImporter {
+ public:
+ HandleImporter();
+
+ // In IComposer, any buffer_handle_t is owned by the caller and we need to
+ // make a clone for hwcomposer2. We also need to translate an empty handle
+ // to nullptr. This function does that, in-place.
+ bool importBuffer(buffer_handle_t& handle);
+ void freeBuffer(buffer_handle_t handle);
+ bool importFence(const native_handle_t* handle, int& fd) const;
+ void closeFence(int fd) const;
+
+ // Locks 1-D buffer. Assumes caller has waited for acquire fences.
+ void* lock(buffer_handle_t& buf, uint64_t cpuUsage, size_t size);
+
+ // Locks 2-D buffer. Assumes caller has waited for acquire fences.
+ void* lock(buffer_handle_t& buf, uint64_t cpuUsage, const IMapper::Rect& accessRegion);
+
+ // Assumes caller has waited for acquire fences.
+ YCbCrLayout lockYCbCr(buffer_handle_t& buf, uint64_t cpuUsage,
+ const IMapper::Rect& accessRegion);
+
+ // Query the stride of the first plane in bytes.
+ status_t getMonoPlanarStrideBytes(buffer_handle_t& buf, uint32_t* stride /*out*/);
+
+ int unlock(buffer_handle_t& buf); // returns release fence
+
+ // Query Gralloc4 metadata
+ bool isSmpte2086Present(const buffer_handle_t& buf);
+ bool isSmpte2094_10Present(const buffer_handle_t& buf);
+ bool isSmpte2094_40Present(const buffer_handle_t& buf);
+
+ private:
+ void initializeLocked();
+ void cleanup();
+
+ template <class M, class E>
+ bool importBufferInternal(const sp<M> mapper, buffer_handle_t& handle);
+ template <class M, class E>
+ YCbCrLayout lockYCbCrInternal(const sp<M> mapper, buffer_handle_t& buf, uint64_t cpuUsage,
+ const IMapper::Rect& accessRegion);
+ template <class M, class E>
+ int unlockInternal(const sp<M> mapper, buffer_handle_t& buf);
+
+ Mutex mLock;
+ bool mInitialized;
+ sp<IMapper> mMapperV2;
+ sp<graphics::mapper::V3_0::IMapper> mMapperV3;
+ sp<graphics::mapper::V4_0::IMapper> mMapperV4;
+};
+
+} // namespace helper
+
+// NOTE: Deprecated namespace. This namespace should no longer be used for the following symbols
+namespace V1_0::helper {
+// Export symbols to the old namespace to preserve compatibility
+typedef android::hardware::camera::common::helper::HandleImporter HandleImporter;
+} // namespace V1_0::helper
+
+} // namespace common
+} // namespace camera
+} // namespace hardware
+} // namespace android
+
+#endif // CAMERA_COMMON_1_0_HANDLEIMPORTED_H
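
A minimal lifecycle sketch for HandleImporter (import, lock, unlock, free) through the 1-D lock path, assuming the buffer handle, CPU usage flags, and size are provided by the caller:

    // Sketch only: copies the contents of a gralloc buffer out via the 1-D
    // lock path. Handle, usage flags, and size are assumed caller-provided.
    #include <HandleImporter.h>

    #include <cstring>

    using android::hardware::camera::common::helper::HandleImporter;

    bool copyBufferOut(buffer_handle_t handle, uint64_t cpuUsage, size_t size, void* dst) {
        HandleImporter importer;
        if (!importer.importBuffer(handle)) return false;  // clones the handle in place
        bool ok = false;
        if (void* data = importer.lock(handle, cpuUsage, size)) {
            std::memcpy(dst, data, size);
            int releaseFence = importer.unlock(handle);  // caller owns the release fence
            if (releaseFence >= 0) importer.closeFence(releaseFence);
            ok = true;
        }
        importer.freeBuffer(handle);
        return ok;
    }
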
diff --git a/camera/common/default/include/SimpleThread.h b/camera/common/default/include/SimpleThread.h
new file mode 100644
index 0000000..d1becd6
--- /dev/null
+++ b/camera/common/default/include/SimpleThread.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HARDWARE_INTERFACES_CAMERA_COMMON_SIMPLETHREAD_H_
+#define HARDWARE_INTERFACES_CAMERA_COMMON_SIMPLETHREAD_H_
+
+#include <thread>
+
+namespace android {
+namespace hardware {
+namespace camera {
+namespace common {
+namespace helper {
+
+// A simple looper based on std::thread.
+class SimpleThread {
+ public:
+ SimpleThread();
+ virtual ~SimpleThread();
+
+ // Explicit call to start execution of the thread. No thread is created before this function
+ // is called.
+ virtual void run() final;
+ virtual void requestExitAndWait() final;
+
+ protected:
+ // Main logic of the thread. This function is called repeatedly until it
+ // returns false, at which point thread execution stops.
+ virtual bool threadLoop() = 0;
+
+ // Returns true if the thread execution should stop. Should be used by threadLoop to check if
+ // the thread has been requested to exit.
+ virtual inline bool exitPending() final { return mDone.load(std::memory_order_acquire); }
+
+ private:
+ // Wraps threadLoop in a simple while loop that allows safe exit
+ virtual void runLoop() final;
+
+ // Flag to signal end of thread execution. This flag is checked before every iteration
+ // of threadLoop.
+ std::atomic_bool mDone;
+ std::thread mThread;
+};
+
+} // namespace helper
+} // namespace common
+} // namespace camera
+} // namespace hardware
+} // namespace android
+
+#endif // HARDWARE_INTERFACES_CAMERA_COMMON_SIMPLETHREAD_H_
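
A minimal subclass sketch showing the threadLoop/run/requestExitAndWait contract declared above; the per-iteration work is a placeholder:

    // Sketch only: a trivial SimpleThread subclass. The threadLoop body is a
    // placeholder for real per-iteration work.
    #include <SimpleThread.h>

    class WorkerThread : public android::hardware::camera::common::helper::SimpleThread {
      protected:
        // Invoked repeatedly by the base class; returning false ends the thread.
        bool threadLoop() override {
            if (exitPending()) {
                return false;
            }
            // ... perform one unit of work here ...
            return true;
        }
    };

    // Typical lifetime:
    //   WorkerThread worker;
    //   worker.run();                 // spawns the underlying std::thread
    //   ...
    //   worker.requestExitAndWait();  // signals exit and joins the thread
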
diff --git a/camera/common/default/include/VendorTagDescriptor.h b/camera/common/default/include/VendorTagDescriptor.h
new file mode 100644
index 0000000..3133c26
--- /dev/null
+++ b/camera/common/default/include/VendorTagDescriptor.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CAMERA_COMMON_1_0_VENDORTAGDESCRIPTOR_H
+#define CAMERA_COMMON_1_0_VENDORTAGDESCRIPTOR_H
+
+#include <system/camera_vendor_tags.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+#include <stdint.h>
+#include <unordered_map>
+
+namespace android {
+namespace hardware {
+namespace camera2 {
+namespace params {
+
+/**
+ * VendorTagDescriptor objects are containers for the vendor tag
+ * definitions provided, and are typically used to pass the vendor tag
+ * information enumerated by the HAL to clients of the camera service.
+ */
+class VendorTagDescriptor {
+ public:
+ virtual ~VendorTagDescriptor();
+
+ VendorTagDescriptor();
+ VendorTagDescriptor(const VendorTagDescriptor& src);
+ VendorTagDescriptor& operator=(const VendorTagDescriptor& rhs);
+
+ void copyFrom(const VendorTagDescriptor& src);
+
+ /**
+ * The following 'get*' methods implement the corresponding
+ * functions defined in
+ * system/media/camera/include/system/camera_vendor_tags.h
+ */
+
+ // Returns the number of vendor tags defined.
+ int getTagCount() const;
+
+ // Returns an array containing the ids of the vendor tags defined.
+ void getTagArray(uint32_t* tagArray) const;
+
+ // Returns the section name string for a given vendor tag id.
+ const char* getSectionName(uint32_t tag) const;
+
+ // Returns the index into the section vector returned by getAllSectionNames()
+ // for a given vendor tag id, or -1 if the tag does not exist.
+ ssize_t getSectionIndex(uint32_t tag) const;
+
+ // Returns the tag name string for a given vendor tag id.
+ const char* getTagName(uint32_t tag) const;
+
+ // Returns the tag type for a given vendor tag id.
+ int getTagType(uint32_t tag) const;
+
+ /**
+ * Convenience method to get a vector containing all vendor tag
+ * sections, or an empty vector if none are defined.
+ * The pointer is valid for the lifetime of the VendorTagDescriptor,
+ * or until copyFrom is invoked.
+ */
+ const SortedVector<String8>* getAllSectionNames() const;
+
+ /**
+ * Lookup the tag id for a given tag name and section.
+ *
+ * Returns OK on success, or a negative error code.
+ */
+ status_t lookupTag(const String8& name, const String8& section, /*out*/ uint32_t* tag) const;
+
+ /**
+ * Dump the currently configured vendor tags to a file descriptor.
+ */
+ void dump(int fd, int verbosity, int indentation) const;
+
+ protected:
+ KeyedVector<String8, KeyedVector<String8, uint32_t>*> mReverseMapping;
+ KeyedVector<uint32_t, String8> mTagToNameMap;
+ KeyedVector<uint32_t, uint32_t> mTagToSectionMap; // Value is offset in mSections
+
+ std::unordered_map<uint32_t, int32_t> mTagToTypeMap;
+ SortedVector<String8> mSections;
+ // must be int32_t to be compatible with Parcel::writeInt32
+ int32_t mTagCount;
+
+ vendor_tag_ops mVendorOps;
+};
+} /* namespace params */
+} /* namespace camera2 */
+
+namespace camera {
+namespace common {
+namespace helper {
+
+/**
+ * This version of VendorTagDescriptor must be stored in Android sp<>, and adds support for using it
+ * as a global tag descriptor.
+ *
+ * It's a child class of the basic hardware::camera2::params::VendorTagDescriptor since basic
+ * Parcelable objects cannot require being kept in an sp<> and still work with auto-generated AIDL
+ * interface implementations.
+ */
+class VendorTagDescriptor : public ::android::hardware::camera2::params::VendorTagDescriptor,
+ public LightRefBase<VendorTagDescriptor> {
+ public:
+ /**
+ * Create a VendorTagDescriptor object from the given vendor_tag_ops_t
+ * struct.
+ *
+ * Returns OK on success, or a negative error code.
+ */
+ static status_t createDescriptorFromOps(const vendor_tag_ops_t* vOps,
+ /*out*/
+ sp<VendorTagDescriptor>& descriptor);
+
+ /**
+ * Sets the global vendor tag descriptor to use for this process.
+ * Camera metadata operations that access vendor tags will use the
+ * vendor tag definitions set this way.
+ *
+ * Returns OK on success, or a negative error code.
+ */
+ static status_t setAsGlobalVendorTagDescriptor(const sp<VendorTagDescriptor>& desc);
+
+ /**
+ * Returns the global vendor tag descriptor used by this process.
+ * This will contain NULL if no vendor tags are defined.
+ */
+ static sp<VendorTagDescriptor> getGlobalVendorTagDescriptor();
+
+ /**
+ * Clears the global vendor tag descriptor used by this process.
+ */
+ static void clearGlobalVendorTagDescriptor();
+};
+
+} /* namespace helper */
+} /* namespace common */
+} /* namespace camera */
+
+namespace camera2 {
+namespace params {
+
+class VendorTagDescriptorCache {
+ public:
+ typedef android::hardware::camera::common::helper::VendorTagDescriptor VendorTagDescriptor;
+ VendorTagDescriptorCache(){};
+ int32_t addVendorDescriptor(metadata_vendor_id_t id, sp<VendorTagDescriptor> desc);
+
+ int32_t getVendorTagDescriptor(metadata_vendor_id_t id, sp<VendorTagDescriptor>* desc /*out*/);
+
+ // Returns the number of vendor tags defined.
+ int getTagCount(metadata_vendor_id_t id) const;
+
+ // Returns an array containing the ids of the vendor tags defined.
+ void getTagArray(uint32_t* tagArray, metadata_vendor_id_t id) const;
+
+ // Returns the section name string for a given vendor tag id.
+ const char* getSectionName(uint32_t tag, metadata_vendor_id_t id) const;
+
+ // Returns the tag name string for a given vendor tag id.
+ const char* getTagName(uint32_t tag, metadata_vendor_id_t id) const;
+
+ // Returns the tag type for a given vendor tag id.
+ int getTagType(uint32_t tag, metadata_vendor_id_t id) const;
+
+ /**
+ * Dump the currently configured vendor tags to a file descriptor.
+ */
+ void dump(int fd, int verbosity, int indentation) const;
+
+ protected:
+ std::unordered_map<metadata_vendor_id_t, sp<VendorTagDescriptor>> mVendorMap;
+ struct vendor_tag_cache_ops mVendorCacheOps;
+};
+
+} /* namespace params */
+} /* namespace camera2 */
+
+namespace camera {
+namespace common {
+namespace helper {
+
+class VendorTagDescriptorCache
+ : public ::android::hardware::camera2::params::VendorTagDescriptorCache,
+ public LightRefBase<VendorTagDescriptorCache> {
+ public:
+ /**
+ * Sets the global vendor tag descriptor cache to use for this process.
+ * Camera metadata operations that access vendor tags will use the
+ * vendor tag definitions set this way.
+ *
+ * Returns OK on success, or a negative error code.
+ */
+ static status_t setAsGlobalVendorTagCache(const sp<VendorTagDescriptorCache>& cache);
+
+ /**
+ * Returns the global vendor tag cache used by this process.
+ * This will contain NULL if no vendor tags are defined.
+ */
+ static sp<VendorTagDescriptorCache> getGlobalVendorTagCache();
+
+ /**
+ * Clears the global vendor tag cache used by this process.
+ */
+ static void clearGlobalVendorTagCache();
+};
+
+} // namespace helper
+
+// NOTE: Deprecated namespace. This namespace should no longer be used for the following symbols
+namespace V1_0::helper {
+// Export symbols to the old namespace to preserve compatibility
+typedef android::hardware::camera::common::helper::VendorTagDescriptor VendorTagDescriptor;
+typedef android::hardware::camera::common::helper::VendorTagDescriptorCache
+ VendorTagDescriptorCache;
+} // namespace V1_0::helper
+
+} // namespace common
+} // namespace camera
+} // namespace hardware
+} // namespace android
+
+#endif /* CAMERA_COMMON_1_0_VENDORTAGDESCRIPTOR_H */
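
A minimal sketch of wiring a HAL-provided vendor_tag_ops_t into the process-global descriptor via the static helpers declared above; the ops table is assumed to come from the camera HAL module:

    // Sketch only: creates a VendorTagDescriptor from a HAL-supplied ops table
    // and installs it as the process-global descriptor. vOps is assumed non-null.
    #include <VendorTagDescriptor.h>

    #include <utils/Errors.h>

    using android::sp;
    using android::status_t;
    using android::hardware::camera::common::helper::VendorTagDescriptor;

    status_t installVendorTags(const vendor_tag_ops_t* vOps) {
        sp<VendorTagDescriptor> desc;
        status_t res = VendorTagDescriptor::createDescriptorFromOps(vOps, desc);
        if (res != android::OK) {
            return res;
        }
        // Subsequent camera metadata operations in this process resolve vendor
        // tags against this descriptor.
        return VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
    }
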