[automerger skipped] RESTRICT AUTOMERGE - resolve merge conflicts of dab37c25e3337387809fd35c7cd46abf76088b83 to qt-qpr1-dev am: ec0754c9b5 -s ours am: 8537cde345 -s ours am: b60b4588d5 -s ours am: d4e19b2899 -s ours am: cbe0329167 -s ours am: 0fd10f21a3 -s ours
am skip reason: subject contains skip directive
Original change: https://googleplex-android-review.googlesource.com/c/platform/frameworks/av/+/19823968
Change-Id: Ibdbe59aea5819c18cde3eb7f9f3d9e39cc8de82a
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index be47898..bb880d1 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -52,7 +52,10 @@
parcel->readInt64(&lastCompletedRegularFrameNumber);
parcel->readInt64(&lastCompletedReprocessFrameNumber);
parcel->readInt64(&lastCompletedZslFrameNumber);
-
+ parcel->readBool(&hasReadoutTimestamp);
+ if (hasReadoutTimestamp) {
+ parcel->readInt64(&readoutTimestamp);
+ }
return OK;
}
@@ -82,6 +85,10 @@
parcel->writeInt64(lastCompletedRegularFrameNumber);
parcel->writeInt64(lastCompletedReprocessFrameNumber);
parcel->writeInt64(lastCompletedZslFrameNumber);
+ parcel->writeBool(hasReadoutTimestamp);
+ if (hasReadoutTimestamp) {
+ parcel->writeInt64(readoutTimestamp);
+ }
return OK;
}
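
Note on the CaptureResult parceling change above: readoutTimestamp is serialized as an optional field, with the presence flag written first and the payload only when the flag is set, so reader and writer stay in lockstep. A minimal, self-contained sketch of that pattern (hypothetical SimpleParcel type; the real code uses android::Parcel):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Toy byte-stream stand-in for android::Parcel (illustrative only).
    struct SimpleParcel {
        std::vector<uint8_t> buf;
        size_t pos = 0;
        void writeBool(bool b) { buf.push_back(b ? 1 : 0); }
        void writeInt64(int64_t v) {
            const uint8_t* p = reinterpret_cast<const uint8_t*>(&v);
            buf.insert(buf.end(), p, p + sizeof(v));
        }
        bool readBool() { return buf[pos++] != 0; }
        int64_t readInt64() {
            int64_t v;
            std::memcpy(&v, &buf[pos], sizeof(v));
            pos += sizeof(v);
            return v;
        }
    };

    struct ResultExtras {
        bool hasReadoutTimestamp = false;
        int64_t readoutTimestamp = 0;

        void writeToParcel(SimpleParcel& p) const {
            p.writeBool(hasReadoutTimestamp);      // presence flag first
            if (hasReadoutTimestamp) {
                p.writeInt64(readoutTimestamp);    // payload only when present
            }
        }
        void readFromParcel(SimpleParcel& p) {
            hasReadoutTimestamp = p.readBool();
            readoutTimestamp = hasReadoutTimestamp ? p.readInt64() : 0;
        }
    };
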
diff --git a/camera/include/camera/CaptureResult.h b/camera/include/camera/CaptureResult.h
index f163c1e..de534ab 100644
--- a/camera/include/camera/CaptureResult.h
+++ b/camera/include/camera/CaptureResult.h
@@ -103,6 +103,17 @@
*/
int64_t lastCompletedZslFrameNumber;
+ /**
+ * Whether the readoutTimestamp variable is valid and should be used.
+ */
+ bool hasReadoutTimestamp;
+
+ /**
+ * The readout timestamp of the capture. Its value is equal to the
+ * start-of-exposure timestamp plus the exposure time (and a possible fixed
+ * offset due to sensor crop).
+ */
+ int64_t readoutTimestamp;
/**
* Constructor initializes object as invalid by setting requestId to be -1.
@@ -118,7 +129,9 @@
errorPhysicalCameraId(),
lastCompletedRegularFrameNumber(-1),
lastCompletedReprocessFrameNumber(-1),
- lastCompletedZslFrameNumber(-1) {
+ lastCompletedZslFrameNumber(-1),
+ hasReadoutTimestamp(false),
+ readoutTimestamp(0) {
}
/**
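
Note: the new field's documentation gives the relationship readout = start-of-exposure + exposure time (+ a possible fixed sensor-crop offset). A tiny worked example with made-up numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
        int64_t startOfExposureNs = 1000000000LL;  // SENSOR-base shutter timestamp
        int64_t exposureTimeNs    =   20000000LL;  // 20 ms exposure
        int64_t cropOffsetNs      =     500000LL;  // possible fixed readout offset
        int64_t readoutNs = startOfExposureNs + exposureTimeNs + cropOffsetNs;
        std::printf("readout timestamp: %lld ns\n", (long long) readoutNs);
        return 0;
    }
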
diff --git a/camera/include/camera/camera2/OutputConfiguration.h b/camera/include/camera/camera2/OutputConfiguration.h
index b842885..b7c7f7f 100644
--- a/camera/include/camera/camera2/OutputConfiguration.h
+++ b/camera/include/camera/camera2/OutputConfiguration.h
@@ -43,7 +43,9 @@
TIMESTAMP_BASE_SENSOR = 1,
TIMESTAMP_BASE_MONOTONIC = 2,
TIMESTAMP_BASE_REALTIME = 3,
- TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED = 4
+ TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED = 4,
+ TIMESTAMP_BASE_READOUT_SENSOR = 5,
+ TIMESTAMP_BASE_MAX = TIMESTAMP_BASE_READOUT_SENSOR,
};
enum MirrorModeType {
MIRROR_MODE_AUTO = 0,
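
Note: the TIMESTAMP_BASE_MAX alias added above lets range checks (see the SessionConfigurationUtils.cpp hunk later in this change) follow the enum automatically as new bases are appended. A small sketch of that validation pattern, with assumed names:

    enum TimestampBase {
        TIMESTAMP_BASE_DEFAULT = 0,
        TIMESTAMP_BASE_SENSOR = 1,
        TIMESTAMP_BASE_MONOTONIC = 2,
        TIMESTAMP_BASE_REALTIME = 3,
        TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED = 4,
        TIMESTAMP_BASE_READOUT_SENSOR = 5,
        TIMESTAMP_BASE_MAX = TIMESTAMP_BASE_READOUT_SENSOR,  // keep equal to the last real value
    };

    bool isValidTimestampBase(int timestampBase) {
        // Future enumerators only need to move TIMESTAMP_BASE_MAX; callers stay unchanged.
        return timestampBase >= TIMESTAMP_BASE_DEFAULT && timestampBase <= TIMESTAMP_BASE_MAX;
    }
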
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index b6f8552..0d156a5 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -2198,6 +2198,10 @@
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#turnOnTorchWithStrengthLevel">CameraManager#turnOnTorchWithStrengthLevel</a>.
* If this value is equal to 1, flashlight brightness control is not supported.
* The value for this key will be null for devices with no flash unit.</p>
+ * <p>The maximum value is guaranteed to be safe to use for an indefinite duration in
+ * terms of device flashlight lifespan, but may be too bright for comfort for many
+ * use cases. Use the default torch brightness value to avoid problems with an
+ * over-bright flashlight.</p>
*/
ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL = // int32
ACAMERA_FLASH_INFO_START + 2,
diff --git a/media/libstagefright/renderfright/gl/ProgramCache.cpp b/media/libstagefright/renderfright/gl/ProgramCache.cpp
index 3ae35ec..56d35a9 100644
--- a/media/libstagefright/renderfright/gl/ProgramCache.cpp
+++ b/media/libstagefright/renderfright/gl/ProgramCache.cpp
@@ -299,8 +299,8 @@
highp vec3 ScaleLuminance(highp vec3 color) {
// The formula is:
// alpha * pow(Y, gamma - 1.0) * color + beta;
- // where alpha is 1000.0, gamma is 1.2, beta is 0.0.
- return color * 1000.0 * pow(color.y, 0.2);
+ // where alpha is displayMaxLuminance, gamma is 1.2, beta is 0.0.
+ return color * displayMaxLuminance * pow(color.y, 0.2);
}
)__SHADER__";
break;
@@ -316,7 +316,6 @@
// Tone map absolute light to display luminance range.
switch (needs.getInputTF()) {
case Key::INPUT_TF_ST2084:
- case Key::INPUT_TF_HLG:
switch (needs.getOutputTF()) {
case Key::OUTPUT_TF_HLG:
// Right now when mixed PQ and HLG contents are presented,
@@ -396,6 +395,14 @@
break;
}
break;
+ case Key::INPUT_TF_HLG:
+ // HLG OOTF is already applied as part of ScaleLuminance
+ fs << R"__SHADER__(
+ highp vec3 ToneMap(highp vec3 color) {
+ return color;
+ }
+ )__SHADER__";
+ break;
default:
// inverse tone map; the output luminance can be up to maxOutLumi.
fs << R"__SHADER__(
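
Note: the shader comment above now matches the code: the luminance scaling applies alpha * pow(Y, gamma - 1.0) * color + beta with alpha = displayMaxLuminance, gamma = 1.2, beta = 0. A CPU-side C++ equivalent of the same math (assuming the .y component carries relative luminance Y in [0, 1]):

    #include <array>
    #include <cmath>

    std::array<float, 3> scaleLuminance(std::array<float, 3> color,
                                        float displayMaxLuminance) {
        // alpha * pow(Y, gamma - 1.0) * color + beta,
        // with alpha = displayMaxLuminance, gamma = 1.2, beta = 0.0.
        const float gain = displayMaxLuminance * std::pow(color[1], 1.2f - 1.0f);
        return {color[0] * gain, color[1] * gain, color[2] * gain};
    }
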
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 07e82a8..3fa3e41 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3967,19 +3967,24 @@
void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
- // mono blend occurs for mixer threads only (not direct or offloaded)
- // and is handled here if we're going directly to the sink.
- if (requireMonoBlend() && !mEffectBufferValid) {
- mono_blend(mMixerBuffer, mMixerBufferFormat, mChannelCount, mNormalFrameCount,
- true /*limit*/);
- }
+ // Apply mono blending and balancing if the effect buffer is not valid. Otherwise,
+ // do these processes after effects are applied.
+ if (!mEffectBufferValid) {
+ // mono blend occurs for mixer threads only (not direct or offloaded)
+ // and is handled here if we're going directly to the sink.
+ if (requireMonoBlend()) {
+ mono_blend(mMixerBuffer, mMixerBufferFormat, mChannelCount,
+ mNormalFrameCount, true /*limit*/);
+ }
- if (!hasFastMixer()) {
- // Balance must take effect after mono conversion.
- // We do it here if there is no FastMixer.
- // mBalance detects zero balance within the class for speed (not needed here).
- mBalance.setBalance(mMasterBalance.load());
- mBalance.process((float *)mMixerBuffer, mNormalFrameCount);
+ if (!hasFastMixer()) {
+ // Balance must take effect after mono conversion.
+ // We do it here if there is no FastMixer.
+ // mBalance detects zero balance within the class for speed
+ // (not needed here).
+ mBalance.setBalance(mMasterBalance.load());
+ mBalance.process((float *)mMixerBuffer, mNormalFrameCount);
+ }
}
memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
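
Note: the restructured block above preserves the ordering constraint from the comments: mono blend first, then balance, and only when mixing straight to the sink (no valid effect buffer). A standalone sketch of that ordering on an interleaved stereo float buffer (illustrative helpers, not AudioFlinger's):

    #include <cstddef>

    static void monoBlendStereo(float* buf, size_t frames) {
        for (size_t i = 0; i < frames; ++i) {
            const float m = 0.5f * (buf[2 * i] + buf[2 * i + 1]);  // average L and R
            buf[2 * i] = buf[2 * i + 1] = m;
        }
    }

    static void applyBalanceStereo(float* buf, size_t frames, float balance /* -1..1 */) {
        const float left  = balance > 0.f ? 1.f - balance : 1.f;
        const float right = balance < 0.f ? 1.f + balance : 1.f;
        for (size_t i = 0; i < frames; ++i) {
            buf[2 * i]     *= left;
            buf[2 * i + 1] *= right;
        }
    }

    void mixToSink(float* buf, size_t frames, bool requireMonoBlend, float balance) {
        if (requireMonoBlend) monoBlendStereo(buf, frames);  // must run before balance
        applyBalanceStereo(buf, frames, balance);
    }
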
diff --git a/services/camera/libcameraservice/CameraServiceWatchdog.h b/services/camera/libcameraservice/CameraServiceWatchdog.h
index f4955e2..29ddab1 100644
--- a/services/camera/libcameraservice/CameraServiceWatchdog.h
+++ b/services/camera/libcameraservice/CameraServiceWatchdog.h
@@ -26,7 +26,7 @@
* and single call monitoring differently. See function documentation for
* more details.
*/
-
+#pragma once
#include <chrono>
#include <thread>
#include <time.h>
@@ -61,7 +61,7 @@
/** Used to wrap monitored calls in start and stop functions using custom timer values */
template<typename T>
auto watchThread(T func, uint32_t tid, uint32_t cycles, uint32_t cycleLength) {
- auto res = NULL;
+ decltype(func()) res;
if (cycles != mMaxCycles || cycleLength != mCycleLengthMs) {
// Create another instance of the watchdog to prevent disruption
@@ -84,10 +84,9 @@
/** Used to wrap monitored calls in start and stop functions using class timer values */
template<typename T>
auto watchThread(T func, uint32_t tid) {
- auto res = NULL;
start(tid);
- res = func();
+ auto res = func();
stop(tid);
return res;
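
Note: the watchdog wrapper fix above replaces `auto res = NULL;` (which deduces an integral/pointer-like type and cannot hold most return values) with a declaration whose type is deduced from the callable. A minimal sketch of that technique under assumed names:

    #include <cstdint>
    #include <string>

    template <typename T>
    auto timedCall(T func) {
        // start(tid) would go here in the real wrapper
        decltype(func()) res;   // deduced return type; must be default-constructible
        res = func();
        // stop(tid) would go here
        return res;
    }

    int main() {
        int32_t a = timedCall([] { return int32_t{7}; });
        std::string s = timedCall([] { return std::string("ok"); });
        return (a == 7 && !s.empty()) ? 0 : 1;
    }
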
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index dc5002b..a3d24ff 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -40,6 +40,9 @@
#include "utils/CameraServiceProxyWrapper.h"
namespace android {
+
+const static size_t kDisconnectTimeoutMs = 2500;
+
using namespace camera2;
// Interface used by CameraService
@@ -144,6 +147,10 @@
wp<NotificationListener> weakThis(this);
res = mDevice->setNotifyCallback(weakThis);
+ /** Start watchdog thread */
+ mCameraServiceWatchdog = new CameraServiceWatchdog();
+ mCameraServiceWatchdog->run("Camera2ClientBaseWatchdog");
+
return OK;
}
@@ -155,6 +162,11 @@
disconnect();
+ if (mCameraServiceWatchdog != NULL) {
+ mCameraServiceWatchdog->requestExit();
+ mCameraServiceWatchdog.clear();
+ }
+
ALOGI("Closed Camera %s. Client was: %s (PID %d, UID %u)",
TClientBase::mCameraIdStr.string(),
String8(TClientBase::mClientPackageName).string(),
@@ -238,9 +250,18 @@
// ICameraClient2BaseUser interface
-
template <typename TClientBase>
binder::Status Camera2ClientBase<TClientBase>::disconnect() {
+ if (mCameraServiceWatchdog != nullptr) {
+        // Initialization from the HAL succeeded; time the disconnect call.
+ return mCameraServiceWatchdog->WATCH_CUSTOM_TIMER(disconnectImpl(),
+ kDisconnectTimeoutMs / kCycleLengthMs, kCycleLengthMs);
+ }
+ return disconnectImpl();
+}
+
+template <typename TClientBase>
+binder::Status Camera2ClientBase<TClientBase>::disconnectImpl() {
ATRACE_CALL();
ALOGD("Camera %s: start to disconnect", TClientBase::mCameraIdStr.string());
Mutex::Autolock icl(mBinderSerializationLock);
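
Note: the public disconnect() above now routes through the watchdog when one exists, timing disconnectImpl() with a custom cycle count (kDisconnectTimeoutMs / kCycleLengthMs). A hedged, std::thread-based sketch of a watchdog wrapping a blocking call; the real CameraServiceWatchdog / WATCH_CUSTOM_TIMER API differs:

    #include <chrono>
    #include <condition_variable>
    #include <cstdint>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    template <typename F>
    auto watchCall(F func, uint32_t cycles, uint32_t cycleLengthMs) {
        std::mutex m;
        std::condition_variable cv;
        bool done = false;

        std::thread watchdog([&] {
            std::unique_lock<std::mutex> lk(m);
            if (!cv.wait_for(lk, std::chrono::milliseconds(cycles * cycleLengthMs),
                             [&] { return done; })) {
                // The real watchdog would dump state / abort the process here.
                std::fprintf(stderr, "watched call exceeded %u ms\n",
                             static_cast<unsigned>(cycles * cycleLengthMs));
            }
        });

        auto res = func();          // the monitored call, e.g. disconnectImpl()
        {
            std::lock_guard<std::mutex> lk(m);
            done = true;
        }
        cv.notify_one();
        watchdog.join();
        return res;
    }
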
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index b0d1c3f..3af781b 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -19,6 +19,7 @@
#include "common/CameraDeviceBase.h"
#include "camera/CaptureResult.h"
+#include "CameraServiceWatchdog.h"
namespace android {
@@ -173,6 +174,12 @@
private:
template<typename TProviderPtr>
status_t initializeImpl(TProviderPtr providerPtr, const String8& monitorTags);
+
+ binder::Status disconnectImpl();
+
+ // Watchdog thread
+ sp<CameraServiceWatchdog> mCameraServiceWatchdog;
+
};
}; // namespace android
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index abaea66..6ef16b3 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -1408,6 +1408,27 @@
return res;
}
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::addReadoutTimestampTag(
+ bool readoutTimestampSupported) {
+ status_t res = OK;
+ auto& c = mCameraCharacteristics;
+
+ auto entry = c.find(ANDROID_SENSOR_READOUT_TIMESTAMP);
+ if (entry.count != 0) {
+ ALOGE("%s: CameraCharacteristics must not contain ANDROID_SENSOR_READOUT_TIMESTAMP!",
+ __FUNCTION__);
+ }
+
+ uint8_t readoutTimestamp = ANDROID_SENSOR_READOUT_TIMESTAMP_NOT_SUPPORTED;
+ if (readoutTimestampSupported) {
+ readoutTimestamp = ANDROID_SENSOR_READOUT_TIMESTAMP_HARDWARE;
+ }
+
+ res = c.update(ANDROID_SENSOR_READOUT_TIMESTAMP, &readoutTimestamp, 1);
+
+ return res;
+}
+
status_t CameraProviderManager::ProviderInfo::DeviceInfo3::removeAvailableKeys(
CameraMetadata& c, const std::vector<uint32_t>& keys, uint32_t keyTag) {
status_t res = OK;
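
Note: addReadoutTimestampTag() above injects a static ANDROID_SENSOR_READOUT_TIMESTAMP entry into the cached characteristics, logging if the HAL unexpectedly provided one; the AIDL and HIDL provider hunks later in this change call it with supported = true and false respectively. A map-based sketch of the same default-injection idea (illustrative names only, not CameraMetadata):

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <string>

    enum : uint8_t { READOUT_NOT_SUPPORTED = 0, READOUT_HARDWARE = 1 };

    void addReadoutTimestampTag(std::map<std::string, uint8_t>& characteristics,
                                bool readoutTimestampSupported = true) {
        const std::string kTag = "android.sensor.readoutTimestamp";
        if (characteristics.count(kTag) != 0) {
            // The HAL is not expected to populate this tag itself.
            std::fprintf(stderr, "characteristics already contain %s\n", kTag.c_str());
        }
        characteristics[kTag] =
                readoutTimestampSupported ? READOUT_HARDWARE : READOUT_NOT_SUPPORTED;
    }
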
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index a66598d..d049aff 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -663,6 +663,7 @@
status_t deriveHeicTags(bool maxResolution = false);
status_t addRotateCropTags();
status_t addPreCorrectionActiveArraySize();
+ status_t addReadoutTimestampTag(bool readoutTimestampSupported = true);
static void getSupportedSizes(const CameraMetadata& ch, uint32_t tag,
android_pixel_format_t format,
diff --git a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
index 81b4779..ef68f28 100644
--- a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
@@ -532,6 +532,11 @@
ALOGE("%s: Unable to override zoomRatio related tags: %s (%d)",
__FUNCTION__, strerror(-res), res);
}
+ res = addReadoutTimestampTag();
+ if (OK != res) {
+ ALOGE("%s: Unable to add sensorReadoutTimestamp tag: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ }
camera_metadata_entry flashAvailable =
mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
diff --git a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
index bded9aa..d60565f 100644
--- a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
@@ -655,6 +655,11 @@
ALOGE("%s: Unable to override zoomRatio related tags: %s (%d)",
__FUNCTION__, strerror(-res), res);
}
+ res = addReadoutTimestampTag(/*readoutTimestampSupported*/false);
+ if (OK != res) {
+ ALOGE("%s: Unable to add sensorReadoutTimestamp tag: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ }
camera_metadata_entry flashAvailable =
mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 1e20ee0..69163a5 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -67,6 +67,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
+ mUseReadoutTime(false),
mConsumerUsage(0),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -100,6 +101,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
+ mUseReadoutTime(false),
mConsumerUsage(0),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -140,6 +142,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
+ mUseReadoutTime(false),
mConsumerUsage(consumerUsage),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -188,6 +191,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
+ mUseReadoutTime(false),
mConsumerUsage(consumerUsage),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -462,17 +466,19 @@
}
}
+ nsecs_t captureTime = (mUseReadoutTime && readoutTimestamp != 0 ?
+ readoutTimestamp : timestamp) - mTimestampOffset;
if (mPreviewFrameSpacer != nullptr) {
- res = mPreviewFrameSpacer->queuePreviewBuffer(timestamp - mTimestampOffset,
- readoutTimestamp - mTimestampOffset, transform, anwBuffer, anwReleaseFence);
+ nsecs_t readoutTime = (readoutTimestamp != 0 ? readoutTimestamp : timestamp)
+ - mTimestampOffset;
+ res = mPreviewFrameSpacer->queuePreviewBuffer(captureTime, readoutTime,
+ transform, anwBuffer, anwReleaseFence);
if (res != OK) {
ALOGE("%s: Stream %d: Error queuing buffer to preview buffer spacer: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
return res;
}
} else {
- nsecs_t captureTime = (mSyncToDisplay ? readoutTimestamp : timestamp)
- - mTimestampOffset;
nsecs_t presentTime = mSyncToDisplay ?
syncTimestampToDisplayLocked(captureTime) : captureTime;
@@ -705,12 +711,16 @@
mFrameCount = 0;
mLastTimestamp = 0;
+ mUseReadoutTime =
+ (timestampBase == OutputConfiguration::TIMESTAMP_BASE_READOUT_SENSOR || mSyncToDisplay);
+
if (isDeviceTimeBaseRealtime()) {
if (isDefaultTimeBase && !isConsumedByHWComposer() && !isVideoStream()) {
// Default time base, but not hardware composer or video encoder
mTimestampOffset = 0;
} else if (timestampBase == OutputConfiguration::TIMESTAMP_BASE_REALTIME ||
- timestampBase == OutputConfiguration::TIMESTAMP_BASE_SENSOR) {
+ timestampBase == OutputConfiguration::TIMESTAMP_BASE_SENSOR ||
+ timestampBase == OutputConfiguration::TIMESTAMP_BASE_READOUT_SENSOR) {
mTimestampOffset = 0;
}
// If timestampBase is CHOREOGRAPHER SYNCED or MONOTONIC, leave
@@ -720,7 +730,7 @@
// Reverse offset for monotonicTime -> bootTime
mTimestampOffset = -mTimestampOffset;
} else {
- // If timestampBase is DEFAULT, MONOTONIC, SENSOR, or
+ // If timestampBase is DEFAULT, MONOTONIC, SENSOR, READOUT_SENSOR or
// CHOREOGRAPHER_SYNCED, timestamp offset is 0.
mTimestampOffset = 0;
}
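
Note: the Camera3OutputStream change above picks the buffer timestamp base per stream: readout time when the stream asked for TIMESTAMP_BASE_READOUT_SENSOR or is display-synced, start-of-exposure otherwise, falling back to start-of-exposure when the HAL reported no readout time, and then subtracting the per-stream offset. A compact sketch of that selection (assumed names):

    #include <cstdint>

    using nsecs_t = int64_t;

    nsecs_t chooseCaptureTime(nsecs_t startOfExposure, nsecs_t readoutTimestamp,
                              bool useReadoutTime, nsecs_t timestampOffset) {
        // Fall back to start-of-exposure when the HAL did not report a readout time (0).
        const nsecs_t base =
                (useReadoutTime && readoutTimestamp != 0) ? readoutTimestamp : startOfExposure;
        return base - timestampOffset;
    }
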
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index e8065ce..3587af4 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -341,6 +341,11 @@
nsecs_t mTimestampOffset;
/**
+     * Whether the camera readout time is used rather than the start-of-exposure time.
+ */
+ bool mUseReadoutTime;
+
+ /**
* Consumer end point usage flag set by the constructor for the deferred
* consumer case.
*/
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index ed66df0..f4e3fad 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -787,10 +787,12 @@
SessionStatsBuilder& sessionStatsBuilder) {
bool timestampIncreasing =
!((request.zslCapture && request.stillCapture) || request.hasInputBuffer);
+ nsecs_t readoutTimestamp = request.resultExtras.hasReadoutTimestamp ?
+ request.resultExtras.readoutTimestamp : 0;
returnOutputBuffers(useHalBufManager, listener,
request.pendingOutputBuffers.array(),
request.pendingOutputBuffers.size(),
- request.shutterTimestamp, request.shutterReadoutTimestamp,
+ request.shutterTimestamp, readoutTimestamp,
/*requested*/true, request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
request.outputSurfaces, request.resultExtras,
request.errorBufStrategy, request.transform);
@@ -852,7 +854,10 @@
}
r.shutterTimestamp = msg.timestamp;
- r.shutterReadoutTimestamp = msg.readout_timestamp;
+ if (msg.readout_timestamp_valid) {
+ r.resultExtras.hasReadoutTimestamp = true;
+ r.resultExtras.readoutTimestamp = msg.readout_timestamp;
+ }
if (r.minExpectedDuration != states.minFrameDuration) {
for (size_t i = 0; i < states.outputStreams.size(); i++) {
auto outputStream = states.outputStreams[i];
diff --git a/services/camera/libcameraservice/device3/InFlightRequest.h b/services/camera/libcameraservice/device3/InFlightRequest.h
index 493a9e2..fa00495 100644
--- a/services/camera/libcameraservice/device3/InFlightRequest.h
+++ b/services/camera/libcameraservice/device3/InFlightRequest.h
@@ -65,6 +65,7 @@
typedef struct camera_shutter_msg {
uint32_t frame_number;
uint64_t timestamp;
+ bool readout_timestamp_valid;
uint64_t readout_timestamp;
} camera_shutter_msg_t;
@@ -104,8 +105,6 @@
struct InFlightRequest {
// Set by notify() SHUTTER call.
nsecs_t shutterTimestamp;
- // Set by notify() SHUTTER call with readout time.
- nsecs_t shutterReadoutTimestamp;
// Set by process_capture_result().
nsecs_t sensorTimestamp;
int requestStatus;
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
index 02eebd2..b2accc1 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
@@ -110,6 +110,7 @@
m.type = CAMERA_MSG_SHUTTER;
m.message.shutter.frame_number = msg.get<Tag::shutter>().frameNumber;
m.message.shutter.timestamp = msg.get<Tag::shutter>().timestamp;
+ m.message.shutter.readout_timestamp_valid = true;
m.message.shutter.readout_timestamp = msg.get<Tag::shutter>().readoutTimestamp;
break;
}
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp
index 8b0cd65..ff6fc17 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp
@@ -105,6 +105,7 @@
m.type = CAMERA_MSG_SHUTTER;
m.message.shutter.frame_number = msg.msg.shutter.frameNumber;
m.message.shutter.timestamp = msg.msg.shutter.timestamp;
+ m.message.shutter.readout_timestamp_valid = false;
m.message.shutter.readout_timestamp = 0LL;
break;
}
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index 2eb2d55..7dde268 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -458,7 +458,7 @@
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
if (timestampBase < OutputConfiguration::TIMESTAMP_BASE_DEFAULT ||
- timestampBase > OutputConfiguration::TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED) {
+ timestampBase > OutputConfiguration::TIMESTAMP_BASE_MAX) {
String8 msg = String8::format("Camera %s: invalid timestamp base %d",
logicalCameraId.string(), timestampBase);
ALOGE("%s: %s", __FUNCTION__, msg.string());
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.cpp
index 4e6f832..5444f2a 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.cpp
@@ -50,7 +50,7 @@
for (const auto &stream : aidl.streams) {
if (static_cast<int>(stream.dynamicRangeProfile) !=
ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
- ALOGE("%s Dynamic range profile %" PRId64 " not supported by HIDL", __FUNCTION__,
+ ALOGE("%s Dynamic range profile %" PRId64 " not supported by HIDL", __FUNCTION__,
stream.dynamicRangeProfile);
return BAD_VALUE;
}
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index 461f5e9..fe87ed6 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -169,6 +169,22 @@
camera_metadata_entry lastEntry = lastValues.find(tag);
+    // Monitor when the stream ids change; this makes it easy to see which
+    // monitored metadata values belong to capture requests with different
+ // stream ids.
+ if (source == REQUEST) {
+ if (inputStreamId != mLastInputStreamId) {
+ mMonitoringEvents.emplace(source, frameNumber, timestamp, camera_metadata_ro_entry_t{},
+ cameraId, std::unordered_set<int>(), inputStreamId);
+ mLastInputStreamId = inputStreamId;
+ }
+
+ if (outputStreamIds != mLastStreamIds) {
+ mMonitoringEvents.emplace(source, frameNumber, timestamp, camera_metadata_ro_entry_t{},
+ cameraId, outputStreamIds, -1);
+ mLastStreamIds = outputStreamIds;
+ }
+ }
if (entry.count > 0) {
bool isDifferent = false;
if (lastEntry.count > 0) {
@@ -190,22 +206,14 @@
// No last entry, so always consider to be different
isDifferent = true;
}
- // Also monitor when the stream ids change, this helps visually see what
- // monitored metadata values are for capture requests with different
- // stream ids.
- if (source == REQUEST &&
- (inputStreamId != mLastInputStreamId || outputStreamIds != mLastStreamIds)) {
- mLastInputStreamId = inputStreamId;
- mLastStreamIds = outputStreamIds;
- isDifferent = true;
- }
+
if (isDifferent) {
ALOGV("%s: Tag %s changed", __FUNCTION__,
get_local_camera_metadata_tag_name_vendor_id(
tag, mVendorTagId));
lastValues.update(entry);
mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId,
- outputStreamIds, inputStreamId);
+ std::unordered_set<int>(), -1);
}
} else if (lastEntry.count > 0) {
// Value has been removed
@@ -219,8 +227,8 @@
entry.count = 0;
mLastInputStreamId = inputStreamId;
mLastStreamIds = outputStreamIds;
- mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId, outputStreamIds,
- inputStreamId);
+ mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId,
+ std::unordered_set<int>(), -1);
}
}
@@ -261,23 +269,39 @@
for (const auto& event : mMonitoringEvents) {
int indentation = (event.source == REQUEST) ? 15 : 30;
- String8 eventString = String8::format("f%d:%" PRId64 "ns:%*s%*s%s.%s: ",
+ String8 eventString = String8::format("f%d:%" PRId64 "ns:%*s%*s",
event.frameNumber, event.timestamp,
2, event.cameraId.c_str(),
indentation,
- event.source == REQUEST ? "REQ:" : "RES:",
+ event.source == REQUEST ? "REQ:" : "RES:");
+
+ if (!event.outputStreamIds.empty()) {
+ eventString += " output stream ids:";
+ for (const auto& id : event.outputStreamIds) {
+ eventString.appendFormat(" %d", id);
+ }
+ eventString += "\n";
+ vec.emplace_back(eventString.string());
+ continue;
+ }
+
+ if (event.inputStreamId != -1) {
+ eventString.appendFormat(" input stream id: %d\n", event.inputStreamId);
+ vec.emplace_back(eventString.string());
+ continue;
+ }
+
+ eventString += String8::format(
+ "%s.%s: ",
get_local_camera_metadata_section_name_vendor_id(event.tag, mVendorTagId),
get_local_camera_metadata_tag_name_vendor_id(event.tag, mVendorTagId));
- if (event.newData.size() == 0) {
- eventString += " (Removed)";
+
+ if (event.newData.empty()) {
+ eventString += " (Removed)\n";
} else {
- eventString += getEventDataString(event.newData.data(),
- event.tag,
- event.type,
- event.newData.size() / camera_metadata_type_size[event.type],
- indentation + 18,
- event.outputStreamIds,
- event.inputStreamId);
+ eventString += getEventDataString(
+ event.newData.data(), event.tag, event.type,
+ event.newData.size() / camera_metadata_type_size[event.type], indentation + 18);
}
vec.emplace_back(eventString.string());
}
@@ -285,13 +309,8 @@
#define CAMERA_METADATA_ENUM_STRING_MAX_SIZE 29
-String8 TagMonitor::getEventDataString(const uint8_t* data_ptr,
- uint32_t tag,
- int type,
- int count,
- int indentation,
- const std::unordered_set<int32_t>& outputStreamIds,
- int32_t inputStreamId) {
+String8 TagMonitor::getEventDataString(const uint8_t* data_ptr, uint32_t tag, int type, int count,
+ int indentation) {
static int values_per_line[NUM_TYPES] = {
[TYPE_BYTE] = 16,
[TYPE_INT32] = 8,
@@ -362,17 +381,7 @@
returnStr += "??? ";
}
}
- returnStr += "] ";
- if (!outputStreamIds.empty()) {
- returnStr += "output stream ids: ";
- for (const auto &id : outputStreamIds) {
- returnStr.appendFormat(" %d ", id);
- }
- }
- if (inputStreamId != -1) {
- returnStr.appendFormat("input stream id: %d", inputStreamId);
- }
- returnStr += "\n";
+ returnStr += "]\n";
}
return returnStr;
}
@@ -385,11 +394,12 @@
source(src),
frameNumber(frameNumber),
timestamp(timestamp),
+ cameraId(cameraId),
tag(value.tag),
type(value.type),
newData(value.data.u8, value.data.u8 + camera_metadata_type_size[value.type] * value.count),
- cameraId(cameraId), outputStreamIds(outputStreamIds), inputStreamId(inputStreamId) {
-}
+ outputStreamIds(outputStreamIds),
+ inputStreamId(inputStreamId) {}
TagMonitor::MonitorEvent::~MonitorEvent() {
}
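
Note: the TagMonitor rework above records stream-id changes as their own MonitorEvent entries (with an empty tag payload) instead of tacking the ids onto every metadata event, and the dump loop prints them on dedicated lines. A simplified sketch of that dump dispatch (toy event type, not the real MonitorEvent):

    #include <string>
    #include <unordered_set>

    struct ToyEvent {
        std::unordered_set<int> outputStreamIds;  // non-empty => output-stream-id change
        int inputStreamId = -1;                   // != -1     => input-stream-id change
        std::string tagName, value;               // otherwise => metadata value change
    };

    std::string formatEvent(const ToyEvent& e) {
        if (!e.outputStreamIds.empty()) {
            std::string s = "output stream ids:";
            for (int id : e.outputStreamIds) s += " " + std::to_string(id);
            return s + "\n";
        }
        if (e.inputStreamId != -1) {
            return "input stream id: " + std::to_string(e.inputStreamId) + "\n";
        }
        return e.tagName + ": " + e.value + "\n";
    }
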
diff --git a/services/camera/libcameraservice/utils/TagMonitor.h b/services/camera/libcameraservice/utils/TagMonitor.h
index 088d6fe..9ded15d 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.h
+++ b/services/camera/libcameraservice/utils/TagMonitor.h
@@ -85,12 +85,8 @@
// function.
void dumpMonitoredTagEventsToVectorLocked(std::vector<std::string> &out);
- static String8 getEventDataString(const uint8_t *data_ptr,
- uint32_t tag, int type,
- int count,
- int indentation,
- const std::unordered_set<int32_t> &outputStreamIds,
- int32_t inputStreamId);
+ static String8 getEventDataString(const uint8_t* data_ptr, uint32_t tag, int type, int count,
+ int indentation);
void monitorSingleMetadata(TagMonitor::eventSource source, int64_t frameNumber,
nsecs_t timestamp, const std::string& cameraId, uint32_t tag,
@@ -128,12 +124,15 @@
eventSource source;
uint32_t frameNumber;
nsecs_t timestamp;
+ std::string cameraId;
uint32_t tag;
uint8_t type;
std::vector<uint8_t> newData;
- std::string cameraId;
+        // NOTE: We want to print changes to outputStreamIds and inputStreamId on their own lines.
+ // So any MonitorEvent where these fields are not the default value will have garbage
+ // values for all fields other than source, frameNumber, timestamp, and cameraId.
std::unordered_set<int32_t> outputStreamIds;
- int32_t inputStreamId = 1;
+ int32_t inputStreamId = -1;
};
// A ring buffer for tracking the last kMaxMonitorEvents metadata changes