Merge "Camera: Propagate colorspace to heic composite stream" into udc-dev
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c02573e..94851fb 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -155,6 +155,7 @@
 static const char *kCodecLatencyUnknown = "android.media.mediacodec.latency.unknown";
 static const char *kCodecQueueSecureInputBufferError = "android.media.mediacodec.queueSecureInputBufferError";
 static const char *kCodecQueueInputBufferError = "android.media.mediacodec.queueInputBufferError";
+static const char *kCodecComponentColorFormat = "android.media.mediacodec.component-color-format";
 
 static const char *kCodecNumLowLatencyModeOn = "android.media.mediacodec.low-latency.on";  /* 0..n */
 static const char *kCodecNumLowLatencyModeOff = "android.media.mediacodec.low-latency.off";  /* 0..n */
@@ -1129,7 +1130,7 @@
     // Video rendering quality metrics
     {
         const VideoRenderQualityMetrics &m = mVideoRenderQualityTracker.getMetrics();
-        if (m.frameRenderedCount > 0) {
+        if (m.frameReleasedCount > 0) {
             mediametrics_setInt64(mMetricsHandle, kCodecFirstRenderTimeUs, m.firstRenderTimeUs);
             mediametrics_setInt64(mMetricsHandle, kCodecFramesReleased, m.frameReleasedCount);
             mediametrics_setInt64(mMetricsHandle, kCodecFramesRendered, m.frameRenderedCount);
@@ -1534,7 +1535,11 @@
                 ALOGE("processRenderedFrames: no media time found");
                 continue;
             }
-            mVideoRenderQualityTracker.onFrameRendered(mediaTimeUs, renderTimeNs);
+            // Tunneled frames use INT64_MAX to indicate end-of-stream, so don't report it as a
+            // rendered frame.
+            if (!mTunneled || mediaTimeUs != INT64_MAX) {
+                mVideoRenderQualityTracker.onFrameRendered(mediaTimeUs, renderTimeNs);
+            }
         }
     }
 }
@@ -3807,6 +3812,14 @@
                         if (interestingFormat->findInt32("level", &level)) {
                             mediametrics_setInt32(mMetricsHandle, kCodecLevel, level);
                         }
+                        sp<AMessage> uncompressedFormat =
+                                (mFlags & kFlagIsEncoder) ? mInputFormat : mOutputFormat;
+                        int32_t componentColorFormat = -1;
+                        if (uncompressedFormat->findInt32("android._color-format",
+                                &componentColorFormat)) {
+                            mediametrics_setInt32(mMetricsHandle,
+                                    kCodecComponentColorFormat, componentColorFormat);
+                        }
                         updateHdrMetrics(true /* isConfig */);
                         int32_t codecMaxInputSize = -1;
                         if (mInputFormat->findInt32(KEY_MAX_INPUT_SIZE, &codecMaxInputSize)) {
@@ -5818,6 +5831,10 @@
     }
 
     if (err == OK) {
+        if (mTunneled && (flags & (BUFFER_FLAG_DECODE_ONLY | BUFFER_FLAG_END_OF_STREAM)) == 0) {
+            mVideoRenderQualityTracker.onTunnelFrameQueued(timeUs);
+        }
+
         // synchronization boundary for getBufferAndFormat
         Mutex::Autolock al(mBufferLock);
         info->mOwnedByClient = false;
@@ -5900,7 +5917,7 @@
     }
 
     if (render && buffer->size() != 0) {
-        int64_t mediaTimeUs = -1;
+        int64_t mediaTimeUs = INT64_MIN;
         buffer->meta()->findInt64("timeUs", &mediaTimeUs);
 
         bool noRenderTime = false;
@@ -5931,8 +5948,11 @@
         // If rendering to the screen, then schedule a time in the future to poll to see if this
         // frame was ever rendered to seed onFrameRendered callbacks.
         if (mIsSurfaceToDisplay) {
-            noRenderTime ? mVideoRenderQualityTracker.onFrameReleased(mediaTimeUs)
-                         : mVideoRenderQualityTracker.onFrameReleased(mediaTimeUs, renderTimeNs);
+            if (mediaTimeUs != INT64_MIN) {
+                noRenderTime ? mVideoRenderQualityTracker.onFrameReleased(mediaTimeUs)
+                             : mVideoRenderQualityTracker.onFrameReleased(mediaTimeUs,
+                                                                          renderTimeNs);
+            }
             // can't initialize this in the constructor because the Looper parent class needs to be
             // initialized first
             if (mMsgPollForRenderedBuffers == nullptr) {
@@ -5963,9 +5983,10 @@
         }
     } else {
         if (mIsSurfaceToDisplay) {
-            int64_t mediaTimeUs = -1;
-            buffer->meta()->findInt64("timeUs", &mediaTimeUs);
-            mVideoRenderQualityTracker.onFrameSkipped(mediaTimeUs);
+            int64_t mediaTimeUs = INT64_MIN;
+            if (buffer->meta()->findInt64("timeUs", &mediaTimeUs)) {
+                mVideoRenderQualityTracker.onFrameSkipped(mediaTimeUs);
+            }
         }
         mBufferChannel->discardBuffer(buffer);
     }
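
A minimal standalone sketch of the sentinel pattern the MediaCodec.cpp hunks above adopt: -1 doubled as both "key missing" and a plausible media time, so INT64_MIN becomes the unambiguous default and the render-quality tracker is only invoked when "timeUs" was actually found. The findInt64 helper below is a stand-in for the AMessage metadata lookup, not the real API:

```cpp
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

// Hypothetical stand-in for AMessage::findInt64: returns false when the key is absent.
static bool findInt64(const std::map<std::string, int64_t>& meta, const char* key, int64_t* out) {
    auto it = meta.find(key);
    if (it == meta.end()) return false;
    *out = it->second;
    return true;
}

int main() {
    std::map<std::string, int64_t> meta;  // no "timeUs" entry, mimicking a buffer without a timestamp

    int64_t mediaTimeUs = INT64_MIN;      // unambiguous "missing" sentinel; -1 could be a real time
    if (findInt64(meta, "timeUs", &mediaTimeUs)) {
        printf("report frame at %lld us to the tracker\n", (long long)mediaTimeUs);
    } else {
        printf("no timestamp; frame not reported to the tracker\n");
    }
}
```

The same reasoning motivates the INT64_MAX guard in processRenderedFrames: in tunnel mode that value marks end-of-stream rather than a real frame, so it is filtered before reaching the tracker.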
diff --git a/media/libstagefright/VideoRenderQualityTracker.cpp b/media/libstagefright/VideoRenderQualityTracker.cpp
index 1072cdd..df25ead 100644
--- a/media/libstagefright/VideoRenderQualityTracker.cpp
+++ b/media/libstagefright/VideoRenderQualityTracker.cpp
@@ -87,6 +87,25 @@
     clear();
 }
 
+void VideoRenderQualityTracker::onTunnelFrameQueued(int64_t contentTimeUs) {
+    if (!mConfiguration.enabled) {
+        return;
+    }
+
+    // Since P-frames are queued out of order, hold onto the P-frame until we can track it in
+    // render order. This only works because today's encoding algorithms allow B-frames to refer
+    // to at most ONE P-frame that comes after them. If the number of out-of-order P-frames in a
+    // single mini-GOP increases, this algorithm breaks down.
+    if (mTunnelFrameQueuedContentTimeUs == -1) {
+        mTunnelFrameQueuedContentTimeUs = contentTimeUs;
+    } else if (contentTimeUs < mTunnelFrameQueuedContentTimeUs) {
+        onFrameReleased(contentTimeUs, 0);
+    } else {
+        onFrameReleased(mTunnelFrameQueuedContentTimeUs, 0);
+        mTunnelFrameQueuedContentTimeUs = contentTimeUs;
+    }
+}
+
 void VideoRenderQualityTracker::onFrameSkipped(int64_t contentTimeUs) {
     if (!mConfiguration.enabled) {
         return;
@@ -137,6 +156,13 @@
     }
     mPendingSkippedFrameContentTimeUsList = {};
 
+    // The frame held back by onTunnelFrameQueued can itself be the one rendered (e.g. when it's
+    // the last frame of the video), so release it immediately.
+    if (contentTimeUs == mTunnelFrameQueuedContentTimeUs && mTunnelFrameQueuedContentTimeUs != -1) {
+        onFrameReleased(mTunnelFrameQueuedContentTimeUs, 0);
+        mTunnelFrameQueuedContentTimeUs = -1;
+    }
+
     static const FrameInfo noFrame = {-1, -1};
     FrameInfo nextExpectedFrame = noFrame;
     while (!mNextExpectedRenderedFrameQueue.empty()) {
@@ -211,6 +237,7 @@
     // discontinuity. While stuttering or freezing could be found in the next few frames, the impact
     // to the user is minimal, so it's better to just keep things simple and not bother.
     mNextExpectedRenderedFrameQueue = {};
+    mTunnelFrameQueuedContentTimeUs = -1;
 
     // Ignore any frames that were skipped just prior to the discontinuity.
     mPendingSkippedFrameContentTimeUsList = {};
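
To illustrate the hold-one-P-frame strategy in onTunnelFrameQueued, here is a minimal standalone sketch (not the AOSP class) fed one decode-order pattern with invented timestamps; releases come out in render order:

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the held-P-frame logic in VideoRenderQualityTracker.
class TunnelReorderSketch {
public:
    // Mirrors onTunnelFrameQueued: hold the out-of-order P-frame, pass B-frames
    // through immediately, and emit the held frame once a later frame is queued.
    void queue(int64_t contentTimeUs) {
        if (mHeldContentTimeUs == -1) {
            mHeldContentTimeUs = contentTimeUs;
        } else if (contentTimeUs < mHeldContentTimeUs) {
            release(contentTimeUs);       // B-frame: renders before the held P-frame
        } else {
            release(mHeldContentTimeUs);  // held P-frame renders before anything queued later
            mHeldContentTimeUs = contentTimeUs;
        }
    }

    // End of stream: nothing else can render before the held frame.
    void flush() {
        if (mHeldContentTimeUs != -1) {
            release(mHeldContentTimeUs);
            mHeldContentTimeUs = -1;
        }
    }

private:
    void release(int64_t t) { printf("release %lld us\n", (long long)t); }
    int64_t mHeldContentTimeUs = -1;
};

int main() {
    // Decode order for two mini-GOPs, P-frame queued before its B-frames:
    // P@100, B@33, B@66, P@200, B@133, B@166.
    TunnelReorderSketch tracker;
    for (int64_t t : {100, 33, 66, 200, 133, 166}) {
        tracker.queue(t);
    }
    tracker.flush();  // prints 33, 66, 100, 133, 166, 200 -- render order
}
```

As the comment in the diff warns, this single-slot scheme relies on at most one out-of-order P-frame being in flight per mini-GOP; a second one would be released too early.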
diff --git a/media/libstagefright/include/media/stagefright/MediaHistogram.h b/media/libstagefright/include/media/stagefright/MediaHistogram.h
index da8415a..50fa258 100644
--- a/media/libstagefright/include/media/stagefright/MediaHistogram.h
+++ b/media/libstagefright/include/media/stagefright/MediaHistogram.h
@@ -197,13 +197,11 @@
         for (int i = 0; i < mBucketLimits.size(); ++i) {
             ss << ',' << mBucketLimits[i];
         }
-        ss << ',' << mCeiling;
     } else {
         ss << mFloor;
-        for (int i = 0; i < mBuckets.size(); ++i) {
+        for (int i = 1; i <= mBuckets.size(); ++i) {
             ss << ',' << (mFloor + i * mWidth);
         }
-        ss << ',' << mCeiling;
     }
     return ss.str();
 }
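
The MediaHistogram.h change fixes the emitted bucket-limit string for the evenly-spaced case: the old loop started at i = 0 (duplicating the floor) and then appended mCeiling, yielding one boundary too many. A standalone sketch of the corrected loop, assuming mCeiling == mFloor + N * mWidth so that N buckets produce exactly N + 1 boundaries:

```cpp
#include <cstdint>
#include <cstdio>
#include <sstream>
#include <string>

// N evenly-spaced buckets over [floor, floor + N * width) have N + 1 boundaries.
static std::string emitBucketLimits(int64_t floor, int64_t width, size_t bucketCount) {
    std::stringstream ss;
    ss << floor;
    for (size_t i = 1; i <= bucketCount; ++i) {
        ss << ',' << (floor + (int64_t)i * width);
    }
    return ss.str();
}

int main() {
    // 5 buckets starting at 0 with width 10 -> "0,10,20,30,40,50".
    // The old code produced "0,0,10,20,30,40,50": seven boundaries for five buckets.
    printf("%s\n", emitBucketLimits(0, 10, 5).c_str());
}
```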
diff --git a/media/libstagefright/include/media/stagefright/VideoRenderQualityTracker.h b/media/libstagefright/include/media/stagefright/VideoRenderQualityTracker.h
index ec25a36..8bfead9 100644
--- a/media/libstagefright/include/media/stagefright/VideoRenderQualityTracker.h
+++ b/media/libstagefright/include/media/stagefright/VideoRenderQualityTracker.h
@@ -150,6 +150,9 @@
     VideoRenderQualityTracker();
     VideoRenderQualityTracker(const Configuration &configuration);
 
+    // Called when a tunnel mode frame has been queued.
+    void onTunnelFrameQueued(int64_t contentTimeUs);
+
     // Called when the app has intentionally decided not to render this frame.
     void onFrameSkipped(int64_t contentTimeUs);
 
@@ -277,6 +280,11 @@
     // checking to see if the next expected frame is rendered. If not, it is considered dropped.
     std::queue<FrameInfo> mNextExpectedRenderedFrameQueue;
 
+    // When B-frames are present in the stream, a P-frame will be queued before the B-frames even
+    // though it is rendered after them. Therefore, the P-frame is held here and not inserted into
+    // mNextExpectedRenderedFrameQueue until doing so maintains render order.
+    int64_t mTunnelFrameQueuedContentTimeUs;
+
     // Frame durations derived from timestamps encoded into the content stream. These are the
     // durations that each frame is supposed to be rendered for.
     FrameDurationUs mContentFrameDurationUs;
diff --git a/services/mediametrics/statsd_codec.cpp b/services/mediametrics/statsd_codec.cpp
index ad4cfce..ea76bcd 100644
--- a/services/mediametrics/statsd_codec.cpp
+++ b/services/mediametrics/statsd_codec.cpp
@@ -648,6 +648,12 @@
     }
     AStatsEvent_writeInt32(event, resolutionChangeCount);
 
+    int32_t componentColorFormat = -1;
+    if (item->getInt32("android.media.mediacodec.component-color-format", &componentColorFormat)) {
+        metrics_proto.set_component_color_format(componentColorFormat);
+    }
+    AStatsEvent_writeInt32(event, componentColorFormat);
+
     int64_t firstRenderTimeUs = -1;
     item->getInt64("android.media.mediacodec.first-render-time-us", &firstRenderTimeUs);
     int64_t framesReleased = -1;
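
For context on why AStatsEvent_writeInt32 is called unconditionally above: statsd atom fields are positional, so a default of -1 has to be written even when the mediametrics key is absent, otherwise every later field would shift. A sketch of that pattern, with hypothetical helpers standing in for MediaMetricsItem and the proto/statsd calls:

```cpp
#include <cstdint>
#include <cstdio>
#include <optional>

// Hypothetical lookup standing in for item->getInt32(); returns nullopt when absent.
static std::optional<int32_t> getItemInt32(const char* /*key*/) {
    return std::nullopt;  // pretend the codec never reported this key
}

// Placeholder for AStatsEvent_writeInt32(event, value).
static void writeInt32Field(int32_t value) {
    printf("AStatsEvent_writeInt32(event, %d)\n", value);
}

int main() {
    int32_t componentColorFormat = -1;  // -1 means "not reported" in the atom
    if (auto v = getItemInt32("android.media.mediacodec.component-color-format")) {
        componentColorFormat = *v;
        // the proto setter (set_component_color_format) would also go here
    }
    writeInt32Field(componentColorFormat);  // written unconditionally to keep field order
}
```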