Merge "audiorecord_tests: Fix remaining threading issues" into main
diff --git a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp
index 16ea15e..6e55a16 100644
--- a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp
+++ b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp
@@ -64,10 +64,6 @@
         "libfwdlock-decoder",
     ],
 
-    whole_static_libs: [
-        "libc++fs",
-    ],
-
     local_include_dirs: ["include"],
 
     relative_install_path: "drm",
diff --git a/media/audioaidlconversion/include/media/AidlConversionEffect.h b/media/audioaidlconversion/include/media/AidlConversionEffect.h
index b03d06b..e51bf8b 100644
--- a/media/audioaidlconversion/include/media/AidlConversionEffect.h
+++ b/media/audioaidlconversion/include/media/AidlConversionEffect.h
@@ -72,9 +72,6 @@
                 MAKE_EXTENSION_PARAMETER_ID(_effect, _tag##Tag, _extId);                          \
         aidl::android::hardware::audio::effect::Parameter _aidlParam;                             \
         RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(_id, &_aidlParam))); \
-        aidl::android::hardware::audio::effect::VendorExtension _ext =                            \
-                VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(                              \
-                        _aidlParam, _effect, _tag, _effect::vendor, VendorExtension));            \
         return VALUE_OR_RETURN_STATUS(                                                            \
                 aidl::android::aidl2legacy_Parameter_EffectParameterWriter(_aidlParam, _param));  \
     }
diff --git a/media/codec2/hal/common/MultiAccessUnitHelper.cpp b/media/codec2/hal/common/MultiAccessUnitHelper.cpp
index b1fa82f..55bf7a9 100644
--- a/media/codec2/hal/common/MultiAccessUnitHelper.cpp
+++ b/media/codec2/hal/common/MultiAccessUnitHelper.cpp
@@ -497,9 +497,10 @@
                     // This is to take care of the last bytes and to decide to send with
                     // FLAG_INCOMPLETE or not.
                     if ((frame->mWview
-                            && (frame->mWview->offset() > frame->mLargeFrameTuning.thresholdSize))
+                            && (frame->mWview->offset() >= frame->mLargeFrameTuning.thresholdSize))
                             || frame->mComponentFrameIds.empty()) {
                         if (frame->mLargeWork) {
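+                            // Mark the aggregated work as successful before it is finalized and queued.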
+                            frame->mLargeWork->result = C2_OK;
                             finalizeWork(*frame);
                             addOutWork(frame->mLargeWork);
                             frame->reset();
@@ -558,12 +559,15 @@
         c2_status_t ret = C2_OK;
         if (frame.mLargeWork == nullptr) {
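+            // Allocate the aggregated work and start it with a clean result and empty input flags.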
             frame.mLargeWork.reset(new C2Work);
+            frame.mLargeWork->result = C2_OK;
+            frame.mLargeWork->input.flags = (C2FrameData::flags_t)0;
             frame.mLargeWork->input.ordinal = frame.inOrdinal;
             frame.mLargeWork->input.ordinal.frameIndex = frame.inOrdinal.frameIndex;
         }
         if (allocateWorket) {
             if (frame.mLargeWork->worklets.size() == 0) {
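+                // Create the worklet for the aggregated work; its output flags start cleared.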
                 frame.mLargeWork->worklets.emplace_back(new C2Worklet);
+                frame.mLargeWork->worklets.back()->output.flags = (C2FrameData::flags_t)0;
             }
         }
         if (allocateBuffer) {
@@ -611,6 +615,9 @@
         if (c2ret != C2_OK) {
             return c2ret;
         }
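+        // Carry this work's input flags over into the aggregated work by OR-ing them in.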
+        uint32_t flags = work->input.flags;
+        flags |= frame.mLargeWork->input.flags;
+        frame.mLargeWork->input.flags = (C2FrameData::flags_t)flags;
         C2FrameData& outputFramedata = frame.mLargeWork->worklets.front()->output;
         if (!(*worklet)->output.configUpdate.empty()) {
             for (auto& configUpdate : (*worklet)->output.configUpdate) {
@@ -678,6 +685,9 @@
                         }
                     }
                     allocateWork(frame, true, true);
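+                    // OR this work's input flags into the aggregated work, as above.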
+                    uint32_t flags = work->input.flags;
+                    flags |= frame.mLargeWork->input.flags;
+                    frame.mLargeWork->input.flags = (C2FrameData::flags_t)flags;
                     C2ReadView rView = blocks.front().map().get();
                     if (rView.error()) {
                         LOG(ERROR) << "Buffer read view error";
@@ -744,7 +754,8 @@
     }
     LOG(DEBUG) << "Finalizing work with input Idx "
             << frame.mLargeWork->input.ordinal.frameIndex.peekull()
-            << " timestamp " << timeStampUs;
+            << " timestamp " << timeStampUs
+            << " inFlags " << inFlags;
     uint32_t finalFlags = 0;
     if ((!forceComplete)
             && (frame.mLargeWork->result == C2_OK)
diff --git a/media/codec2/vndk/include/C2BqBufferPriv.h b/media/codec2/vndk/include/C2BqBufferPriv.h
index 1e8dd40..806932c 100644
--- a/media/codec2/vndk/include/C2BqBufferPriv.h
+++ b/media/codec2/vndk/include/C2BqBufferPriv.h
@@ -44,7 +44,77 @@
  *
  * If a specific HGBP is configured, the HGBP acts as an allocator for creating graphic blocks.
  *
- * TODO: add more ducumentation(graphic block life-cycle, waitable object and workaounds)
+ *
+ * HGBP/IGBP and the BlockPool
+ *
+ * GraphicBuffer(s) from a BufferQueue (IGBP/IGBC) are managed by slot id.
+ * A created GraphicBuffer occupies a slot (so the GraphicBuffer has a slot id).
+ * A GraphicBuffer is produced, consumed, and recycled by the BufferQueue
+ * based on that slot id.
+ *
+ * HGBP::dequeueBuffer() returns the id of a slot that has an available GraphicBuffer.
+ * If necessary, HGBP allocates a new GraphicBuffer for the slot and indicates
+ * via a return flag that a new buffer was allocated.
+ * To retrieve the GraphicBuffer, HGBP::requestBuffer() must be called with the
+ * slot id. To save HGBP remote calls, the blockpool caches the
+ * allocated GraphicBuffer(s) along with their slot information.
+ *
+ * The blockpool provides a C2GraphicBlock upon \fetchGraphicBlock().
+ * The C2GraphicBlock has a native handle, which is extracted from a GraphicBuffer
+ * and then cloned so that its life-cycle is independent of the GraphicBuffer. The
+ * GraphicBuffer is allocated by HGBP::dequeueBuffer() and retrieved by
+ * HGBP::requestBuffer() if an HGBP is configured.
+ *
+ *
+ * Life-cycle of C2GraphicBlock
+ *
+ * The decoder HAL writes a decoded frame into C2GraphicBlock. Upon
+ * completion, the component sends the block to the client in the remote process
+ * (i.e. to MediaCodec). The remote process renders the frame into the output surface
+ * via IGBP::queueBuffer() (note: this is not hidlized).
+ *
+ * If the decoder HAL destroys the C2GraphicBlock without transferring it to the
+ * client, the destroy request goes to the BlockPool. The BlockPool then
+ * returns the associated GraphicBuffer and its slot to the
+ * HGBP via HGBP::cancelBuffer() so the buffer can be recycled.
+ *
+ *
+ * Clearing the Cache (GraphicBuffer)
+ *
+ * When the output surface is switched to a new surface, the GraphicBuffers from
+ * the old surface are either migrated or cleared.
+ *
+ * The GraphicBuffer(s) still in use are migrated to a new surface during
+ * configuration via HGBP::attachBuffer(). The GraphicBuffer(s) not in use are
+ * cleared from the cache inside the BlockPool.
+ *
+ * When the surface is switched to a null surface, all the
+ * GraphicBuffers in the cache are cleared.
+ *
+ *
+ * Workaround w.r.t. b/322731059 (Deferring cleaning the cache)
+ *
+ * Some vendor devices have issues with graphic buffer lifecycle management,
+ * where the graphic buffers get released even when the cloned native handles
+ * in the remote process are not closed yet. This issue led to rare crashes
+ * on those devices when the cache was cleared early.
+ *
+ * We work around the crash by deferring the clearing of the cache.
+ * The workaround is not enabled by default, and can be enabled via a
+ * system property as shown below:
+ *
+ *        'debug.codec2.bqpool_dealloc_after_stop' = 1
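+ *        (e.g. 'adb shell setprop debug.codec2.bqpool_dealloc_after_stop 1')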
+ *
+ * Setting the debug property causes \::setDeferDeallocationAfterStop() to be
+ * called after the blockpool is created, which enables the deferral.
+ *
+ * With deferral enabled, clearing the GraphicBuffer(s) is delayed until
+ *  1) \::clearDeferredBlocks() is called,
+ *        typically after the HAL processes a stop() request, or
+ *  2) a new ::fetchGraphicBlock() is called.
+ *
+ *  Since deferral delays the deallocation, it results in more memory
+ *  consumption during that brief period.
  */
 class C2BufferQueueBlockPool : public C2BlockPool {
 public:
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 9d9b574..f6ddc3e 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -574,7 +574,7 @@
      * For privacy, the following usages can not be recorded: AAUDIO_VOICE_COMMUNICATION*,
      * AAUDIO_USAGE_NOTIFICATION*, AAUDIO_USAGE_ASSISTANCE* and {@link #AAUDIO_USAGE_ASSISTANT}.
      *
-     * On <a href="/reference/android/os/Build.VERSION_CODES#Q">Build.VERSION_CODES</a>,
+     * On <a href="/reference/android/os/Build.VERSION_CODES#Q">Q</a>,
      * this means only {@link #AAUDIO_USAGE_MEDIA} and {@link #AAUDIO_USAGE_GAME} may be captured.
      *
      * See <a href="/reference/android/media/AudioAttributes.html#ALLOW_CAPTURE_BY_ALL">
diff --git a/media/libaudiohal/impl/StreamHalAidl.cpp b/media/libaudiohal/impl/StreamHalAidl.cpp
index 12a8019..fb4b549 100644
--- a/media/libaudiohal/impl/StreamHalAidl.cpp
+++ b/media/libaudiohal/impl/StreamHalAidl.cpp
@@ -445,6 +445,11 @@
                    state == StreamDescriptor::State::TRANSFER_PAUSED ||
                    state == StreamDescriptor::State::DRAIN_PAUSED) {
             return sendCommand(makeHalCommand<HalCommand::Tag::start>(), reply);
+        } else if (state == StreamDescriptor::State::ACTIVE ||
+                   state == StreamDescriptor::State::TRANSFERRING ||
+                   state == StreamDescriptor::State::DRAINING) {
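+            // The stream is already running; treat the repeated start request as a no-op.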
+            ALOGD("%s: already in stream state: %s", __func__, toString(state).c_str());
+            return OK;
         } else {
             ALOGE("%s: unexpected stream state: %s (expected IDLE or one of *PAUSED states)",
                         __func__, toString(state).c_str());
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index e918b5e..15188b0 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -72,6 +72,9 @@
 static const int64_t kMaxMetadataSize = 0x4000000LL;   // 64MB max per-frame metadata size
 static const int64_t kMaxCttsOffsetTimeUs = 30 * 60 * 1000000LL;  // 30 minutes
 static const size_t kESDSScratchBufferSize = 10;  // kMaxAtomSize in Mpeg4Extractor 64MB
+// Allow up to 100 milliseconds, which is safely above the maximum delay observed in manual testing
+// between posting from setNextFd and handling it.
+static const int64_t kFdCondWaitTimeoutNs = 100000000;
 
 static const char kMetaKey_Version[]    = "com.android.version";
 static const char kMetaKey_Manufacturer[]      = "com.android.manufacturer";
@@ -1262,9 +1265,13 @@
         return OK;
     }
 
+    // Wait for the signal only if the next file descriptor is not available yet.
     if (mNextFd == -1) {
-        ALOGW("No FileDescriptor for next recording");
-        return INVALID_OPERATION;
+        status_t res = mFdCond.waitRelative(mLock, kFdCondWaitTimeoutNs);
+        if (res != OK) {
+            ALOGW("No FileDescriptor for next recording");
+            return INVALID_OPERATION;
+        }
     }
 
     mSwitchPending = true;
@@ -2433,6 +2440,7 @@
         return INVALID_OPERATION;
     }
     mNextFd = dup(fd);
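+    // Wake up any thread waiting on mFdCond for the next file descriptor.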
+    mFdCond.signal();
     return OK;
 }
 
@@ -4886,8 +4894,15 @@
             int32_t mediaTime = (mFirstSampleStartOffsetUs * mTimeScale + 5E5) / 1E6;
             int32_t firstSampleOffsetTicks =
                     (mFirstSampleStartOffsetUs * mvhdTimeScale + 5E5) / 1E6;
-            // samples before 0 don't count in for duration, hence subtract firstSampleOffsetTicks.
-            addOneElstTableEntry(tkhdDurationTicks - firstSampleOffsetTicks, mediaTime, 1, 0);
+            if (tkhdDurationTicks >= firstSampleOffsetTicks) {
+                // samples before 0 don't count in for duration, hence subtract
+                // firstSampleOffsetTicks.
+                addOneElstTableEntry(tkhdDurationTicks - firstSampleOffsetTicks, mediaTime, 1, 0);
+            } else {
+                ALOGW("The track header duration %" PRId64
+                      " is smaller than the first sample offset %" PRId64,
+                      mTrackDurationUs, mFirstSampleStartOffsetUs);
+            }
         } else {
             // Track starting at zero.
             ALOGV("No edit list entry required for this track");
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 054a4b8..ee75129 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -144,6 +144,7 @@
     std::mutex mFallocMutex;
     bool mPreAllocFirstTime; // Pre-allocate space for file and track headers only once per file.
     uint64_t mPrevAllTracksTotalMetaDataSizeEstimate;
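+    // Signaled when the next output file descriptor becomes available (see setNextFd()).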
+    Condition mFdCond;
 
     List<Track *> mTracks;
 
diff --git a/media/ndk/include/media/NdkMediaDataSource.h b/media/ndk/include/media/NdkMediaDataSource.h
index 197e202..def142c 100644
--- a/media/ndk/include/media/NdkMediaDataSource.h
+++ b/media/ndk/include/media/NdkMediaDataSource.h
@@ -49,16 +49,16 @@
 /*
  * AMediaDataSource's callbacks will be invoked on an implementation-defined thread
  * or thread pool. No guarantees are provided about which thread(s) will be used for
- * callbacks. For example, |close| can be invoked from a different thread than the
- * thread invoking |readAt|. As such, the Implementations of AMediaDataSource callbacks
+ * callbacks. For example, `close` can be invoked from a different thread than the
+ * thread invoking `readAt`. As such, implementations of AMediaDataSource callbacks
  * must be threadsafe.
  */
 
 /**
- * Called to request data from the given |offset|.
+ * Called to request data from the given `offset`.
  *
- * Implementations should should write up to |size| bytes into
- * |buffer|, and return the number of bytes written.
+ * Implementations should write up to `size` bytes into
+ * `buffer`, and return the number of bytes written.
  *
  * Return 0 if size is zero (thus no bytes are read).
  *
@@ -78,9 +78,9 @@
  * Called to close the data source, unblock reads, and release associated
  * resources.
  *
- * The NDK media framework guarantees that after the first |close| is
+ * The NDK media framework guarantees that after the first `close` is
  * called, no future callbacks will be invoked on the data source except
- * for |close| itself.
+ * for `close` itself.
  *
  * Closing a data source allows readAt calls that were blocked waiting
  * for I/O data to return promptly.
@@ -101,7 +101,7 @@
 
 /**
  * Called to get an estimate of the number of bytes that can be read from this data source
- * starting at |offset| without blocking for I/O.
+ * starting at `offset` without blocking for I/O.
  *
  * Return -1 when such an estimate is not possible.
  */
@@ -111,10 +111,10 @@
  * Create new media data source. Returns NULL if memory allocation
  * for the new data source object fails.
  *
- * Set the |uri| from which the data source will read,
+ * Set the `uri` from which the data source will read,
  * plus additional http headers when initiating the request.
  *
- * Headers will contain corresponding items from |key_values|
+ * Headers will contain corresponding items from `key_values`
  * in the following fashion:
  *
  * key_values[0]:key_values[1]
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index 68e9ad4..fe99241 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -1583,7 +1583,7 @@
         // The chroma plane could be either Cb first, or Cr first. Take the
         // smaller address.
         uint8_t *src = std::min(yuvBuffer.dataCb, yuvBuffer.dataCr);
-        MediaImage2::PlaneIndex dstPlane = codecUvOffsetDiff > 0 ? MediaImage2::U : MediaImage2::V;
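+        // Pick the destination plane index that matches the codec's chroma ordering.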
+        MediaImage2::PlaneIndex dstPlane = codecUPlaneFirst ? MediaImage2::U : MediaImage2::V;
         for (auto row = top/2; row < (top+height)/2; row++) {
             uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[dstPlane].mOffset +
                     imageInfo->mPlane[dstPlane].mRowInc * (row - top/2);