Merge "Camera: fix frame capture trace pair" into pi-dev
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 936733d..61fc897 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -893,7 +893,7 @@
                 VideoFrame *frame = (VideoFrame *)mem->pointer();
 
                 CHECK_EQ(writeJpegFile("/sdcard/out.jpg",
-                            (uint8_t *)frame + sizeof(VideoFrame),
+                            frame->getFlattenedData(),
                             frame->mWidth, frame->mHeight), 0);
             }
 
diff --git a/drm/libmediadrm/CryptoHal.cpp b/drm/libmediadrm/CryptoHal.cpp
index 61b5127..f229751 100644
--- a/drm/libmediadrm/CryptoHal.cpp
+++ b/drm/libmediadrm/CryptoHal.cpp
@@ -22,7 +22,6 @@
 #include <android/hidl/manager/1.0/IServiceManager.h>
 
 #include <binder/IMemory.h>
-#include <cutils/native_handle.h>
 #include <hidlmemory/FrameworkUtils.h>
 #include <media/hardware/CryptoAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -245,11 +244,6 @@
         ALOGE("setHeapBase(): heap is NULL");
         return -1;
     }
-    native_handle_t* nativeHandle = native_handle_create(1, 0);
-    if (!nativeHandle) {
-        ALOGE("setHeapBase(), failed to create native handle");
-        return -1;
-    }
 
     Mutex::Autolock autoLock(mLock);
 
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index 4e8ad52..06e8487 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -1186,9 +1186,9 @@
 
     DrmSessionManager::Instance()->useSession(sessionId);
 
-    Status status = mPlugin->setCipherAlgorithm(toHidlVec(sessionId),
+    Return<Status> status = mPlugin->setCipherAlgorithm(toHidlVec(sessionId),
             toHidlString(algorithm));
-    return toStatusT(status);
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
 }
 
 status_t DrmHal::setMacAlgorithm(Vector<uint8_t> const &sessionId,
@@ -1198,9 +1198,9 @@
 
     DrmSessionManager::Instance()->useSession(sessionId);
 
-    Status status = mPlugin->setMacAlgorithm(toHidlVec(sessionId),
+    Return<Status> status = mPlugin->setMacAlgorithm(toHidlVec(sessionId),
             toHidlString(algorithm));
-    return toStatusT(status);
+    return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
 }
 
 status_t DrmHal::encrypt(Vector<uint8_t> const &sessionId,
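
The two hunks above share a pattern: a HIDL call returns Return<Status>, which can fail at the transport layer (for example, the remote DRM HAL process died) independently of the Status value the plugin reports. Checking isOk() first and mapping a transport failure to DEAD_OBJECT keeps those two failure modes distinct. A minimal sketch of the same idea as a helper; the helper name is ours and not part of this change:

    // Hypothetical helper sketch: a transport failure becomes DEAD_OBJECT,
    // otherwise the plugin's Status is converted as before.
    static status_t toStatusTChecked(const Return<Status>& ret) {
        return ret.isOk() ? toStatusT(ret) : DEAD_OBJECT;
    }
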
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index a9d4dd1..8b8824f 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -25,94 +25,32 @@
 
 namespace android {
 
-// Represents a color converted (RGB-based) video frame
-// with bitmap pixels stored in FrameBuffer
+// Represents a color converted (RGB-based) video frame with bitmap
+// pixels stored in FrameBuffer.
+// In a VideoFrame struct stored in IMemory, frame data and ICC data
+// come after the VideoFrame structure. Their locations can be retrieved
+// by getFlattenedData() and getFlattenedIccData().
 class VideoFrame
 {
 public:
     // Construct a VideoFrame object with the specified parameters,
-    // will allocate frame buffer if |allocate| is set to true, will
-    // allocate buffer to hold ICC data if |iccData| and |iccSize|
-    // indicate its presence.
+    // will calculate frame buffer size if |hasData| is set to true.
     VideoFrame(uint32_t width, uint32_t height,
             uint32_t displayWidth, uint32_t displayHeight,
-            uint32_t angle, uint32_t bpp, bool allocate,
-            const void *iccData, size_t iccSize):
+            uint32_t angle, uint32_t bpp, bool hasData, size_t iccSize):
         mWidth(width), mHeight(height),
         mDisplayWidth(displayWidth), mDisplayHeight(displayHeight),
         mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width),
-        mSize(0), mIccSize(0), mReserved(0), mData(0), mIccData(0) {
-        if (allocate) {
-            mSize = mRowBytes * mHeight;
-            mData = new uint8_t[mSize];
-            if (mData == NULL) {
-                mSize = 0;
-            }
-        }
-
-        if (iccData != NULL && iccSize > 0) {
-            mIccSize = iccSize;
-            mIccData = new uint8_t[iccSize];
-            if (mIccData != NULL) {
-                memcpy(mIccData, iccData, iccSize);
-            } else {
-                mIccSize = 0;
-            }
-        }
+        mSize(hasData ? (bpp * width * height) : 0),
+        mIccSize(iccSize), mReserved(0) {
     }
 
-    // Deep copy of both the information fields and the frame data
-    VideoFrame(const VideoFrame& copy) {
-        copyInfoOnly(copy);
-
-        mSize = copy.mSize;
-        mData = NULL;  // initialize it first
-        if (mSize > 0 && copy.mData != NULL) {
-            mData = new uint8_t[mSize];
-            if (mData != NULL) {
-                memcpy(mData, copy.mData, mSize);
-            } else {
-                mSize = 0;
-            }
-        }
-
-        mIccSize = copy.mIccSize;
-        mIccData = NULL;  // initialize it first
-        if (mIccSize > 0 && copy.mIccData != NULL) {
-            mIccData = new uint8_t[mIccSize];
-            if (mIccData != NULL) {
-                memcpy(mIccData, copy.mIccData, mIccSize);
-            } else {
-                mIccSize = 0;
-            }
-        }
-    }
-
-    ~VideoFrame() {
-        if (mData != 0) {
-            delete[] mData;
-        }
-        if (mIccData != 0) {
-            delete[] mIccData;
-        }
-    }
-
-    // Copy |copy| to a flattened VideoFrame in IMemory, 'this' must point to
-    // a chunk of memory back by IMemory of size at least getFlattenedSize()
-    // of |copy|.
-    void copyFlattened(const VideoFrame& copy) {
-        copyInfoOnly(copy);
-
-        mSize = copy.mSize;
-        mData = NULL;  // initialize it first
-        if (copy.mSize > 0 && copy.mData != NULL) {
-            memcpy(getFlattenedData(), copy.mData, copy.mSize);
-        }
-
-        mIccSize = copy.mIccSize;
-        mIccData = NULL;  // initialize it first
-        if (copy.mIccSize > 0 && copy.mIccData != NULL) {
-            memcpy(getFlattenedIccData(), copy.mIccData, copy.mIccSize);
+    void init(const VideoFrame& copy, const void* iccData, size_t iccSize) {
+        *this = copy;
+        if (mIccSize == iccSize && iccSize > 0 && iccData != NULL) {
+            memcpy(getFlattenedIccData(), iccData, iccSize);
+        } else {
+            mIccSize = 0;
         }
     }
 
@@ -139,35 +77,9 @@
     int32_t  mRotationAngle;   // Rotation angle, clockwise, should be multiple of 90
     uint32_t mBytesPerPixel;   // Number of bytes per pixel
     uint32_t mRowBytes;        // Number of bytes per row before rotation
-    uint32_t mSize;            // Number of bytes in mData
-    uint32_t mIccSize;         // Number of bytes in mIccData
+    uint32_t mSize;            // Number of bytes of frame data
+    uint32_t mIccSize;         // Number of bytes of ICC data
     uint32_t mReserved;        // (padding to make mData 64-bit aligned)
-
-    // mData should be 64-bit aligned to prevent additional padding
-    uint8_t* mData;            // Actual binary data
-    // pad structure so it's the same size on 64-bit and 32-bit
-    char     mPadding[8 - sizeof(mData)];
-
-    // mIccData should be 64-bit aligned to prevent additional padding
-    uint8_t* mIccData;            // Actual binary data
-    // pad structure so it's the same size on 64-bit and 32-bit
-    char     mIccPadding[8 - sizeof(mIccData)];
-
-private:
-    //
-    // Utility methods used only within VideoFrame struct
-    //
-
-    // Copy the information fields only
-    void copyInfoOnly(const VideoFrame& copy) {
-        mWidth = copy.mWidth;
-        mHeight = copy.mHeight;
-        mDisplayWidth = copy.mDisplayWidth;
-        mDisplayHeight = copy.mDisplayHeight;
-        mRotationAngle = copy.mRotationAngle;
-        mBytesPerPixel = copy.mBytesPerPixel;
-        mRowBytes = copy.mRowBytes;
-    }
 };
 
 }; // namespace android
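
The stagefright.cpp hunk at the top of this change already shows the layout the new header comment describes: the frame pixels start right after the VideoFrame struct inside the IMemory chunk, and the ICC data follows the pixels. A small standalone sketch of that flattened layout, using a struct name of our own and only the two size fields, to make the offset arithmetic explicit (the real accessors in VideoFrame.h are not shown in this diff):

    #include <cstddef>
    #include <cstdint>

    // Sketch only: [header][frame data (mSize bytes)][ICC data (mIccSize bytes)]
    // packed contiguously in one allocation.
    struct FlatFrame {
        uint32_t mSize;     // bytes of frame data following the struct
        uint32_t mIccSize;  // bytes of ICC data following the frame data

        size_t   flattenedSize() const { return sizeof(FlatFrame) + mSize + mIccSize; }
        uint8_t* flattenedData()       { return reinterpret_cast<uint8_t*>(this) + sizeof(FlatFrame); }
        uint8_t* flattenedIccData()    { return flattenedData() + mSize; }
    };
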
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index 1826cc1..d657582 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -360,7 +360,15 @@
 
             res = mCluster->Parse(pos, len);
             ALOGV("Parse (2) returned %ld", res);
-            CHECK_GE(res, 0);
+
+            if (res < 0) {
+                // I/O error
+
+                ALOGE("Cluster::Parse returned result %ld", res);
+
+                mCluster = NULL;
+                break;
+            }
 
             mBlockEntryIndex = 0;
             continue;
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index dad36ec..ce5ca63 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -628,7 +628,7 @@
             track->meta.setInt32(kKeyTrackID, imageIndex);
             track->includes_expensive_metadata = false;
             track->skipTrack = false;
-            track->timescale = 0;
+            track->timescale = 1000000;
         }
     }
 
diff --git a/media/img_utils/include/img_utils/DngUtils.h b/media/img_utils/include/img_utils/DngUtils.h
index 1d8df9c..de8f120 100644
--- a/media/img_utils/include/img_utils/DngUtils.h
+++ b/media/img_utils/include/img_utils/DngUtils.h
@@ -39,11 +39,16 @@
  */
 class ANDROID_API OpcodeListBuilder : public LightRefBase<OpcodeListBuilder> {
     public:
+        // Note that the Adobe DNG 1.4 spec for Bayer phase (defined for the
+        // FixBadPixelsConstant and FixBadPixelsList opcodes) is incorrect. It's
+        // inconsistent with the DNG SDK (cf. dng_negative::SetBayerMosaic and
+        // dng_opcode_FixBadPixelsList::IsGreen), and Adobe confirms that the
+        // spec should be updated to match the SDK.
         enum CfaLayout {
-            CFA_RGGB = 0,
-            CFA_GRBG,
-            CFA_GBRG,
+            CFA_GRBG = 0,
+            CFA_RGGB,
             CFA_BGGR,
+            CFA_GBRG,
         };
 
         OpcodeListBuilder();
diff --git a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
index 2623697..f618d3d 100644
--- a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
+++ b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
@@ -495,7 +495,7 @@
     }
 
     void printStatus() override {
-        printf("state = %d, echo gain = %f ", mState, mEchoGain);
+        printf("st = %d, echo gain = %f ", mState, mEchoGain);
     }
 
     static void sendImpulse(float *outputData, int outputChannelCount) {
@@ -670,7 +670,7 @@
         printf(LOOPBACK_RESULT_TAG "phase.offset       = %7.5f\n", mPhaseOffset);
         printf(LOOPBACK_RESULT_TAG "ref.phase          = %7.5f\n", mPhase);
         printf(LOOPBACK_RESULT_TAG "frames.accumulated = %6d\n", mFramesAccumulated);
-        printf(LOOPBACK_RESULT_TAG "sine.period        = %6d\n", mPeriod);
+        printf(LOOPBACK_RESULT_TAG "sine.period        = %6d\n", mSinePeriod);
         printf(LOOPBACK_RESULT_TAG "test.state         = %6d\n", mState);
         printf(LOOPBACK_RESULT_TAG "frame.count        = %6d\n", mFrameCounter);
         // Did we ever get a lock?
@@ -684,7 +684,7 @@
     }
 
     void printStatus() override {
-        printf("  state = %d, glitches = %3d,", mState, mGlitchCount);
+        printf("st = %d, #gl = %3d,", mState, mGlitchCount);
     }
 
     double calculateMagnitude(double *phasePtr = NULL) {
@@ -709,6 +709,8 @@
     void process(float *inputData, int inputChannelCount,
                  float *outputData, int outputChannelCount,
                  int numFrames) override {
+        mProcessCount++;
+
         float peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
         if (peak > mPeakAmplitude) {
             mPeakAmplitude = peak;
@@ -720,6 +722,7 @@
             float sinOut = sinf(mPhase);
 
             switch (mState) {
+                case STATE_IDLE:
                 case STATE_IMMUNE:
                 case STATE_WAITING_FOR_SIGNAL:
                     break;
@@ -728,7 +731,7 @@
                     mCosAccumulator += sample * cosf(mPhase);
                     mFramesAccumulated++;
                     // Must be a multiple of the period or the calculation will not be accurate.
-                    if (mFramesAccumulated == mPeriod * 4) {
+                    if (mFramesAccumulated == mSinePeriod * PERIODS_NEEDED_FOR_LOCK) {
                         mPhaseOffset = 0.0;
                         mMagnitude = calculateMagnitude(&mPhaseOffset);
                         if (mMagnitude > mThreshold) {
@@ -754,7 +757,22 @@
                         //       mFrameCounter, mGlitchCount, predicted, sample);
                         mState = STATE_IMMUNE;
                         //printf("%5d: switch to STATE_IMMUNE\n", mFrameCounter);
-                        mDownCounter = mPeriod;  // Set duration of IMMUNE state.
+                        mDownCounter = mSinePeriod;  // Set duration of IMMUNE state.
+                    }
+
+                    // Track incoming signal and slowly adjust magnitude to account
+                    // for drift in the DRC or AGC.
+                    mSinAccumulator += sample * sinOut;
+                    mCosAccumulator += sample * cosf(mPhase);
+                    mFramesAccumulated++;
+                    // Must be a multiple of the period or the calculation will not be accurate.
+                    if (mFramesAccumulated == mSinePeriod) {
+                        const double coefficient = 0.1;
+                        double phaseOffset = 0.0;
+                        double magnitude = calculateMagnitude(&phaseOffset);
+                        // One pole averaging filter.
+                        mMagnitude = (mMagnitude * (1.0 - coefficient)) + (magnitude * coefficient);
+                        resetAccumulator();
                     }
                 } break;
             }
@@ -775,6 +793,9 @@
 
         // Do these once per buffer.
         switch (mState) {
+            case STATE_IDLE:
+                mState = STATE_IMMUNE; // so we can tell when processing has started
+                break;
             case STATE_IMMUNE:
                 mDownCounter -= numFrames;
                 if (mDownCounter <= 0) {
@@ -805,21 +826,29 @@
     void reset() override {
         mGlitchCount = 0;
         mState = STATE_IMMUNE;
-        mPhaseIncrement = 2.0 * M_PI / mPeriod;
-        printf("phaseInc = %f for period %d\n", mPhaseIncrement, mPeriod);
+        mDownCounter = IMMUNE_FRAME_COUNT;
+        mPhaseIncrement = 2.0 * M_PI / mSinePeriod;
+        printf("phaseInc = %f for period %d\n", mPhaseIncrement, mSinePeriod);
         resetAccumulator();
+        mProcessCount = 0;
     }
 
 private:
 
     enum sine_state_t {
+        STATE_IDLE,
         STATE_IMMUNE,
         STATE_WAITING_FOR_SIGNAL,
         STATE_WAITING_FOR_LOCK,
         STATE_LOCKED
     };
 
-    int     mPeriod = 79;
+    enum constants {
+        IMMUNE_FRAME_COUNT = 48 * 500,
+        PERIODS_NEEDED_FOR_LOCK = 8
+    };
+
+    int     mSinePeriod = 79;
     double  mPhaseIncrement = 0.0;
     double  mPhase = 0.0;
     double  mPhaseOffset = 0.0;
@@ -828,18 +857,19 @@
     double  mThreshold = 0.005;
     double  mTolerance = 0.01;
     int32_t mFramesAccumulated = 0;
+    int32_t mProcessCount = 0;
     double  mSinAccumulator = 0.0;
     double  mCosAccumulator = 0.0;
     int32_t mGlitchCount = 0;
     double  mPeakAmplitude = 0.0;
-    int     mDownCounter = 4000;
+    int     mDownCounter = IMMUNE_FRAME_COUNT;
     int32_t mFrameCounter = 0;
     float   mOutputAmplitude = 0.75;
 
     PseudoRandom  mWhiteNoise;
     float   mNoiseAmplitude = 0.00; // Used to experiment with warbling caused by DRC.
 
-    sine_state_t  mState = STATE_IMMUNE;
+    sine_state_t  mState = STATE_IDLE;
 };
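
The magnitude-tracking block added above (commented "Track incoming signal and slowly adjust magnitude") is a one-pole averaging filter, also known as an exponential moving average: each new measurement pulls the stored magnitude toward it by a fixed fraction, so slow DRC/AGC drift is followed while short-term noise is smoothed. A self-contained sketch of that filter, reusing the patch's 0.1 coefficient but with a class name of our own:

    #include <cstdio>

    // One-pole averaging filter: out = out * (1 - k) + in * k.
    class OnePoleAverager {
    public:
        explicit OnePoleAverager(double coefficient) : mCoefficient(coefficient) {}
        double process(double input) {
            mAverage = mAverage * (1.0 - mCoefficient) + input * mCoefficient;
            return mAverage;
        }
    private:
        double mCoefficient;
        double mAverage = 0.0;
    };

    int main() {
        OnePoleAverager avg(0.1);                // same coefficient as the patch
        for (int i = 0; i < 5; i++) {
            printf("%.4f\n", avg.process(1.0));  // steps toward 1.0: 0.1, 0.19, 0.271, ...
        }
        return 0;
    }
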
 
 
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 0ebcdbd..26d1e4b 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -38,21 +38,25 @@
 // Tag for machine readable results as property = value pairs
 #define RESULT_TAG              "RESULT: "
 #define NUM_SECONDS             5
+#define PERIOD_MILLIS           1000
 #define NUM_INPUT_CHANNELS      1
 #define FILENAME_ALL            "/data/loopback_all.wav"
 #define FILENAME_ECHOS          "/data/loopback_echos.wav"
-#define APP_VERSION             "0.2.01"
+#define APP_VERSION             "0.2.03"
+
+constexpr int kNumCallbacksToDrain   = 20;
+constexpr int kNumCallbacksToDiscard = 20;
 
 struct LoopbackData {
     AAudioStream      *inputStream = nullptr;
     int32_t            inputFramesMaximum = 0;
     int16_t           *inputShortData = nullptr;
     float             *inputFloatData = nullptr;
-    int16_t            peakShort = 0;
     aaudio_format_t    actualInputFormat = AAUDIO_FORMAT_INVALID;
     int32_t            actualInputChannelCount = 0;
     int32_t            actualOutputChannelCount = 0;
-    int32_t            inputBuffersToDiscard = 10;
+    int32_t            numCallbacksToDrain = kNumCallbacksToDrain;
+    int32_t            numCallbacksToDiscard = kNumCallbacksToDiscard;
     int32_t            minNumFrames = INT32_MAX;
     int32_t            maxNumFrames = 0;
     int32_t            insufficientReadCount = 0;
@@ -131,23 +135,32 @@
         myData->minNumFrames = numFrames;
     }
 
-    if (myData->inputBuffersToDiscard > 0) {
+    // Silence the output.
+    int32_t numBytes = numFrames * myData->actualOutputChannelCount * sizeof(float);
+    memset(audioData, 0 /* value */, numBytes);
+
+    if (myData->numCallbacksToDrain > 0) {
         // Drain the input.
         do {
             framesRead = readFormattedData(myData, numFrames);
-            if (framesRead < 0) {
-                result = AAUDIO_CALLBACK_RESULT_STOP;
-            } else if (framesRead > 0) {
-                myData->inputBuffersToDiscard--;
-            }
+            // Ignore errors because input stream may not be started yet.
         } while (framesRead > 0);
+        myData->numCallbacksToDrain--;
 
-        // Silence the output.
-        int32_t numBytes = numFrames * myData->actualOutputChannelCount * sizeof(float);
-        memset(audioData, 0 /* value */, numBytes);
+    } else if (myData->numCallbacksToDiscard > 0) {
+        // Ignore. Allow the input to fill back up to equilibrium with the output.
+        framesRead = readFormattedData(myData, numFrames);
+        if (framesRead < 0) {
+            result = AAUDIO_CALLBACK_RESULT_STOP;
+        }
+        myData->numCallbacksToDiscard--;
 
     } else {
 
+        int32_t numInputBytes = numFrames * myData->actualInputChannelCount * sizeof(float);
+        memset(myData->inputFloatData, 0 /* value */, numInputBytes);
+
+        // Process data after equilibrium.
         int64_t inputFramesWritten = AAudioStream_getFramesWritten(myData->inputStream);
         int64_t inputFramesRead = AAudioStream_getFramesRead(myData->inputStream);
         int64_t framesAvailable = inputFramesWritten - inputFramesRead;
@@ -155,6 +168,7 @@
         if (framesRead < 0) {
             result = AAUDIO_CALLBACK_RESULT_STOP;
         } else {
+
             if (framesRead < numFrames) {
                 if(framesRead < (int32_t) framesAvailable) {
                     printf("insufficient but numFrames = %d, framesRead = %d, available = %d\n",
@@ -172,13 +186,13 @@
             // Save for later.
             myData->audioRecording.write(myData->inputFloatData,
                                          myData->actualInputChannelCount,
-                                         framesRead);
+                                         numFrames);
             // Analyze the data.
             myData->loopbackProcessor->process(myData->inputFloatData,
                                                myData->actualInputChannelCount,
                                                outputData,
                                                myData->actualOutputChannelCount,
-                                               framesRead);
+                                               numFrames);
             myData->isDone = myData->loopbackProcessor->isDone();
             if (myData->isDone) {
                 result = AAUDIO_CALLBACK_RESULT_STOP;
@@ -366,7 +380,9 @@
     }
 
     int32_t requestedDuration = argParser.getDurationSeconds();
-    int32_t recordingDuration = std::min(60, requestedDuration);
+    int32_t requestedDurationMillis = requestedDuration * MILLIS_PER_SECOND;
+    int32_t timeMillis = 0;
+    int32_t recordingDuration = std::min(60 * 5, requestedDuration);
 
     switch(testMode) {
         case TEST_SINE_MAGNITUDE:
@@ -449,7 +465,6 @@
 
     // Allocate a buffer for the audio data.
     loopbackData.inputFramesMaximum = 32 * AAudioStream_getFramesPerBurst(inputStream);
-    loopbackData.inputBuffersToDiscard = 200;
 
     if (loopbackData.actualInputFormat == AAUDIO_FORMAT_PCM_I16) {
         loopbackData.inputShortData = new int16_t[loopbackData.inputFramesMaximum
@@ -460,13 +475,7 @@
 
     loopbackData.loopbackProcessor->reset();
 
-    result = recorder.start();
-    if (result != AAUDIO_OK) {
-        printf("ERROR - AAudioStream_requestStart(input) returned %d = %s\n",
-               result, AAudio_convertResultToText(result));
-        goto finish;
-    }
-
+    // Start OUTPUT first so INPUT does not overflow.
     result = player.start();
     if (result != AAUDIO_OK) {
         printf("ERROR - AAudioStream_requestStart(output) returned %d = %s\n",
@@ -474,9 +483,15 @@
         goto finish;
     }
 
-    printf("------- sleep while the callback runs --------------\n");
-    fflush(stdout);
-    for (int i = requestedDuration; i > 0 ; i--) {
+    result = recorder.start();
+    if (result != AAUDIO_OK) {
+        printf("ERROR - AAudioStream_requestStart(input) returned %d = %s\n",
+               result, AAudio_convertResultToText(result));
+        goto finish;
+    }
+
+    printf("------- sleep and log while the callback runs --------------\n");
+    while (timeMillis <= requestedDurationMillis) {
         if (loopbackData.inputError != AAUDIO_OK) {
             printf("  ERROR on input stream\n");
             break;
@@ -487,10 +502,9 @@
                 printf("  test says it is done!\n");
                 break;
         } else {
-            sleep(1);
-            printf("%4d: ", i);
+            // Log a line of stream data.
+            printf("%7.3f: ", 0.001 * timeMillis); // display in seconds
             loopbackData.loopbackProcessor->printStatus();
-
             printf(" insf %3d,", (int) loopbackData.insufficientReadCount);
 
             int64_t inputFramesWritten = AAudioStream_getFramesWritten(inputStream);
@@ -498,7 +512,7 @@
             int64_t outputFramesWritten = AAudioStream_getFramesWritten(outputStream);
             int64_t outputFramesRead = AAudioStream_getFramesRead(outputStream);
             static const int textOffset = strlen("AAUDIO_STREAM_STATE_"); // strip this off
-            printf(" INPUT: wr %7lld - rd %7lld = %5lld, state %s, oruns %3d | ",
+            printf(" | INPUT: wr %7lld - rd %7lld = %5lld, st %8s, oruns %3d",
                    (long long) inputFramesWritten,
                    (long long) inputFramesRead,
                    (long long) (inputFramesWritten - inputFramesRead),
@@ -506,7 +520,7 @@
                            AAudioStream_getState(inputStream))[textOffset],
                    AAudioStream_getXRunCount(inputStream));
 
-            printf(" OUTPUT: wr %7lld - rd %7lld = %5lld, state %s, uruns %3d\n",
+            printf(" | OUTPUT: wr %7lld - rd %7lld = %5lld, st %8s, uruns %3d\n",
                    (long long) outputFramesWritten,
                    (long long) outputFramesRead,
                     (long long) (outputFramesWritten - outputFramesRead),
@@ -515,6 +529,9 @@
                    AAudioStream_getXRunCount(outputStream)
             );
         }
+        int32_t periodMillis = (timeMillis < 2000) ? PERIOD_MILLIS / 4 : PERIOD_MILLIS;
+        usleep(periodMillis * 1000);
+        timeMillis += periodMillis;
     }
 
     result = player.stop();
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 4a2a0a8..40ebb76 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -59,10 +59,15 @@
     return (int16_t) roundf(fmaxf(fminf(f * scale, scale - 1.f), -scale));
 }
 
+// Clip to valid range of a float sample to prevent excessive volume.
+// By using fmin and fmax we also protect against NaN.
+static float clipToMinMaxHeadroom(float input) {
+    return fmin(MAX_HEADROOM, fmax(MIN_HEADROOM, input));
+}
+
 static float clipAndClampFloatToPcm16(float sample, float scaler) {
     // Clip to valid range of a float sample to prevent excessive volume.
-    if (sample > MAX_HEADROOM) sample = MAX_HEADROOM;
-    else if (sample < MIN_HEADROOM) sample = MIN_HEADROOM;
+    sample = clipToMinMaxHeadroom(sample);
 
     // Scale and convert to a short.
     float fval = sample * scaler;
@@ -127,6 +132,7 @@
     }
 }
 
+
 // This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
 void AAudio_linearRamp(const float *source,
                        float *destination,
@@ -139,10 +145,8 @@
     for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
         for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
             float sample = *source++;
-
             // Clip to valid range of a float sample to prevent excessive volume.
-            if (sample > MAX_HEADROOM) sample = MAX_HEADROOM;
-            else if (sample < MIN_HEADROOM) sample = MIN_HEADROOM;
+            sample = clipToMinMaxHeadroom(sample);
 
             *destination++ = sample * scaler;
         }
@@ -240,8 +244,7 @@
         float sample = *source++;
 
         // Clip to valid range of a float sample to prevent excessive volume.
-        if (sample > MAX_HEADROOM) sample = MAX_HEADROOM;
-        else if (sample < MIN_HEADROOM) sample = MIN_HEADROOM;
+        sample = clipToMinMaxHeadroom(sample);
 
         const float scaler = amplitude1 + (frameIndex * delta);
         float sampleScaled = sample * scaler;
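
The new clipToMinMaxHeadroom() comment notes that fmin/fmax also protect against NaN: when one argument is NaN, both functions return the other argument, so a NaN sample ends up pinned to a headroom bound instead of propagating into the mixed output. A standalone sketch of that behavior; the headroom values here are illustrative and not taken from the header:

    #include <cmath>
    #include <cstdio>

    static const float kMinHeadroom = -1.0f;   // illustrative bounds only
    static const float kMaxHeadroom =  1.0f;

    static float clipSample(float input) {
        // fmaxf(kMinHeadroom, NaN) returns kMinHeadroom, so NaN never escapes.
        return fminf(kMaxHeadroom, fmaxf(kMinHeadroom, input));
    }

    int main() {
        printf("%f\n", clipSample(2.5f));    // 1.000000
        printf("%f\n", clipSample(-3.0f));   // -1.000000
        printf("%f\n", clipSample(NAN));     // -1.000000 (clamped, not NaN)
        return 0;
    }
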
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 1c4a80e..39ee437 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -20,6 +20,7 @@
 #include <utils/Log.h>
 #include <binder/IServiceManager.h>
 #include <binder/ProcessState.h>
+#include <binder/IPCThreadState.h>
 #include <media/AudioResamplerPublic.h>
 #include <media/AudioSystem.h>
 #include <media/IAudioFlinger.h>
@@ -75,7 +76,9 @@
         af = gAudioFlinger;
     }
     if (afc != 0) {
+        int64_t token = IPCThreadState::self()->clearCallingIdentity();
         af->registerClient(afc);
+        IPCThreadState::self()->restoreCallingIdentity(token);
     }
     return af;
 }
@@ -767,7 +770,10 @@
         ap = gAudioPolicyService;
     }
     if (apc != 0) {
+        int64_t token = IPCThreadState::self()->clearCallingIdentity();
         ap->registerClient(apc);
+        ap->setAudioPortCallbacksEnabled(apc->isAudioPortCbEnabled());
+        IPCThreadState::self()->restoreCallingIdentity(token);
     }
 
     return ap;
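
Both registration paths above now clear the binder calling identity around the outbound registerClient() call, so the call carries this process's own uid/pid rather than whatever identity the current binder thread was handling, and restore it afterwards. The same clear/restore pair can be wrapped in a scope guard; this is a hypothetical helper of ours, not something introduced by the patch:

    #include <binder/IPCThreadState.h>

    // Hypothetical RAII guard: clear the calling identity for the lifetime of
    // the scope and restore it on exit, including early returns.
    class ScopedClearedCallingIdentity {
    public:
        ScopedClearedCallingIdentity()
            : mToken(android::IPCThreadState::self()->clearCallingIdentity()) {}
        ~ScopedClearedCallingIdentity() {
            android::IPCThreadState::self()->restoreCallingIdentity(mToken);
        }
    private:
        const int64_t mToken;
    };
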
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 8c9d3c1..00af7e8 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -871,7 +871,6 @@
     switch (code) {
         case SET_STREAM_VOLUME:
         case SET_STREAM_MUTE:
-        case SET_MODE:
         case OPEN_OUTPUT:
         case OPEN_DUPLICATE_OUTPUT:
         case CLOSE_OUTPUT:
@@ -892,7 +891,15 @@
         case SET_RECORD_SILENCED:
             ALOGW("%s: transaction %d received from PID %d",
                   __func__, code, IPCThreadState::self()->getCallingPid());
-            return INVALID_OPERATION;
+            // return status only for non void methods
+            switch (code) {
+                case SET_RECORD_SILENCED:
+                    break;
+                default:
+                    reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
+                    break;
+            }
+            return OK;
         default:
             break;
     }
@@ -909,7 +916,15 @@
                 ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
                       __func__, code, IPCThreadState::self()->getCallingPid(),
                       IPCThreadState::self()->getCallingUid());
-                return INVALID_OPERATION;
+                // return status only for non void methods
+                switch (code) {
+                    case SYSTEM_READY:
+                        break;
+                    default:
+                        reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
+                        break;
+                }
+                return OK;
             }
         } break;
         default:
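
The rejection path above (and the matching one in IAudioPolicyService.cpp further down) no longer returns INVALID_OPERATION from onTransact(). For methods whose proxy reads a status_t out of the reply, the error is written into the reply parcel and onTransact() returns OK, so the binder transaction itself still completes; methods with no reply payload (SET_RECORD_SILENCED and SYSTEM_READY in the hunks above) simply return OK with nothing written. A hedged sketch of that pattern as a helper; the helper is hypothetical and not part of the patch:

    // Hypothetical helper: reject a transaction while keeping the binder
    // round-trip successful. Only methods that reply with a status_t get
    // INVALID_OPERATION written into the reply.
    static status_t rejectTransaction(Parcel* reply, bool repliesWithStatus) {
        if (repliesWithStatus) {
            reply->writeInt32(static_cast<int32_t>(INVALID_OPERATION));
        }
        return OK;
    }
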
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 35f9727..abb502b 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -857,7 +857,16 @@
         case RELEASE_SOUNDTRIGGER_SESSION:
             ALOGW("%s: transaction %d received from PID %d",
                   __func__, code, IPCThreadState::self()->getCallingPid());
-            return INVALID_OPERATION;
+            // return status only for non void methods
+            switch (code) {
+                case RELEASE_OUTPUT:
+                case RELEASE_INPUT:
+                    break;
+                default:
+                    reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
+                    break;
+            }
+            return OK;
         default:
             break;
     }
@@ -867,7 +876,6 @@
         case SET_DEVICE_CONNECTION_STATE:
         case HANDLE_DEVICE_CONFIG_CHANGE:
         case SET_PHONE_STATE:
-        case SET_RINGER_MODE:
         case SET_FORCE_USE:
         case INIT_STREAM_VOLUME:
         case SET_STREAM_VOLUME:
@@ -879,7 +887,8 @@
                 ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
                       __func__, code, IPCThreadState::self()->getCallingPid(),
                       IPCThreadState::self()->getCallingUid());
-                return INVALID_OPERATION;
+                reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
+                return OK;
             }
         } break;
         default:
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 22b700d..0c4d6ee 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -431,6 +431,7 @@
 
         int addAudioPortCallback(const sp<AudioPortCallback>& callback);
         int removeAudioPortCallback(const sp<AudioPortCallback>& callback);
+        bool isAudioPortCbEnabled() const { return (mAudioPortCallbacks.size() != 0); }
 
         // DeathRecipient
         virtual void binderDied(const wp<IBinder>& who);
diff --git a/media/libmedia/NdkWrapper.cpp b/media/libmedia/NdkWrapper.cpp
index 6f56d0c..272bc30 100644
--- a/media/libmedia/NdkWrapper.cpp
+++ b/media/libmedia/NdkWrapper.cpp
@@ -31,6 +31,18 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <utils/Errors.h>
 
+// TODO: remove forward declaration when AMediaExtractor_disconnect is officially added to NDK
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+media_status_t AMediaExtractor_disconnect(AMediaExtractor *);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
 namespace android {
 
 static const size_t kAESBlockSize = 16;  // AES_BLOCK_SIZE
diff --git a/media/libmedia/include/media/CodecServiceRegistrant.h b/media/libmedia/include/media/CodecServiceRegistrant.h
new file mode 100644
index 0000000..e0af781
--- /dev/null
+++ b/media/libmedia/include/media/CodecServiceRegistrant.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEC_SERVICE_REGISTRANT_H_
+
+#define CODEC_SERVICE_REGISTRANT_H_
+
+typedef void (*RegisterCodecServicesFunc)();
+
+#endif  // CODEC_SERVICE_REGISTRANT_H_
diff --git a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
index c45a964..992e230 100644
--- a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
+++ b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
@@ -43,12 +43,12 @@
 
     virtual status_t    setDataSource(int fd, int64_t offset, int64_t length) = 0;
     virtual status_t    setDataSource(const sp<DataSource>& source, const char *mime) = 0;
-    virtual VideoFrame* getFrameAtTime(
+    virtual sp<IMemory> getFrameAtTime(
             int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
-    virtual VideoFrame* getImageAtIndex(
+    virtual sp<IMemory> getImageAtIndex(
             int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
     virtual status_t getFrameAtIndex(
-            std::vector<VideoFrame*>* frames,
+            std::vector<sp<IMemory> >* frames,
             int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
     virtual MediaAlbumArt* extractAlbumArt() = 0;
     virtual const char* extractMetadata(int keyCode) = 0;
@@ -61,14 +61,14 @@
     MediaMetadataRetrieverInterface() {}
 
     virtual             ~MediaMetadataRetrieverInterface() {}
-    virtual VideoFrame* getFrameAtTime(
+    virtual sp<IMemory> getFrameAtTime(
             int64_t /*timeUs*/, int /*option*/, int /*colorFormat*/, bool /*metaOnly*/)
     { return NULL; }
-    virtual VideoFrame* getImageAtIndex(
+    virtual sp<IMemory> getImageAtIndex(
             int /*index*/, int /*colorFormat*/, bool /*metaOnly*/, bool /*thumbnail*/)
     { return NULL; }
     virtual status_t getFrameAtIndex(
-            std::vector<VideoFrame*>* /*frames*/,
+            std::vector<sp<IMemory> >* /*frames*/,
             int /*frameIndex*/, int /*numFrames*/, int /*colorFormat*/, bool /*metaOnly*/)
     { return ERROR_UNSUPPORTED; }
     virtual MediaAlbumArt* extractAlbumArt() { return NULL; }
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index 3b3ac29..672b832 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -194,25 +194,6 @@
 
 Mutex MetadataRetrieverClient::sLock;
 
-static sp<IMemory> getThumbnail(VideoFrame* frame) {
-    std::unique_ptr<VideoFrame> frameDeleter(frame);
-
-    size_t size = frame->getFlattenedSize();
-    sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
-    if (heap == NULL) {
-        ALOGE("failed to create MemoryDealer");
-        return NULL;
-    }
-    sp<IMemory> thrumbnail = new MemoryBase(heap, 0, size);
-    if (thrumbnail == NULL) {
-        ALOGE("not enough memory for VideoFrame size=%zu", size);
-        return NULL;
-    }
-    VideoFrame *frameCopy = static_cast<VideoFrame *>(thrumbnail->pointer());
-    frameCopy->copyFlattened(*frame);
-    return thrumbnail;
-}
-
 sp<IMemory> MetadataRetrieverClient::getFrameAtTime(
         int64_t timeUs, int option, int colorFormat, bool metaOnly)
 {
@@ -225,12 +206,12 @@
         ALOGE("retriever is not initialized");
         return NULL;
     }
-    VideoFrame *frame = mRetriever->getFrameAtTime(timeUs, option, colorFormat, metaOnly);
+    sp<IMemory> frame = mRetriever->getFrameAtTime(timeUs, option, colorFormat, metaOnly);
     if (frame == NULL) {
         ALOGE("failed to capture a video frame");
         return NULL;
     }
-    return getThumbnail(frame);
+    return frame;
 }
 
 sp<IMemory> MetadataRetrieverClient::getImageAtIndex(
@@ -244,12 +225,12 @@
         ALOGE("retriever is not initialized");
         return NULL;
     }
-    VideoFrame *frame = mRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
+    sp<IMemory> frame = mRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
     if (frame == NULL) {
         ALOGE("failed to extract image");
         return NULL;
     }
-    return getThumbnail(frame);
+    return frame;
 }
 
 status_t MetadataRetrieverClient::getFrameAtIndex(
@@ -264,15 +245,12 @@
         return INVALID_OPERATION;
     }
 
-    std::vector<VideoFrame*> videoFrames;
     status_t err = mRetriever->getFrameAtIndex(
-            &videoFrames, frameIndex, numFrames, colorFormat, metaOnly);
+            frames, frameIndex, numFrames, colorFormat, metaOnly);
     if (err != OK) {
+        frames->clear();
         return err;
     }
-    for (size_t i = 0; i < videoFrames.size(); i++) {
-        frames->push_back(getThumbnail(videoFrames[i]));
-    }
     return OK;
 }
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index a762e76..3a28bbd 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -1703,6 +1703,8 @@
     ++mAudioDrainGeneration;
     if (mAudioRenderingStartGeneration != -1) {
         prepareForMediaRenderingStart_l();
+        // PauseTimeout is applied to offload mode only. Cancel pending timer.
+        cancelAudioOffloadPauseTimeout();
     }
 }
 
@@ -1805,6 +1807,12 @@
     if (mAudioTornDown) {
         return;
     }
+
+    // TimeoutWhenPaused is only for offload mode.
+    if (reason == kDueToTimeout && !offloadingAudio()) {
+        return;
+    }
+
     mAudioTornDown = true;
 
     int64_t currentPositionUs;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index ad81f04..2abea9e 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2144,6 +2144,10 @@
                 // value is unknown
                 drc.targetRefLevel = -1;
             }
+            if (!msg->findInt32("aac-drc-effect-type", &drc.effectType)) {
+                // value is unknown
+                drc.effectType = -2; // valid values are -1 and over
+            }
 
             err = setupAACCodec(
                     encoder, numChannels, sampleRate, bitrate, aacProfile,
@@ -2778,7 +2782,7 @@
             ? OMX_AUDIO_AACStreamFormatMP4ADTS
             : OMX_AUDIO_AACStreamFormatMP4FF;
 
-    OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE presentation;
+    OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE presentation;
     InitOMXParams(&presentation);
     presentation.nMaxOutputChannels = maxOutputChannelCount;
     presentation.nDrcCut = drc.drcCut;
@@ -2787,14 +2791,29 @@
     presentation.nTargetReferenceLevel = drc.targetRefLevel;
     presentation.nEncodedTargetLevel = drc.encodedTargetLevel;
     presentation.nPCMLimiterEnable = pcmLimiterEnable;
+    presentation.nDrcEffectType = drc.effectType;
 
     status_t res = mOMXNode->setParameter(
             OMX_IndexParamAudioAac, &profile, sizeof(profile));
     if (res == OK) {
         // optional parameters, will not cause configuration failure
-        mOMXNode->setParameter(
+        if (mOMXNode->setParameter(
+                (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacDrcPresentation,
+                &presentation, sizeof(presentation)) == ERROR_UNSUPPORTED) {
+            // prior to 9.0 we used a different config structure and index
+            OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE presentation8;
+            InitOMXParams(&presentation8);
+            presentation8.nMaxOutputChannels = presentation.nMaxOutputChannels;
+            presentation8.nDrcCut = presentation.nDrcCut;
+            presentation8.nDrcBoost = presentation.nDrcBoost;
+            presentation8.nHeavyCompression = presentation.nHeavyCompression;
+            presentation8.nTargetReferenceLevel = presentation.nTargetReferenceLevel;
+            presentation8.nEncodedTargetLevel = presentation.nEncodedTargetLevel;
+            presentation8.nPCMLimiterEnable = presentation.nPCMLimiterEnable;
+            (void)mOMXNode->setParameter(
                 (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacPresentation,
-                &presentation, sizeof(presentation));
+                &presentation8, sizeof(presentation8));
+        }
     } else {
         ALOGW("did not set AudioAndroidAacPresentation due to error %d when setting AudioAac", res);
     }
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index a00d13a..09a8be5 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -17,12 +17,11 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "FrameDecoder"
 
-#include <inttypes.h>
-
-#include <utils/Log.h>
-#include <gui/Surface.h>
-
 #include "include/FrameDecoder.h"
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
+#include <gui/Surface.h>
+#include <inttypes.h>
 #include <media/ICrypto.h>
 #include <media/IMediaSource.h>
 #include <media/MediaCodecBuffer.h>
@@ -36,14 +35,15 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/Utils.h>
 #include <private/media/VideoFrame.h>
+#include <utils/Log.h>
 
 namespace android {
 
-static const int64_t kBufferTimeOutUs = 30000ll; // 30 msec
-static const size_t kRetryCount = 20; // must be >0
+static const int64_t kBufferTimeOutUs = 10000ll; // 10 msec
+static const size_t kRetryCount = 50; // must be >0
 
 //static
-VideoFrame *allocVideoFrame(const sp<MetaData> &trackMeta,
+sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
         int32_t width, int32_t height, int32_t dstBpp, bool metaOnly = false) {
     int32_t rotationAngle;
     if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
@@ -74,8 +74,24 @@
         displayHeight = height;
     }
 
-    return new VideoFrame(width, height, displayWidth, displayHeight,
-            rotationAngle, dstBpp, !metaOnly, iccData, iccSize);
+    VideoFrame frame(width, height, displayWidth, displayHeight,
+            rotationAngle, dstBpp, !metaOnly, iccSize);
+
+    size_t size = frame.getFlattenedSize();
+    sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
+    if (heap == NULL) {
+        ALOGE("failed to create MemoryDealer");
+        return NULL;
+    }
+    sp<IMemory> frameMem = new MemoryBase(heap, 0, size);
+    if (frameMem == NULL) {
+        ALOGE("not enough memory for VideoFrame size=%zu", size);
+        return NULL;
+    }
+    VideoFrame* frameCopy = static_cast<VideoFrame*>(frameMem->pointer());
+    frameCopy->init(frame, iccData, iccSize);
+
+    return frameMem;
 }
 
 //static
@@ -92,7 +108,7 @@
 }
 
 //static
-VideoFrame* FrameDecoder::getMetadataOnly(
+sp<IMemory> FrameDecoder::getMetadataOnly(
         const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail) {
     OMX_COLOR_FORMATTYPE dstFormat;
     int32_t dstBpp;
@@ -146,7 +162,7 @@
     return false;
 }
 
-VideoFrame* FrameDecoder::extractFrame(
+sp<IMemory> FrameDecoder::extractFrame(
         int64_t frameTimeUs, int option, int colorFormat) {
     if (!getDstColorFormat(
             (android_pixel_format_t)colorFormat, &mDstFormat, &mDstBpp)) {
@@ -158,12 +174,12 @@
         return NULL;
     }
 
-    return mFrames.size() > 0 ? mFrames[0].release() : NULL;
+    return mFrames.size() > 0 ? mFrames[0] : NULL;
 }
 
 status_t FrameDecoder::extractFrames(
         int64_t frameTimeUs, size_t numFrames, int option, int colorFormat,
-        std::vector<VideoFrame*>* frames) {
+        std::vector<sp<IMemory> >* frames) {
     if (!getDstColorFormat(
             (android_pixel_format_t)colorFormat, &mDstFormat, &mDstBpp)) {
         return ERROR_UNSUPPORTED;
@@ -175,7 +191,7 @@
     }
 
     for (size_t i = 0; i < mFrames.size(); i++) {
-        frames->push_back(mFrames[i].release());
+        frames->push_back(mFrames[i]);
     }
     return OK;
 }
@@ -253,10 +269,13 @@
         uint32_t flags = 0;
         sp<MediaCodecBuffer> codecBuffer = NULL;
 
+        // Queue as many inputs as we possibly can, then block on dequeuing
+        // outputs. After getting each output, come back and queue the inputs
+        // again to keep the decoder busy.
         while (haveMoreInputs) {
-            err = decoder->dequeueInputBuffer(&inputIndex, kBufferTimeOutUs);
+            err = decoder->dequeueInputBuffer(&inputIndex, 0);
             if (err != OK) {
-                ALOGW("Timed out waiting for input");
+                ALOGV("Timed out waiting for input");
                 if (retriesLeft) {
                     err = OK;
                 }
@@ -295,27 +314,21 @@
             }
 
             mediaBuffer->release();
-            break;
-        }
 
-        if (haveMoreInputs && inputIndex < inputBuffers.size()) {
-            ALOGV("QueueInput: size=%zu ts=%" PRId64 " us flags=%x",
-                    codecBuffer->size(), ptsUs, flags);
+            if (haveMoreInputs && inputIndex < inputBuffers.size()) {
+                ALOGV("QueueInput: size=%zu ts=%" PRId64 " us flags=%x",
+                        codecBuffer->size(), ptsUs, flags);
 
-            err = decoder->queueInputBuffer(
-                    inputIndex,
-                    codecBuffer->offset(),
-                    codecBuffer->size(),
-                    ptsUs,
-                    flags);
+                err = decoder->queueInputBuffer(
+                        inputIndex,
+                        codecBuffer->offset(),
+                        codecBuffer->size(),
+                        ptsUs,
+                        flags);
 
-            if (flags & MediaCodec::BUFFER_FLAG_EOS) {
-                haveMoreInputs = false;
-            }
-
-            // we don't expect an output from codec config buffer
-            if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
-                continue;
+                if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+                    haveMoreInputs = false;
+                }
             }
         }
 
@@ -468,12 +481,13 @@
         crop_bottom = height - 1;
     }
 
-    VideoFrame *frame = allocVideoFrame(
+    sp<IMemory> frameMem = allocVideoFrame(
             trackMeta(),
             (crop_right - crop_left + 1),
             (crop_bottom - crop_top + 1),
             dstBpp());
-    addFrame(frame);
+    addFrame(frameMem);
+    VideoFrame* frame = static_cast<VideoFrame*>(frameMem->pointer());
 
     int32_t srcFormat;
     CHECK(outputFormat->findInt32("color-format", &srcFormat));
@@ -485,7 +499,7 @@
                 (const uint8_t *)videoFrameBuffer->data(),
                 width, height,
                 crop_left, crop_top, crop_right, crop_bottom,
-                frame->mData,
+                frame->getFlattenedData(),
                 frame->mWidth,
                 frame->mHeight,
                 crop_left, crop_top, crop_right, crop_bottom);
@@ -596,9 +610,10 @@
     }
 
     if (mFrame == NULL) {
-        mFrame = allocVideoFrame(trackMeta(), imageWidth, imageHeight, dstBpp());
+        sp<IMemory> frameMem = allocVideoFrame(trackMeta(), imageWidth, imageHeight, dstBpp());
+        mFrame = static_cast<VideoFrame*>(frameMem->pointer());
 
-        addFrame(mFrame);
+        addFrame(frameMem);
     }
 
     int32_t srcFormat;
@@ -639,7 +654,7 @@
                 (const uint8_t *)videoFrameBuffer->data(),
                 width, height,
                 crop_left, crop_top, crop_right, crop_bottom,
-                mFrame->mData,
+                mFrame->getFlattenedData(),
                 mFrame->mWidth,
                 mFrame->mHeight,
                 dstLeft, dstTop, dstRight, dstBottom);
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index a3261d7..550a99c 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -496,7 +496,6 @@
     mStreamableFile = false;
     mTimeScale = -1;
     mHasFileLevelMeta = false;
-    mHasMoovBox = false;
     mPrimaryItemId = 0;
     mAssociationEntryCount = 0;
     mNumGrids = 0;
@@ -505,6 +504,7 @@
     // And they will stay the same for all the recording sessions.
     if (isFirstSession) {
         mMoovExtraSize = 0;
+        mHasMoovBox = false;
         mMetaKeys = new AMessage();
         addDeviceMeta();
         mLatitudex10000 = 0;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 23bee49..06a49d0 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -764,7 +764,7 @@
 
     // ignore stuff with no presentation time
     if (presentationUs <= 0) {
-        ALOGD("-- returned buffer has bad timestamp %" PRId64 ", ignore it", presentationUs);
+        ALOGV("-- returned buffer timestamp %" PRId64 " <= 0, ignore it", presentationUs);
         mLatencyUnknown++;
         return;
     }
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index 4ff2bfe..e010b3e 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -40,7 +40,8 @@
         ".mpeg", ".ogg", ".mid", ".smf", ".imy", ".wma", ".aac",
         ".wav", ".amr", ".midi", ".xmf", ".rtttl", ".rtx", ".ota",
         ".mkv", ".mka", ".webm", ".ts", ".fl", ".flac", ".mxmf",
-        ".avi", ".mpeg", ".mpg", ".awb", ".mpga", ".mov"
+        ".avi", ".mpeg", ".mpg", ".awb", ".mpga", ".mov",
+        ".m4v", ".oga"
     };
     static const size_t kNumValidExtensions =
         sizeof(kValidExtensions) / sizeof(kValidExtensions[0]);
@@ -62,6 +63,11 @@
     client.setLocale(locale());
     client.beginFile();
     MediaScanResult result = processFileInternal(path, mimeType, client);
+    ALOGV("result: %d", result);
+    if (mimeType == NULL && result != MEDIA_SCAN_RESULT_OK) {
+        ALOGW("media scan failed for %s", path);
+        client.setMimeType("application/octet-stream");
+    }
     client.endFile();
     return result;
 }
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 6ad6004..5417fef 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -124,7 +124,7 @@
     return OK;
 }
 
-VideoFrame* StagefrightMetadataRetriever::getImageAtIndex(
+sp<IMemory> StagefrightMetadataRetriever::getImageAtIndex(
         int index, int colorFormat, bool metaOnly, bool thumbnail) {
 
     ALOGV("getImageAtIndex: index(%d) colorFormat(%d) metaOnly(%d) thumbnail(%d)",
@@ -193,7 +193,7 @@
     for (size_t i = 0; i < matchingCodecs.size(); ++i) {
         const AString &componentName = matchingCodecs[i];
         ImageDecoder decoder(componentName, trackMeta, source);
-        VideoFrame* frame = decoder.extractFrame(
+        sp<IMemory> frame = decoder.extractFrame(
                 thumbnail ? -1 : 0 /*frameTimeUs*/, 0 /*seekMode*/, colorFormat);
 
         if (frame != NULL) {
@@ -205,19 +205,19 @@
     return NULL;
 }
 
-VideoFrame* StagefrightMetadataRetriever::getFrameAtTime(
+sp<IMemory> StagefrightMetadataRetriever::getFrameAtTime(
         int64_t timeUs, int option, int colorFormat, bool metaOnly) {
     ALOGV("getFrameAtTime: %" PRId64 " us option: %d colorFormat: %d, metaOnly: %d",
             timeUs, option, colorFormat, metaOnly);
 
-    VideoFrame *frame;
+    sp<IMemory> frame;
     status_t err = getFrameInternal(
             timeUs, 1, option, colorFormat, metaOnly, &frame, NULL /*outFrames*/);
     return (err == OK) ? frame : NULL;
 }
 
 status_t StagefrightMetadataRetriever::getFrameAtIndex(
-        std::vector<VideoFrame*>* frames,
+        std::vector<sp<IMemory> >* frames,
         int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
     ALOGV("getFrameAtIndex: frameIndex %d, numFrames %d, colorFormat: %d, metaOnly: %d",
             frameIndex, numFrames, colorFormat, metaOnly);
@@ -229,7 +229,7 @@
 
 status_t StagefrightMetadataRetriever::getFrameInternal(
         int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
-        VideoFrame **outFrame, std::vector<VideoFrame*>* outFrames) {
+        sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames) {
     if (mExtractor.get() == NULL) {
         ALOGE("no extractor.");
         return NO_INIT;
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index c61f4b5..cf5e91e 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -154,6 +154,7 @@
         { 23, OMX_AUDIO_AACObjectLD       },
         { 29, OMX_AUDIO_AACObjectHE_PS    },
         { 39, OMX_AUDIO_AACObjectELD      },
+        { 42, OMX_AUDIO_AACObjectXHE      },
     };
 
     OMX_AUDIO_AACPROFILETYPE profile;
@@ -1610,6 +1611,7 @@
     { OMX_AUDIO_AACObjectLD,          AUDIO_FORMAT_AAC_LD},
     { OMX_AUDIO_AACObjectHE_PS,       AUDIO_FORMAT_AAC_HE_V2},
     { OMX_AUDIO_AACObjectELD,         AUDIO_FORMAT_AAC_ELD},
+    { OMX_AUDIO_AACObjectXHE,         AUDIO_FORMAT_AAC_XHE},
     { OMX_AUDIO_AACObjectNull,        AUDIO_FORMAT_AAC},
 };
 
diff --git a/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp b/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp
index 129ad65..95d3724 100644
--- a/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp
+++ b/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp
@@ -24,7 +24,7 @@
 //#define DRC_PRES_MODE_WRAP_DEBUG
 
 #define GPM_ENCODER_TARGET_LEVEL 64
-#define MAX_TARGET_LEVEL 64
+#define MAX_TARGET_LEVEL 40
 
 CDrcPresModeWrapper::CDrcPresModeWrapper()
 {
@@ -164,7 +164,7 @@
     if (mDataUpdate) {
         // sanity check
         if (mDesTarget < MAX_TARGET_LEVEL){
-            mDesTarget = MAX_TARGET_LEVEL;  // limit target level to -16 dB or below
+            mDesTarget = MAX_TARGET_LEVEL;  // limit target level to -10 dB or below
             newTarget = MAX_TARGET_LEVEL;
         }
 
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index e0c0c32..ecd2512 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -36,6 +36,7 @@
 #define DRC_DEFAULT_MOBILE_DRC_CUT   127 /* maximum compression of dynamic range for mobile conf */
 #define DRC_DEFAULT_MOBILE_DRC_BOOST 127 /* maximum compression of dynamic range for mobile conf */
 #define DRC_DEFAULT_MOBILE_DRC_HEAVY 1   /* switch for heavy compression for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_EFFECT 3  /* MPEG-D DRC effect type; 3 => Limited playback range */
 #define DRC_DEFAULT_MOBILE_ENC_LEVEL (-1) /* encoder target level; -1 => the value is unknown, otherwise dB step value (e.g. 64 for -16 dB) */
 #define MAX_CHANNEL_COUNT            8  /* maximum number of audio channels that can be decoded */
 // names of properties that can be used to override the default DRC settings
@@ -44,6 +45,7 @@
 #define PROP_DRC_OVERRIDE_BOOST      "aac_drc_boost"
 #define PROP_DRC_OVERRIDE_HEAVY      "aac_drc_heavy"
 #define PROP_DRC_OVERRIDE_ENC_LEVEL "aac_drc_enc_target_level"
+#define PROP_DRC_OVERRIDE_EFFECT     "aac_drc_effect_type"
 
 namespace android {
 
@@ -207,6 +209,17 @@
     } else {
         mDrcWrap.setParam(DRC_PRES_MODE_WRAP_ENCODER_TARGET, DRC_DEFAULT_MOBILE_ENC_LEVEL);
     }
+    // AAC_UNIDRC_SET_EFFECT
+    int32_t effectType = DRC_DEFAULT_MOBILE_DRC_EFFECT;
+    // FIXME can't read default property for DRC effect type
+    //int32_t effectType =
+    //        property_get_int32(PROP_DRC_OVERRIDE_EFFECT, DRC_DEFAULT_MOBILE_DRC_EFFECT);
+    if (effectType < -1 || effectType > 8) {
+        effectType = DRC_DEFAULT_MOBILE_DRC_EFFECT;
+    }
+    ALOGV("AAC decoder using MPEG-D DRC effect type %d (default=%d)",
+            effectType, DRC_DEFAULT_MOBILE_DRC_EFFECT);
+    aacDecoder_SetParam(mAACDecoder, AAC_UNIDRC_SET_EFFECT, effectType);
 
     // By default, the decoder creates a 5.1 channel downmix signal.
     // For seven and eight channel input streams, enable 6.1 and 7.1 channel output
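
The block above clamps the MPEG-D DRC effect type to the valid range of -1 through 8 before programming the decoder, falling back to the default of 3 (limited playback range) otherwise. A small self-contained sketch of that validation, with a stub in place of the real aacDecoder_SetParam(..., AAC_UNIDRC_SET_EFFECT, ...) call:

// Illustrative sketch: validating an MPEG-D DRC effect type before handing it to a
// decoder. applyEffect() is a stub standing in for the real decoder call.
#include <cstdint>
#include <cstdio>

constexpr int32_t kDefaultDrcEffect = 3;  // "limited playback range"

int32_t resolveDrcEffectType(int32_t requested) {
    // Valid MPEG-D DRC effect types are -1 (off) through 8; anything else falls
    // back to the default, exactly as the decoder code above does.
    if (requested < -1 || requested > 8) {
        return kDefaultDrcEffect;
    }
    return requested;
}

void applyEffect(int32_t effectType) {
    std::printf("using MPEG-D DRC effect type %d (default=%d)\n",
                effectType, kDefaultDrcEffect);
}

int main() {
    applyEffect(resolveDrcEffectType(12));  // out of range -> default (3)
    applyEffect(resolveDrcEffectType(-1));  // -1 (DRC off) is accepted
    return 0;
}
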
@@ -414,10 +427,10 @@
             return OMX_ErrorNone;
         }
 
-        case OMX_IndexParamAudioAndroidAacPresentation:
+        case OMX_IndexParamAudioAndroidAacDrcPresentation:
         {
-            const OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE *aacPresParams =
-                    (const OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE *)params;
+            const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *aacPresParams =
+                    (const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *)params;
 
             if (!isValidOMXParam(aacPresParams)) {
                 return OMX_ErrorBadParameter;
@@ -443,6 +456,10 @@
                 ALOGV("set nMaxOutputChannels=%d", max);
                 aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, max);
             }
+            if (aacPresParams->nDrcEffectType >= -1) {
+                ALOGV("set nDrcEffectType=%d", aacPresParams->nDrcEffectType);
+                aacDecoder_SetParam(mAACDecoder, AAC_UNIDRC_SET_EFFECT, aacPresParams->nDrcEffectType);
+            }
             bool updateDrcWrapper = false;
             if (aacPresParams->nDrcBoost >= 0) {
                 ALOGV("set nDrcBoost=%d", aacPresParams->nDrcBoost);
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 8f349fc..9bdf895 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -484,6 +484,9 @@
         // Base URL must be absolute
         return false;
     }
+    if (!strncasecmp("data:", url, 5)) {
+        return false;
+    }
     const size_t schemeEnd = (strstr(baseURL, "//") - baseURL) + 2;
     CHECK(schemeEnd == 7 || schemeEnd == 8);
 
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 5624f4a..f292c47 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -33,6 +33,7 @@
 #include <media/stagefright/foundation/ByteUtils.h>
 #include <media/stagefright/foundation/MediaKeys.h>
 #include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/DataURISource.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MetaDataUtils.h>
@@ -347,6 +348,16 @@
     sp<ABuffer> key;
     if (index >= 0) {
         key = mAESKeyForURI.valueAt(index);
+    } else if (keyURI.startsWith("data:")) {
+        sp<DataSource> keySrc = DataURISource::Create(keyURI.c_str());
+        off64_t keyLen;
+        if (keySrc == NULL || keySrc->getSize(&keyLen) != OK || keyLen < 0) {
+            ALOGE("Malformed cipher key data uri.");
+            return ERROR_MALFORMED;
+        }
+        key = new ABuffer(keyLen);
+        keySrc->readAt(0, key->data(), keyLen);
+        key->setRange(0, keyLen);
     } else {
         ssize_t err = mHTTPDownloader->fetchFile(keyURI.c_str(), &key);
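
With the branch above, an EXT-X-KEY URI that uses the data: scheme is decoded in place instead of being fetched over the network. A rough sketch of telling the two cases apart and pulling out the inline payload; the URI handling here is simplified and the actual decoding is delegated to DataURISource::Create() in the patch.

// Illustrative sketch (not the real DataURISource API): distinguishing an inline
// RFC 2397 "data:" key URI from a remote one and extracting its payload.
#include <iostream>
#include <optional>
#include <string>

std::optional<std::string> inlineKeyPayload(const std::string& keyUri) {
    if (keyUri.rfind("data:", 0) != 0) {
        return std::nullopt;  // not a data: URI; caller should fetch it over HTTP(S)
    }
    const std::size_t comma = keyUri.find(',');
    if (comma == std::string::npos) {
        return std::nullopt;  // malformed data: URI, mirroring the ERROR_MALFORMED path
    }
    return keyUri.substr(comma + 1);  // still base64- or percent-encoded
}

int main() {
    // Dummy payload, purely for illustration.
    auto payload = inlineKeyPayload("data:text/plain;base64,c2VjcmV0a2V5MTIzNDU2");
    std::cout << (payload ? *payload : std::string("fetch over HTTP")) << "\n";
    return 0;
}
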
 
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index b67e928..f6d4727 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -44,16 +44,16 @@
                 mDstFormat(OMX_COLOR_Format16bitRGB565),
                 mDstBpp(2) {}
 
-    VideoFrame* extractFrame(int64_t frameTimeUs, int option, int colorFormat);
+    sp<IMemory> extractFrame(int64_t frameTimeUs, int option, int colorFormat);
 
     status_t extractFrames(
             int64_t frameTimeUs,
             size_t numFrames,
             int option,
             int colorFormat,
-            std::vector<VideoFrame*>* frames);
+            std::vector<sp<IMemory> >* frames);
 
-    static VideoFrame* getMetadataOnly(
+    static sp<IMemory> getMetadataOnly(
             const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail = false);
 
 protected:
@@ -81,8 +81,8 @@
     OMX_COLOR_FORMATTYPE dstFormat() const  { return mDstFormat; }
     int32_t dstBpp()             const      { return mDstBpp; }
 
-    void addFrame(VideoFrame *frame) {
-        mFrames.push_back(std::unique_ptr<VideoFrame>(frame));
+    void addFrame(const sp<IMemory> &frame) {
+        mFrames.push_back(frame);
     }
 
 private:
@@ -91,7 +91,7 @@
     sp<IMediaSource> mSource;
     OMX_COLOR_FORMATTYPE mDstFormat;
     int32_t mDstBpp;
-    std::vector<std::unique_ptr<VideoFrame> > mFrames;
+    std::vector<sp<IMemory> > mFrames;
 
     static bool getDstColorFormat(
             android_pixel_format_t colorFormat,
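
The header changes above replace raw, caller-owned VideoFrame pointers with reference-counted sp<IMemory> handles, so decoded frames live in shared memory and need no explicit delete. An illustrative analogue of that ownership model, using std::shared_ptr in place of android::sp and a plain struct in place of the IMemory-backed frame:

// Illustrative sketch of shared, reference-counted frame ownership.
#include <cstdint>
#include <cstdio>
#include <memory>
#include <vector>

struct FrameBuffer {                 // stand-in for the IMemory-backed frame
    std::vector<uint8_t> data;
};

class FrameCollector {
public:
    void addFrame(const std::shared_ptr<FrameBuffer>& frame) {
        mFrames.push_back(frame);    // shared ownership; nothing to delete manually
    }

    std::shared_ptr<FrameBuffer> frameAt(size_t index) const {
        return index < mFrames.size() ? mFrames[index] : nullptr;
    }

private:
    std::vector<std::shared_ptr<FrameBuffer>> mFrames;
};

int main() {
    FrameCollector collector;
    collector.addFrame(std::make_shared<FrameBuffer>());
    // The returned handle stays valid even if the collector is destroyed first.
    std::shared_ptr<FrameBuffer> frame = collector.frameAt(0);
    std::printf("frame buffer size: %zu\n", frame->data.size());
    return 0;
}
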
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index 8443fbe..209f850 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -40,12 +40,12 @@
     virtual status_t setDataSource(int fd, int64_t offset, int64_t length);
     virtual status_t setDataSource(const sp<DataSource>& source, const char *mime);
 
-    virtual VideoFrame* getFrameAtTime(
+    virtual sp<IMemory> getFrameAtTime(
             int64_t timeUs, int option, int colorFormat, bool metaOnly);
-    virtual VideoFrame* getImageAtIndex(
+    virtual sp<IMemory> getImageAtIndex(
             int index, int colorFormat, bool metaOnly, bool thumbnail);
     virtual status_t getFrameAtIndex(
-            std::vector<VideoFrame*>* frames,
+            std::vector<sp<IMemory> >* frames,
             int frameIndex, int numFrames, int colorFormat, bool metaOnly);
 
     virtual MediaAlbumArt *extractAlbumArt();
@@ -65,7 +65,7 @@
 
     status_t getFrameInternal(
             int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
-            VideoFrame **outFrame, std::vector<VideoFrame*>* outFrames);
+            sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames);
 
     StagefrightMetadataRetriever(const StagefrightMetadataRetriever &);
 
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 64caeed..97d15a7 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -446,6 +446,7 @@
         int32_t heavyCompression;
         int32_t targetRefLevel;
         int32_t encodedTargetLevel;
+        int32_t effectType;
     } drcParams_t;
 
     status_t setupAACCodec(
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index fe08ab9..6d10f1c 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -224,7 +224,7 @@
 
 static bool findId(AMediaDrm *mObj, const AMediaDrmByteArray &id, List<idvec_t>::iterator &iter) {
     for (iter = mObj->mIds.begin(); iter != mObj->mIds.end(); ++iter) {
-        if (iter->array() == id.ptr && iter->size() == id.length) {
+        if (id.length == iter->size() && memcmp(iter->array(), id.ptr, iter->size()) == 0) {
             return true;
         }
     }
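
The fix above matches a stored id by content rather than by pointer, since callers generally pass back a copy of the byte array they were handed earlier. A small stand-alone sketch of the same length-plus-memcmp comparison over simplified types:

// Illustrative sketch of content-based id matching (simplified types, not the NDK ones).
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <list>
#include <vector>

struct ByteArray { const uint8_t* ptr; size_t length; };

using IdList = std::list<std::vector<uint8_t>>;

bool findId(const IdList& ids, const ByteArray& id, IdList::const_iterator& iter) {
    for (iter = ids.begin(); iter != ids.end(); ++iter) {
        // Content comparison: sizes must match and every byte must be equal.
        if (id.length == iter->size() &&
                std::memcmp(iter->data(), id.ptr, iter->size()) == 0) {
            return true;
        }
    }
    return false;
}

int main() {
    IdList ids = { {1, 2, 3}, {4, 5, 6} };
    const uint8_t lookup[] = {4, 5, 6};          // a *copy* of a stored id
    IdList::const_iterator it;
    std::printf("found: %d\n", findId(ids, ByteArray{lookup, sizeof(lookup)}, it));
    return 0;
}
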
diff --git a/media/ndk/include/media/NdkMediaExtractor.h b/media/ndk/include/media/NdkMediaExtractor.h
index 1d295e4..f7b9cfd 100644
--- a/media/ndk/include/media/NdkMediaExtractor.h
+++ b/media/ndk/include/media/NdkMediaExtractor.h
@@ -216,12 +216,6 @@
 
 #endif /* __ANDROID_API__ >= 28 */
 
-#if __ANDROID_API__ >= 29
-
-media_status_t AMediaExtractor_disconnect(AMediaExtractor *ex);
-
-#endif /* __ANDROID_API__ >= 29 */
-
 #endif /* __ANDROID_API__ >= 21 */
 
 __END_DECLS
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index ef466a2..79bb9fe 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -216,11 +216,6 @@
             mWarmupNsMax = LONG_MAX;
         }
         mMixerBufferState = UNDEFINED;
-#if !LOG_NDEBUG
-        for (unsigned i = 0; i < FastMixerState::sMaxFastTracks; ++i) {
-            mFastTrackNames[i] = -1;
-        }
-#endif
         // we need to reconfigure all active tracks
         previousTrackMask = 0;
         mFastTracksGen = current->mFastTracksGen - 1;
@@ -245,9 +240,6 @@
             if (mMixer != NULL) {
                 mMixer->destroy(i);
             }
-#if !LOG_NDEBUG
-            mFastTrackNames[i] = -1;
-#endif
             // don't reset track dump state, since other side is ignoring it
             mGenerations[i] = fastTrack->mGeneration;
         }
@@ -259,7 +251,6 @@
             addedTracks &= ~(1 << i);
             const FastTrack* fastTrack = &current->mFastTracks[i];
             AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
-            ALOG_ASSERT(bufferProvider != NULL && mFastTrackNames[i] == -1);
             if (mMixer != NULL) {
                 const int name = i; // for clarity, choose name as fast track index.
                 status_t status = mMixer->create(
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 20de97c..c47aa01 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -6603,8 +6603,17 @@
         if (mPipeSource != 0) {
             size_t framesToRead = mBufferSize / mFrameSize;
             framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
-            framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
-                    framesToRead);
+
+            // The audio fifo read() returns OVERRUN on overflow, and advances the read pointer
+            // to the full buffer point (clearing the overflow condition).  Upon OVERRUN error,
+            // we immediately retry the read() to get data and prevent another overflow.
+            for (int retries = 0; retries <= 2; ++retries) {
+                ALOGW_IF(retries > 0, "overrun on read from pipe, retry #%d", retries);
+                framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
+                        framesToRead);
+                if (framesRead != OVERRUN) break;
+            }
+
             // since pipe is non-blocking, simulate blocking input by waiting for 1/2 of
             // buffer size or at least for 20ms.
             size_t sleepFrames = max(
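
The retry loop above re-issues the pipe read when it reports an overrun, because the failed read has already advanced the read pointer and cleared the condition; capping it at two retries keeps a persistently overflowing pipe from stalling the thread. A compact sketch of that bounded retry, with a stub pipe and a hypothetical kOverrun status standing in for the NBAIO types:

// Illustrative sketch of a bounded retry after an overrun status from a pipe read.
#include <cstdio>
#include <sys/types.h>

constexpr ssize_t kOverrun = -4;  // hypothetical stand-in for the OVERRUN error code

ssize_t readFromPipe(void* /*buffer*/, size_t framesToRead) {
    static int calls = 0;
    // Pretend the first call hits an overrun and the second returns data.
    return (calls++ == 0) ? kOverrun : static_cast<ssize_t>(framesToRead);
}

ssize_t readWithOverrunRetry(void* buffer, size_t framesToRead) {
    ssize_t framesRead = 0;
    for (int retries = 0; retries <= 2; ++retries) {
        if (retries > 0) {
            std::printf("overrun on read from pipe, retry #%d\n", retries);
        }
        framesRead = readFromPipe(buffer, framesToRead);
        if (framesRead != kOverrun) break;  // got data (or another error); stop retrying
    }
    return framesRead;
}

int main() {
    char buffer[256];
    std::printf("frames read: %zd\n", readWithOverrunRetry(buffer, 64));
    return 0;
}
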
diff --git a/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h b/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h
index fc2c273..fac6cbe 100644
--- a/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h
+++ b/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h
@@ -54,7 +54,7 @@
 
     void log(const char* prefix);
 
-    bool isActive() {
+    bool isActiveOrChanged() {
         return (mDeviceDescriptor != 0) && (mChanged || (mActivityCount > 0));
     }
 
@@ -96,7 +96,7 @@
 
     int incRouteActivity(audio_session_t session);
     int decRouteActivity(audio_session_t session);
-    bool hasRouteChanged(audio_session_t session); // also clears the changed flag
+    bool getAndClearRouteChanged(audio_session_t session); // also clears the changed flag
     void log(const char* caption);
 
     // Specify an Output(Sink) route by passing SessionRoute::SOURCE_TYPE_NA in the
diff --git a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
index 689f4e6..8edd4d1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
@@ -40,7 +40,7 @@
     return indexOfKey(session) >= 0 && valueFor(session)->mDeviceDescriptor != 0;
 }
 
-bool SessionRouteMap::hasRouteChanged(audio_session_t session)
+bool SessionRouteMap::getAndClearRouteChanged(audio_session_t session)
 {
     if (indexOfKey(session) >= 0) {
         if (valueFor(session)->mChanged) {
@@ -104,9 +104,7 @@
     sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0;
 
     if (route != 0) {
-        if (((route->mDeviceDescriptor == 0) && (descriptor != 0)) ||
-                ((route->mDeviceDescriptor != 0) &&
-                 ((descriptor == 0) || (!route->mDeviceDescriptor->equals(descriptor))))) {
+        if (descriptor != 0 || route->mDeviceDescriptor != 0) {
             route->mChanged = true;
         }
         route->mRefCount++;
@@ -114,10 +112,10 @@
     } else {
         route = new SessionRoute(session, streamType, source, descriptor, uid);
         route->mRefCount++;
-        add(session, route);
         if (descriptor != 0) {
             route->mChanged = true;
         }
+        add(session, route);
     }
 }
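
The renames above spell out the semantics: isActiveOrChanged() is true for a routed session that is either in use or freshly changed, and getAndClearRouteChanged() consumes the changed flag as it reads it, so two back-to-back calls cannot both observe the change. A small sketch of those two accessors over a simplified route map:

// Illustrative sketch of get-and-clear and active-or-changed accessors (simplified types).
#include <cstdio>
#include <map>

struct Route {
    bool changed = false;
    int activityCount = 0;
    bool hasDevice = false;
};

class RouteMap {
public:
    void setChanged(int session) { mRoutes[session].changed = true; }

    // Returns the changed flag and clears it, mirroring getAndClearRouteChanged().
    bool getAndClearRouteChanged(int session) {
        auto it = mRoutes.find(session);
        if (it == mRoutes.end()) return false;
        bool changed = it->second.changed;
        it->second.changed = false;
        return changed;
    }

    // Mirrors isActiveOrChanged(): a route counts if it has a device and is either
    // actively used or flagged as changed.
    bool isActiveOrChanged(int session) const {
        auto it = mRoutes.find(session);
        if (it == mRoutes.end()) return false;
        const Route& r = it->second;
        return r.hasDevice && (r.changed || r.activityCount > 0);
    }

private:
    std::map<int, Route> mRoutes;
};

int main() {
    RouteMap routes;
    routes.setChanged(42);
    std::printf("%d %d\n", routes.getAndClearRouteChanged(42),
                routes.getAndClearRouteChanged(42));  // prints "1 0"
    return 0;
}
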
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index bb00c3f..264e709 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -822,7 +822,7 @@
           "flags %#x",
           device, config->sample_rate, config->format, config->channel_mask, *flags);
 
-    *output = getOutputForDevice(device, session, *stream, *output, config, flags);
+    *output = getOutputForDevice(device, session, *stream, config, flags);
     if (*output == AUDIO_IO_HANDLE_NONE) {
         mOutputRoutes.removeRoute(session);
         return INVALID_OPERATION;
@@ -841,11 +841,10 @@
         audio_devices_t device,
         audio_session_t session,
         audio_stream_type_t stream,
-        audio_io_handle_t originalOutput,
         const audio_config_t *config,
         audio_output_flags_t *flags)
 {
-    audio_io_handle_t output = originalOutput;
+    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
     status_t status;
 
     // open a direct output if required by specified parameters
@@ -909,22 +908,20 @@
     }
 
     if (profile != 0) {
-        // exclude MMAP streams
-        if ((*flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0 || output != AUDIO_IO_HANDLE_NONE) {
-            for (size_t i = 0; i < mOutputs.size(); i++) {
-                sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
-                if (!desc->isDuplicated() && (profile == desc->mProfile)) {
-                    // reuse direct output if currently open by the same client
-                    // and configured with same parameters
-                    if ((config->sample_rate == desc->mSamplingRate) &&
-                        audio_formats_match(config->format, desc->mFormat) &&
-                        (config->channel_mask == desc->mChannelMask) &&
-                        (session == desc->mDirectClientSession)) {
-                        desc->mDirectOpenCount++;
-                        ALOGI("getOutputForDevice() reusing direct output %d for session %d",
-                              mOutputs.keyAt(i), session);
-                        return mOutputs.keyAt(i);
-                    }
+        // exclusive outputs for MMAP and Offload are enforced by different session ids.
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+            if (!desc->isDuplicated() && (profile == desc->mProfile)) {
+                // reuse direct output if currently open by the same client
+                // and configured with same parameters
+                if ((config->sample_rate == desc->mSamplingRate) &&
+                    (config->format == desc->mFormat) &&
+                    (config->channel_mask == desc->mChannelMask) &&
+                    (session == desc->mDirectClientSession)) {
+                    desc->mDirectOpenCount++;
+                    ALOGI("getOutputForDevice() reusing direct output %d for session %d",
+                        mOutputs.keyAt(i), session);
+                    return mOutputs.keyAt(i);
                 }
             }
         }
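
After the restructuring above, a direct output is reused only when the client's sample rate, format, channel mask and session all match the open descriptor exactly (note the switch from audio_formats_match() to strict equality). A simplified sketch of that matching step, with plain structs standing in for SwAudioOutputDescriptor:

// Illustrative sketch of the reuse-if-exact-match check for direct outputs.
#include <cstdint>
#include <cstdio>
#include <vector>

struct OutputDesc {
    int handle;
    uint32_t sampleRate;
    int format;
    uint32_t channelMask;
    int directClientSession;
    int directOpenCount = 0;
};

int reuseDirectOutput(std::vector<OutputDesc>& outputs,
                      uint32_t sampleRate, int format, uint32_t channelMask, int session) {
    for (OutputDesc& desc : outputs) {
        if (desc.sampleRate == sampleRate && desc.format == format &&
                desc.channelMask == channelMask && desc.directClientSession == session) {
            desc.directOpenCount++;   // same client re-opening: bump the open count
            return desc.handle;       // reuse the existing direct output
        }
    }
    return -1;  // no match: the caller opens a new direct output instead
}

int main() {
    std::vector<OutputDesc> outputs = { {13, 48000, 1, 0x3, 7} };
    std::printf("reused output %d\n", reuseDirectOutput(outputs, 48000, 1, 0x3, 7));
    return 0;
}
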
@@ -945,8 +942,7 @@
         // only accept an output with the requested parameters
         if (status != NO_ERROR ||
             (config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
-            (config->format != AUDIO_FORMAT_DEFAULT &&
-                    !audio_formats_match(config->format, outputDesc->mFormat)) ||
+            (config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->mFormat) ||
             (config->channel_mask != 0 && config->channel_mask != outputDesc->mChannelMask)) {
             ALOGV("getOutputForDevice() failed opening direct output: output %d sample rate %d %d,"
                     "format %d %d, channel mask %04x %04x", output, config->sample_rate,
@@ -1035,7 +1031,7 @@
             // if a valid format is specified, skip output if not compatible
             if (format != AUDIO_FORMAT_INVALID) {
                 if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
-                    if (!audio_formats_match(format, outputDesc->mFormat)) {
+                    if (format != outputDesc->mFormat) {
                         continue;
                     }
                 } else if (!audio_is_linear_pcm(format)) {
@@ -1116,7 +1112,7 @@
         } else {
             newDevice = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
         }
-    } else if (mOutputRoutes.hasRouteChanged(session)) {
+    } else if (mOutputRoutes.getAndClearRouteChanged(session)) {
         newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
         checkStrategyRoute(getStrategy(stream), output);
     } else {
@@ -1980,7 +1976,7 @@
     // Routing?
     mInputRoutes.incRouteActivity(session);
 
-    if (audioSession->activeCount() == 1 || mInputRoutes.hasRouteChanged(session)) {
+    if (audioSession->activeCount() == 1 || mInputRoutes.getAndClearRouteChanged(session)) {
         // indicate active capture to sound trigger service if starting capture from a mic on
         // primary HW module
         audio_devices_t device = getNewInputDevice(inputDesc);
@@ -4707,7 +4703,7 @@
     for (size_t routeIndex = 0; routeIndex < mOutputRoutes.size(); routeIndex++) {
         sp<SessionRoute> route = mOutputRoutes.valueAt(routeIndex);
         routing_strategy routeStrategy = getStrategy(route->mStreamType);
-        if ((routeStrategy == strategy) && route->isActive() &&
+        if ((routeStrategy == strategy) && route->isActiveOrChanged() &&
                 (mAvailableOutputDevices.indexOf(route->mDeviceDescriptor) >= 0)) {
             return route->mDeviceDescriptor->type();
         }
@@ -5139,7 +5135,7 @@
     // then select this device.
     for (size_t routeIndex = 0; routeIndex < mInputRoutes.size(); routeIndex++) {
          sp<SessionRoute> route = mInputRoutes.valueAt(routeIndex);
-         if ((inputSource == route->mSource) && route->isActive() &&
+         if ((inputSource == route->mSource) && route->isActiveOrChanged() &&
                  (mAvailableInputDevices.indexOf(route->mDeviceDescriptor) >= 0)) {
              return route->mDeviceDescriptor->type();
          }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index d05ba1f..2b68882 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -628,7 +628,6 @@
                 audio_devices_t device,
                 audio_session_t session,
                 audio_stream_type_t stream,
-                audio_io_handle_t originalOutput,
                 const audio_config_t *config,
                 audio_output_flags_t *flags);
         // internal method to return the input handle for the given device and format
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 077e05e..66e9196 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -783,6 +783,18 @@
                 name.c_str(), statusToString(status));
         return nullptr;
     }
+
+    for (auto& conflictName : resourceCost.conflictingDevices) {
+        uint16_t major, minor;
+        std::string type, id;
+        status_t res = parseDeviceName(conflictName, &major, &minor, &type, &id);
+        if (res != OK) {
+            ALOGE("%s: Failed to parse conflicting device %s", __FUNCTION__, conflictName.c_str());
+            return nullptr;
+        }
+        conflictName = id;
+    }
+
     return std::unique_ptr<DeviceInfo>(
         new DeviceInfoT(name, tagId, id, minorVersion, resourceCost,
                 cameraInterface));
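
The loop above rewrites each conflicting-device entry from the provider's full device name down to the bare camera id, so later conflict lookups compare like with like. A sketch of that normalization follows; the "device@<major>.<minor>/<type>/<id>" layout is assumed here for illustration, while the real parsing is done by parseDeviceName().

// Illustrative sketch of normalizing conflicting-device names to bare ids.
#include <cstdio>
#include <string>
#include <vector>

bool parseDeviceName(const std::string& name, std::string* id) {
    // Expect something like "device@3.2/internal/1"; take the last '/'-separated field.
    if (name.rfind("device@", 0) != 0) return false;
    const std::size_t lastSlash = name.find_last_of('/');
    if (lastSlash == std::string::npos || lastSlash + 1 >= name.size()) return false;
    *id = name.substr(lastSlash + 1);
    return true;
}

int main() {
    std::vector<std::string> conflicting = { "device@3.2/internal/1", "device@1.0/legacy/0" };
    for (std::string& name : conflicting) {
        std::string id;
        if (!parseDeviceName(name, &id)) {
            std::fprintf(stderr, "failed to parse conflicting device %s\n", name.c_str());
            continue;  // the framework code bails out entirely instead
        }
        name = id;  // rewrite the entry in place, as the loop above does
    }
    for (const std::string& id : conflicting) {
        std::printf("conflicts with camera %s\n", id.c_str());
    }
    return 0;
}
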
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index 701ca6e..51619f6 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -25,6 +25,9 @@
 #include <media/stagefright/omx/1.0/Omx.h>
 #include <media/stagefright/omx/1.0/OmxStore.h>
 
+#include <media/CodecServiceRegistrant.h>
+#include <dlfcn.h>
+
 using namespace android;
 
 // Must match location in Android.mk.
@@ -45,20 +48,37 @@
 
     ::android::hardware::configureRpcThreadpool(64, false);
 
-    using namespace ::android::hardware::media::omx::V1_0;
-    sp<IOmxStore> omxStore = new implementation::OmxStore();
-    if (omxStore == nullptr) {
-        LOG(ERROR) << "Cannot create IOmxStore HAL service.";
-    } else if (omxStore->registerAsService() != OK) {
-        LOG(ERROR) << "Cannot register IOmxStore HAL service.";
-    }
-    sp<IOmx> omx = new implementation::Omx();
-    if (omx == nullptr) {
-        LOG(ERROR) << "Cannot create IOmx HAL service.";
-    } else if (omx->registerAsService() != OK) {
-        LOG(ERROR) << "Cannot register IOmx HAL service.";
+    // Registration of customized codec services
+    void *registrantLib = dlopen(
+            "libmedia_codecserviceregistrant.so",
+            RTLD_NOW | RTLD_LOCAL);
+    if (registrantLib) {
+        RegisterCodecServicesFunc registerCodecServices =
+                reinterpret_cast<RegisterCodecServicesFunc>(
+                dlsym(registrantLib, "RegisterCodecServices"));
+        if (registerCodecServices) {
+            registerCodecServices();
+        } else {
+            LOG(WARNING) << "Cannot register additional services "
+                    "-- corrupted library.";
+        }
     } else {
-        LOG(INFO) << "IOmx HAL service created.";
+        // Default codec services
+        using namespace ::android::hardware::media::omx::V1_0;
+        sp<IOmxStore> omxStore = new implementation::OmxStore();
+        if (omxStore == nullptr) {
+            LOG(ERROR) << "Cannot create IOmxStore HAL service.";
+        } else if (omxStore->registerAsService() != OK) {
+            LOG(ERROR) << "Cannot register IOmxStore HAL service.";
+        }
+        sp<IOmx> omx = new implementation::Omx();
+        if (omx == nullptr) {
+            LOG(ERROR) << "Cannot create IOmx HAL service.";
+        } else if (omx->registerAsService() != OK) {
+            LOG(ERROR) << "Cannot register IOmx HAL service.";
+        } else {
+            LOG(INFO) << "IOmx HAL service created.";
+        }
     }
 
     ::android::hardware::joinRpcThreadpool();
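
The new startup path above first tries to dlopen() a codec service registrant library and call its RegisterCodecServices entry point, and only falls back to registering the default OMX HAL services when that library is absent. A stand-alone sketch of the same optional-plugin pattern, with a stub in place of the OMX registration:

// Illustrative sketch of dlopen()-based optional service registration.
#include <cstdio>
#include <dlfcn.h>

using RegisterCodecServicesFunc = void (*)();

void registerDefaultServices() {
    std::puts("registering default OMX services");  // stand-in for the IOmx/IOmxStore path
}

int main() {
    void* registrantLib = dlopen("libmedia_codecserviceregistrant.so", RTLD_NOW | RTLD_LOCAL);
    if (registrantLib != nullptr) {
        auto registerCodecServices = reinterpret_cast<RegisterCodecServicesFunc>(
                dlsym(registrantLib, "RegisterCodecServices"));
        if (registerCodecServices != nullptr) {
            registerCodecServices();      // customized codec services
        } else {
            std::fprintf(stderr, "cannot register additional services -- corrupted library\n");
        }
    } else {
        registerDefaultServices();        // default codec services
    }
    return 0;
}
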
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 6015b28..253f290 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -49,9 +49,9 @@
 
     virtual aaudio_result_t close() = 0;
 
-    virtual aaudio_result_t registerStream(android::sp<AAudioServiceStreamBase> stream);
+    aaudio_result_t registerStream(android::sp<AAudioServiceStreamBase> stream);
 
-    virtual aaudio_result_t unregisterStream(android::sp<AAudioServiceStreamBase> stream);
+    aaudio_result_t unregisterStream(android::sp<AAudioServiceStreamBase> stream);
 
     virtual aaudio_result_t startStream(android::sp<AAudioServiceStreamBase> stream,
                                         audio_port_handle_t *clientHandle) = 0;
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index c943008..48d8002 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -105,6 +105,9 @@
             goto error;
         }
 
+        // This is not protected by a lock because the stream cannot be
+        // referenced until the service returns a handle to the client.
+        // So only one thread can open a stream.
         mServiceEndpoint = mEndpointManager.openEndpoint(mAudioService,
                                                          request,
                                                          sharingMode);
@@ -113,6 +116,9 @@
             result = AAUDIO_ERROR_UNAVAILABLE;
             goto error;
         }
+        // Save a weak pointer that we will use to access the endpoint.
+        mServiceEndpointWeak = mServiceEndpoint;
+
         mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
         copyFrom(*mServiceEndpoint);
     }
@@ -131,13 +137,16 @@
 
     stop();
 
-    if (mServiceEndpoint == nullptr) {
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
         result = AAUDIO_ERROR_INVALID_STATE;
     } else {
-        mServiceEndpoint->unregisterStream(this);
-        AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
-        mEndpointManager.closeEndpoint(mServiceEndpoint);
-        mServiceEndpoint.clear();
+        endpoint->unregisterStream(this);
+        AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
+        endpointManager.closeEndpoint(endpoint);
+
+        // AAudioService::closeStream() prevents two threads from closing at the same time.
+        mServiceEndpoint.clear(); // endpoint will hold the pointer until this method returns.
     }
 
     {
@@ -153,7 +162,12 @@
 
 aaudio_result_t AAudioServiceStreamBase::startDevice() {
     mClientHandle = AUDIO_PORT_HANDLE_NONE;
-    return mServiceEndpoint->startStream(this, &mClientHandle);
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    return endpoint->startStream(this, &mClientHandle);
 }
 
 /**
@@ -163,16 +177,11 @@
  */
 aaudio_result_t AAudioServiceStreamBase::start() {
     aaudio_result_t result = AAUDIO_OK;
+
     if (isRunning()) {
         return AAUDIO_OK;
     }
 
-    if (mServiceEndpoint == nullptr) {
-        ALOGE("%s() missing endpoint", __func__);
-        result = AAUDIO_ERROR_INVALID_STATE;
-        goto error;
-    }
-
     setFlowing(false);
 
     // Start with fresh presentation timestamps.
@@ -201,10 +210,6 @@
     if (!isRunning()) {
         return result;
     }
-    if (mServiceEndpoint == nullptr) {
-        ALOGE("%s() missing endpoint", __func__);
-        return AAUDIO_ERROR_INVALID_STATE;
-    }
 
     // Send it now because the timestamp gets rounded up when stopStream() is called below.
     // Also we don't need the timestamps while we are shutting down.
@@ -216,7 +221,12 @@
         return result;
     }
 
-    result = mServiceEndpoint->stopStream(this, mClientHandle);
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    result = endpoint->stopStream(this, mClientHandle);
     if (result != AAUDIO_OK) {
         ALOGE("%s() mServiceEndpoint returned %d, %s", __func__, result, getTypeText());
         disconnect(); // TODO should we return or pause Base first?
@@ -233,11 +243,6 @@
         return result;
     }
 
-    if (mServiceEndpoint == nullptr) {
-        ALOGE("%s() missing endpoint", __func__);
-        return AAUDIO_ERROR_INVALID_STATE;
-    }
-
     setState(AAUDIO_STREAM_STATE_STOPPING);
 
     // Send it now because the timestamp gets rounded up when stopStream() is called below.
@@ -249,10 +254,15 @@
         return result;
     }
 
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
     // TODO wait for data to be played out
-    result = mServiceEndpoint->stopStream(this, mClientHandle);
+    result = endpoint->stopStream(this, mClientHandle);
     if (result != AAUDIO_OK) {
-        ALOGE("%s() mServiceEndpoint returned %d, %s", __func__, result, getTypeText());
+        ALOGE("%s() stopStream returned %d, %s", __func__, result, getTypeText());
         disconnect();
         // TODO what to do with result here?
     }
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index d8102be..0ad015e 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -279,7 +279,12 @@
     SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;
 
     android::AAudioService &mAudioService;
+
+    // The mServiceEndpoint variable can be accessed by multiple threads.
+    // So we access it by locally promoting a weak pointer to a smart pointer,
+    // which is thread-safe.
     android::sp<AAudioServiceEndpoint> mServiceEndpoint;
+    android::wp<AAudioServiceEndpoint> mServiceEndpointWeak;
 
 private:
     aaudio_handle_t         mHandle = -1;
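
The weak pointer added above is what the promote-and-check snippets earlier rely on: on these paths callers never touch mServiceEndpoint directly, they promote mServiceEndpointWeak to a strong reference for the duration of the call and treat a failed promotion as an invalid-state error. An illustrative analogue using std::weak_ptr and std::shared_ptr in place of android::wp<> and android::sp<>:

// Illustrative sketch of the promote-before-use pattern for a possibly-closed endpoint.
#include <cstdio>
#include <memory>

struct Endpoint {
    int startStream() { return 0; }  // 0 == OK
};

class Stream {
public:
    explicit Stream(std::shared_ptr<Endpoint> endpoint) : mEndpointWeak(endpoint) {}

    int start() {
        // promote() in the framework code; lock() in std::weak_ptr terms.
        std::shared_ptr<Endpoint> endpoint = mEndpointWeak.lock();
        if (endpoint == nullptr) {
            std::fprintf(stderr, "%s() has no endpoint\n", __func__);
            return -1;  // stand-in for AAUDIO_ERROR_INVALID_STATE
        }
        return endpoint->startStream();  // strong ref keeps the endpoint alive for this call
    }

private:
    std::weak_ptr<Endpoint> mEndpointWeak;
};

int main() {
    auto endpoint = std::make_shared<Endpoint>();
    Stream stream(endpoint);
    std::printf("start: %d\n", stream.start());
    endpoint.reset();                             // endpoint closed elsewhere
    std::printf("start: %d\n", stream.start());  // now fails with "has no endpoint"
    return 0;
}
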
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 34ddb4b..c845309 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -70,14 +70,19 @@
         return result;
     }
 
-    result = mServiceEndpoint->registerStream(keep);
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
+    result = endpoint->registerStream(keep);
     if (result != AAUDIO_OK) {
-        goto error;
+        return result;
     }
 
     setState(AAUDIO_STREAM_STATE_OPEN);
 
-error:
     return AAUDIO_OK;
 }
 
@@ -118,21 +123,37 @@
 
 aaudio_result_t AAudioServiceStreamMMAP::startClient(const android::AudioClient& client,
                                                        audio_port_handle_t *clientHandle) {
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
     // Start the client on behalf of the application. Generate a new porthandle.
-    aaudio_result_t result = mServiceEndpoint->startClient(client, clientHandle);
+    aaudio_result_t result = endpoint->startClient(client, clientHandle);
     return result;
 }
 
 aaudio_result_t AAudioServiceStreamMMAP::stopClient(audio_port_handle_t clientHandle) {
-    aaudio_result_t result = mServiceEndpoint->stopClient(clientHandle);
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    aaudio_result_t result = endpoint->stopClient(clientHandle);
     return result;
 }
 
 // Get free-running DSP or DMA hardware position from the HAL.
 aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition(int64_t *positionFrames,
                                                                   int64_t *timeNanos) {
-    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP{
-            static_cast<AAudioServiceEndpointMMAP *>(mServiceEndpoint.get())};
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
+            static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
+
     aaudio_result_t result = serviceEndpointMMAP->getFreeRunningPosition(positionFrames, timeNanos);
     if (result == AAUDIO_OK) {
         Timestamp timestamp(*positionFrames, *timeNanos);
@@ -148,8 +169,15 @@
 // Get timestamp that was written by getFreeRunningPosition()
 aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp(int64_t *positionFrames,
                                                                 int64_t *timeNanos) {
-    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP{
-            static_cast<AAudioServiceEndpointMMAP *>(mServiceEndpoint.get())};
+
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
+            static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
+
     // TODO Get presentation timestamp from the HAL
     if (mAtomicTimestamp.isValid()) {
         Timestamp timestamp = mAtomicTimestamp.read();
@@ -165,7 +193,12 @@
 aaudio_result_t AAudioServiceStreamMMAP::getAudioDataDescription(
         AudioEndpointParcelable &parcelable)
 {
-    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP{
-            static_cast<AAudioServiceEndpointMMAP *>(mServiceEndpoint.get())};
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
+            static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
     return serviceEndpointMMAP->getDownDataDescription(parcelable);
 }
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index 864a008..05c5735 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -128,6 +128,12 @@
 
     const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
 
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        result = AAUDIO_ERROR_INVALID_STATE;
+        goto error;
+    }
+
     // Is the request compatible with the shared endpoint?
     setFormat(configurationInput.getFormat());
     if (getFormat() == AAUDIO_FORMAT_UNSPECIFIED) {
@@ -140,20 +146,20 @@
 
     setSampleRate(configurationInput.getSampleRate());
     if (getSampleRate() == AAUDIO_UNSPECIFIED) {
-        setSampleRate(mServiceEndpoint->getSampleRate());
-    } else if (getSampleRate() != mServiceEndpoint->getSampleRate()) {
+        setSampleRate(endpoint->getSampleRate());
+    } else if (getSampleRate() != endpoint->getSampleRate()) {
         ALOGD("%s() mSampleRate = %d, need %d",
-              __func__, getSampleRate(), mServiceEndpoint->getSampleRate());
+              __func__, getSampleRate(), endpoint->getSampleRate());
         result = AAUDIO_ERROR_INVALID_RATE;
         goto error;
     }
 
     setSamplesPerFrame(configurationInput.getSamplesPerFrame());
     if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
-        setSamplesPerFrame(mServiceEndpoint->getSamplesPerFrame());
-    } else if (getSamplesPerFrame() != mServiceEndpoint->getSamplesPerFrame()) {
+        setSamplesPerFrame(endpoint->getSamplesPerFrame());
+    } else if (getSamplesPerFrame() != endpoint->getSamplesPerFrame()) {
         ALOGD("%s() mSamplesPerFrame = %d, need %d",
-              __func__, getSamplesPerFrame(), mServiceEndpoint->getSamplesPerFrame());
+              __func__, getSamplesPerFrame(), endpoint->getSamplesPerFrame());
         result = AAUDIO_ERROR_OUT_OF_RANGE;
         goto error;
     }
@@ -179,7 +185,10 @@
         }
     }
 
-    result = mServiceEndpoint->registerStream(keep);
+    ALOGD("AAudioServiceStreamShared::open() actual rate = %d, channels = %d, deviceId = %d",
+          getSampleRate(), getSamplesPerFrame(), endpoint->getDeviceId());
+
+    result = endpoint->registerStream(keep);
     if (result != AAUDIO_OK) {
         goto error;
     }
@@ -246,7 +255,13 @@
                                                                 int64_t *timeNanos) {
 
     int64_t position = 0;
-    aaudio_result_t result = mServiceEndpoint->getTimestamp(&position, timeNanos);
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        ALOGE("%s() has no endpoint", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
+    aaudio_result_t result = endpoint->getTimestamp(&position, timeNanos);
     if (result == AAUDIO_OK) {
         int64_t offset = mTimestampPositionOffset.load();
         // TODO, do not go below starting value