Merge "AAudio: Set minimum allowed sampling rate to 8000 Hz" into oc-dev
diff --git a/drm/drmserver/DrmManager.cpp b/drm/drmserver/DrmManager.cpp
index 1d835f9..bf04a89 100644
--- a/drm/drmserver/DrmManager.cpp
+++ b/drm/drmserver/DrmManager.cpp
@@ -88,14 +88,9 @@
 }
 
 status_t DrmManager::loadPlugIns() {
-
-    String8 vendorPluginDirPath("/vendor/lib/drm");
-    loadPlugIns(vendorPluginDirPath);
-
     String8 pluginDirPath("/system/lib/drm");
     loadPlugIns(pluginDirPath);
     return DRM_NO_ERROR;
-
 }
 
 status_t DrmManager::loadPlugIns(const String8& plugInDirPath) {
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index 3596f12..f54954a 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -410,6 +410,7 @@
     }
 
     setListener(NULL);
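+    // Also clear the listener on the plugin so the HAL does not call back into this object.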
+    mPlugin->setListener(NULL);
     mPlugin.clear();
 
     return OK;
diff --git a/media/libaaudio/examples/loopback/Android.mk b/media/libaaudio/examples/loopback/Android.mk
new file mode 100644
index 0000000..5053e7d
--- /dev/null
+++ b/media/libaaudio/examples/loopback/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/examples/loopback/jni/Android.mk b/media/libaaudio/examples/loopback/jni/Android.mk
new file mode 100644
index 0000000..dc933e3
--- /dev/null
+++ b/media/libaaudio/examples/loopback/jni/Android.mk
@@ -0,0 +1,13 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/libaaudio/include
+
+# NDK recommends using this kind of relative path instead of an absolute path.
+LOCAL_SRC_FILES:= ../src/loopback.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
+LOCAL_MODULE := aaudio_loopback
+include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/loopback/jni/Application.mk b/media/libaaudio/examples/loopback/jni/Application.mk
new file mode 100644
index 0000000..ba44f37
--- /dev/null
+++ b/media/libaaudio/examples/loopback/jni/Application.mk
@@ -0,0 +1 @@
+APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
new file mode 100644
index 0000000..bad21f7
--- /dev/null
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -0,0 +1,528 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play an impulse and then record it.
+// Measure the round trip latency.
+
+#include <assert.h>
+#include <cctype>
+#include <errno.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+
+#define INPUT_PEAK_THRESHOLD    0.1f
+#define SILENCE_FRAMES          10000
+#define SAMPLE_RATE             48000
+#define NUM_SECONDS             7
+#define FILENAME                "/data/oboe_input.raw"
+
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
+#define MILLIS_PER_SECOND     1000
+#define NANOS_PER_SECOND      (NANOS_PER_MILLISECOND * MILLIS_PER_SECOND)
+
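+// Records the first channel of the input as mono float samples so a test run
+// can be saved to FILENAME for offline analysis.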
+class AudioRecorder
+{
+public:
+    AudioRecorder() {
+    }
+    ~AudioRecorder() {
+        delete[] mData;
+    }
+
+    void allocate(int maxFrames) {
+        delete[] mData;
+        mData = new float[maxFrames];
+        mMaxFrames = maxFrames;
+    }
+
+    void record(int16_t *inputData, int inputChannelCount, int numFrames) {
+        // stop at end of buffer
+        if ((mFrameCounter + numFrames) > mMaxFrames) {
+            numFrames = mMaxFrames - mFrameCounter;
+        }
+        for (int i = 0; i < numFrames; i++) {
+            mData[mFrameCounter++] = inputData[i * inputChannelCount] * (1.0f / 32768);
+        }
+    }
+
+    void record(float *inputData, int inputChannelCount, int numFrames) {
+        // stop at end of buffer
+        if ((mFrameCounter + numFrames) > mMaxFrames) {
+            numFrames = mMaxFrames - mFrameCounter;
+        }
+        for (int i = 0; i < numFrames; i++) {
+            mData[mFrameCounter++] = inputData[i * inputChannelCount];
+        }
+    }
+
+    int save(const char *fileName) {
+        FILE *fid = fopen(fileName, "wb");
+        if (fid == NULL) {
+            return errno;
+        }
+        int written = fwrite(mData, sizeof(float), mFrameCounter, fid);
+        fclose(fid);
+        return written;
+    }
+
+private:
+    float *mData = NULL;
+    int32_t mFrameCounter = 0;
+    int32_t mMaxFrames = 0;
+};
+
+// ====================================================================================
+// ========================= Loopback Processor =======================================
+// ====================================================================================
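+// Sends a short impulse to the output stream, then counts the frames until the
+// echo crosses INPUT_PEAK_THRESHOLD on the input. Each count is one latency
+// measurement; calculateAverageLatency() reports the mean and standard deviation.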
+class LoopbackProcessor {
+public:
+
+    // Calculate mean and standard deviation.
+    double calculateAverageLatency(double *deviation) {
+        if (mLatencyCount <= 0) {
+            return -1.0;
+        }
+        double sum = 0.0;
+        for (int i = 0; i < mLatencyCount; i++) {
+            sum += mLatencyArray[i];
+        }
+        double average = sum /  mLatencyCount;
+        sum = 0.0;
+        for (int i = 0; i < mLatencyCount; i++) {
+            double error = average - mLatencyArray[i];
+            sum += error * error; // squared
+        }
+        *deviation = sqrt(sum / mLatencyCount);
+        return average;
+    }
+
+    float getMaxAmplitude() const { return mMaxAmplitude; }
+    int   getMeasurementCount() const { return mLatencyCount; }
+    float getAverageAmplitude() const { return mAmplitudeTotal / mAmplitudeCount; }
+
+    // TODO Convert this to a feedback circuit and then use auto-correlation to measure the period.
+    void process(float *inputData, int inputChannelCount,
+            float *outputData, int outputChannelCount,
+            int numFrames) {
+        (void) outputChannelCount;
+
+        // Measure peak and average amplitude.
+        for (int i = 0; i < numFrames; i++) {
+            float sample = inputData[i * inputChannelCount];
+            if (sample > mMaxAmplitude) {
+                mMaxAmplitude = sample;
+            }
+            if (sample < 0) {
+                sample = 0 - sample;
+            }
+            mAmplitudeTotal += sample;
+            mAmplitudeCount++;
+        }
+
+        // Clear output.
+        memset(outputData, 0, numFrames * outputChannelCount * sizeof(float));
+
+        // Wait a while between hearing the pulse and starting a new one.
+        if (mState == STATE_SILENT) {
+            mCounter += numFrames;
+            if (mCounter > SILENCE_FRAMES) {
+                //printf("LoopbackProcessor send impulse, burst #%d\n", mBurstCounter);
+                // copy impulse
+                for (float sample : mImpulse) {
+                    *outputData = sample;
+                    outputData += outputChannelCount;
+                }
+                mState = STATE_LISTENING;
+                mCounter = 0;
+            }
+        }
+        // Start listening as soon as we send the impulse.
+        if (mState ==  STATE_LISTENING) {
+            for (int i = 0; i < numFrames; i++) {
+                float sample = inputData[i * inputChannelCount];
+                if (sample >= INPUT_PEAK_THRESHOLD) {
+                    mLatencyArray[mLatencyCount++] = mCounter;
+                    if (mLatencyCount >= MAX_LATENCY_VALUES) {
+                        mState = STATE_DONE;
+                    } else {
+                        mState = STATE_SILENT;
+                    }
+                    mCounter = 0;
+                    break;
+                } else {
+                    mCounter++;
+                }
+            }
+        }
+    }
+
+    void echo(float *inputData, int inputChannelCount,
+            float *outputData, int outputChannelCount,
+            int numFrames) {
+        int channelsValid = (inputChannelCount < outputChannelCount)
+            ? inputChannelCount : outputChannelCount;
+        for (int i = 0; i < numFrames; i++) {
+            int ic;
+            for (ic = 0; ic < channelsValid; ic++) {
+                outputData[ic] = inputData[ic];
+            }
+            for (; ic < outputChannelCount; ic++) {
+                outputData[ic] = 0;
+            }
+            inputData += inputChannelCount;
+            outputData += outputChannelCount;
+        }
+    }
+private:
+    enum {
+        STATE_SILENT,
+        STATE_LISTENING,
+        STATE_DONE
+    };
+
+    enum {
+        MAX_LATENCY_VALUES = 64
+    };
+
+    int     mState = STATE_SILENT;
+    int32_t mCounter = 0;
+    int32_t mLatencyArray[MAX_LATENCY_VALUES];
+    int32_t mLatencyCount = 0;
+    float   mMaxAmplitude = 0;
+    float   mAmplitudeTotal = 0;
+    int32_t mAmplitudeCount = 0;
+    static const float mImpulse[5];
+};
+
+const float LoopbackProcessor::mImpulse[5] = {0.5f, 0.9f, 0.0f, -0.9f, -0.5f};
+
+// TODO make this a class that manages its own buffer allocation
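+// State shared between main() and the AAudio data callback.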
+struct LoopbackData {
+    AAudioStream     *inputStream = nullptr;
+    int32_t           inputFramesMaximum = 0;
+    int16_t          *inputData = nullptr;
+    float            *conversionBuffer = nullptr;
+    int32_t           actualInputChannelCount = 0;
+    int32_t           actualOutputChannelCount = 0;
+    int32_t           inputBuffersToDiscard = 10;
+
+    aaudio_result_t   inputError = AAUDIO_OK;
+    LoopbackProcessor loopbackProcessor;
+    AudioRecorder     audioRecorder;
+};
+
+static void convertPcm16ToFloat(const int16_t *source,
+                                float *destination,
+                                int32_t numSamples) {
+    const float scaler = 1.0f / 32768.0f;
+    for (int i = 0; i < numSamples; i++) {
+        destination[i] = source[i] * scaler;
+    }
+}
+
+// ====================================================================================
+// ========================= CALLBACK =================================================
+// ====================================================================================
+// Callback function that fills the audio output buffer.
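+// It discards the first few input buffers to let the stream settle, then does a
+// non-blocking read of the input and feeds the data to the LoopbackProcessor.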
+static aaudio_data_callback_result_t MyDataCallbackProc(
+        AAudioStream *outputStream,
+        void *userData,
+        void *audioData,
+        int32_t numFrames
+) {
+    (void) outputStream;
+    LoopbackData *myData = (LoopbackData *) userData;
+    float  *outputData = (float  *) audioData;
+
+    // Read audio data from the input stream.
+    int32_t framesRead;
+
+    if (numFrames > myData->inputFramesMaximum) {
+        myData->inputError = AAUDIO_ERROR_OUT_OF_RANGE;
+        return AAUDIO_CALLBACK_RESULT_STOP;
+    }
+
+    if (myData->inputBuffersToDiscard > 0) {
+        // Drain the input.
+        do {
+            framesRead = AAudioStream_read(myData->inputStream, myData->inputData,
+                                       numFrames, 0);
+            if (framesRead < 0) {
+                myData->inputError = framesRead;
+            } else if (framesRead > 0) {
+                myData->inputBuffersToDiscard--;
+            }
+        } while(framesRead > 0);
+    } else {
+        framesRead = AAudioStream_read(myData->inputStream, myData->inputData,
+                                       numFrames, 0);
+        if (framesRead < 0) {
+            myData->inputError = framesRead;
+        } else if (framesRead > 0) {
+            // Process valid input data.
+            myData->audioRecorder.record(myData->inputData,
+                                         myData->actualInputChannelCount,
+                                         framesRead);
+
+            int32_t numSamples = framesRead * myData->actualInputChannelCount;
+            convertPcm16ToFloat(myData->inputData, myData->conversionBuffer, numSamples);
+
+            myData->loopbackProcessor.process(myData->conversionBuffer,
+                                              myData->actualInputChannelCount,
+                                              outputData,
+                                              myData->actualOutputChannelCount,
+                                              framesRead);
+        }
+    }
+
+    return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+static void usage() {
+    printf("loopback: -b{burstsPerBuffer} -p{outputPerfMode} -P{inputPerfMode}\n");
+    printf("          -b{burstsPerBuffer} for example 2 for double buffered\n");
+    printf("          -p{outputPerfMode}  set output AAUDIO_PERFORMANCE_MODE*\n");
+    printf("          -P{inputPerfMode}   set input AAUDIO_PERFORMANCE_MODE*\n");
+    printf("              n for _NONE\n");
+    printf("              l for _LATENCY\n");
+    printf("              p for _POWER_SAVING;\n");
+    printf("For example:  loopback -b2 -pl -Pn\n");
+}
+
+static aaudio_performance_mode_t parsePerformanceMode(char c) {
+    aaudio_performance_mode_t mode = AAUDIO_PERFORMANCE_MODE_NONE;
+    c = tolower(c);
+    switch (c) {
+        case 'n':
+            mode = AAUDIO_PERFORMANCE_MODE_NONE;
+            break;
+        case 'l':
+            mode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+            break;
+        case 'p':
+            mode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
+            break;
+        default:
+            printf("ERROR invalue performance mode %c\n", c);
+            break;
+    }
+    return mode;
+}
+
+// ====================================================================================
+// TODO break up this large main() function into smaller functions
+int main(int argc, const char **argv)
+{
+    aaudio_result_t result = AAUDIO_OK;
+    LoopbackData loopbackData;
+    AAudioStream *outputStream = nullptr;
+
+    const int requestedInputChannelCount = 1;
+    const int requestedOutputChannelCount = AAUDIO_UNSPECIFIED;
+    const int requestedSampleRate = SAMPLE_RATE;
+    int actualSampleRate = 0;
+    const aaudio_audio_format_t requestedInputFormat = AAUDIO_FORMAT_PCM_I16;
+    const aaudio_audio_format_t requestedOutputFormat = AAUDIO_FORMAT_PCM_FLOAT;
+    aaudio_audio_format_t actualInputFormat;
+    aaudio_audio_format_t actualOutputFormat;
+
+    //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
+    const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
+    aaudio_sharing_mode_t       actualSharingMode;
+
+    AAudioStreamBuilder  *builder = nullptr;
+    aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
+    int32_t framesPerBurst = 0;
+    float *outputData = NULL;
+    double deviation;
+    double latency;
+    aaudio_performance_mode_t outputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+    aaudio_performance_mode_t inputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+
+    int32_t burstsPerBuffer = 1; // single buffered
+
+    for (int i = 1; i < argc; i++) {
+        const char *arg = argv[i];
+        if (arg[0] == '-') {
+            char option = arg[1];
+            switch (option) {
+                case 'b':
+                    burstsPerBuffer = atoi(&arg[2]);
+                    break;
+                case 'p':
+                    outputPerformanceLevel = parsePerformanceMode(arg[2]);
+                    break;
+                case 'P':
+                    inputPerformanceLevel = parsePerformanceMode(arg[2]);
+                    break;
+                default:
+                    usage();
+                    break;
+            }
+        } else {
+            break;
+        }
+    }
+
+    loopbackData.audioRecorder.allocate(NUM_SECONDS * SAMPLE_RATE);
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, NULL, _IONBF, (size_t) 0);
+
+    printf("%s - Audio loopback using AAudio\n", argv[0]);
+
+    // Use an AAudioStreamBuilder to contain requested parameters.
+    result = AAudio_createStreamBuilder(&builder);
+    if (result < 0) {
+        goto finish;
+    }
+
+    // Request common stream properties.
+    AAudioStreamBuilder_setSampleRate(builder, requestedSampleRate);
+    AAudioStreamBuilder_setFormat(builder, requestedInputFormat);
+    AAudioStreamBuilder_setSharingMode(builder, requestedSharingMode);
+
+    // Open the input stream.
+    AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_INPUT);
+    AAudioStreamBuilder_setPerformanceMode(builder, inputPerformanceLevel);
+    AAudioStreamBuilder_setChannelCount(builder, requestedInputChannelCount);
+
+    result = AAudioStreamBuilder_openStream(builder, &loopbackData.inputStream);
+    printf("AAudioStreamBuilder_openStream(input) returned %d = %s\n",
+           result, AAudio_convertResultToText(result));
+    if (result < 0) {
+        goto finish;
+    }
+
+    // Create an output stream using the Builder.
+    AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_OUTPUT);
+    AAudioStreamBuilder_setFormat(builder, requestedOutputFormat);
+    AAudioStreamBuilder_setPerformanceMode(builder, outputPerformanceLevel);
+    AAudioStreamBuilder_setChannelCount(builder, requestedOutputChannelCount);
+    AAudioStreamBuilder_setDataCallback(builder, MyDataCallbackProc, &loopbackData);
+
+    result = AAudioStreamBuilder_openStream(builder, &outputStream);
+    printf("AAudioStreamBuilder_openStream(output) returned %d = %s\n",
+           result, AAudio_convertResultToText(result));
+    if (result != AAUDIO_OK) {
+        goto finish;
+    }
+
+    printf("Stream INPUT ---------------------\n");
+    loopbackData.actualInputChannelCount = AAudioStream_getChannelCount(loopbackData.inputStream);
+    printf("    channelCount: requested = %d, actual = %d\n", requestedInputChannelCount,
+           loopbackData.actualInputChannelCount);
+    printf("    framesPerBurst = %d\n", AAudioStream_getFramesPerBurst(loopbackData.inputStream));
+
+    actualInputFormat = AAudioStream_getFormat(loopbackData.inputStream);
+    printf("    dataFormat: requested = %d, actual = %d\n", requestedInputFormat, actualInputFormat);
+    assert(actualInputFormat == AAUDIO_FORMAT_PCM_I16);
+
+    printf("Stream OUTPUT ---------------------\n");
+    // Check to see what kind of stream we actually got.
+    actualSampleRate = AAudioStream_getSampleRate(outputStream);
+    printf("    sampleRate: requested = %d, actual = %d\n", requestedSampleRate, actualSampleRate);
+
+    loopbackData.actualOutputChannelCount = AAudioStream_getChannelCount(outputStream);
+    printf("    channelCount: requested = %d, actual = %d\n", requestedOutputChannelCount,
+           loopbackData.actualOutputChannelCount);
+
+    actualSharingMode = AAudioStream_getSharingMode(outputStream);
+    printf("    sharingMode: requested = %d, actual = %d\n", requestedSharingMode, actualSharingMode);
+
+    // This is the number of frames that are read in one chunk by a DMA controller
+    // or a DSP or a mixer.
+    framesPerBurst = AAudioStream_getFramesPerBurst(outputStream);
+    printf("    framesPerBurst = %d\n", framesPerBurst);
+
+    printf("    bufferCapacity = %d\n", AAudioStream_getBufferCapacityInFrames(outputStream));
+
+    actualOutputFormat = AAudioStream_getFormat(outputStream);
+    printf("    dataFormat: requested = %d, actual = %d\n", requestedOutputFormat, actualOutputFormat);
+    assert(actualOutputFormat == AAUDIO_FORMAT_PCM_FLOAT);
+
+    // Allocate a buffer for the audio data.
+    loopbackData.inputFramesMaximum = 32 * framesPerBurst;
+
+    loopbackData.inputData = new int16_t[loopbackData.inputFramesMaximum * loopbackData.actualInputChannelCount];
+    loopbackData.conversionBuffer = new float[loopbackData.inputFramesMaximum *
+                                              loopbackData.actualInputChannelCount];
+
+    result = AAudioStream_setBufferSizeInFrames(outputStream, burstsPerBuffer * framesPerBurst);
+    if (result < 0) { // may be positive buffer size
+        fprintf(stderr, "ERROR - AAudioStream_setBufferSize() returned %d\n", result);
+        goto finish;
+    }
+    printf("AAudioStream_setBufferSize() actual = %d\n",result);
+
+    // Start output first so input stream runs low.
+    result = AAudioStream_requestStart(outputStream);
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR - AAudioStream_requestStart(output) returned %d = %s\n",
+                result, AAudio_convertResultToText(result));
+        goto finish;
+    }
+
+    result = AAudioStream_requestStart(loopbackData.inputStream);
+    if (result != AAUDIO_OK) {
+        fprintf(stderr, "ERROR - AAudioStream_requestStart(input) returned %d = %s\n",
+                result, AAudio_convertResultToText(result));
+        goto finish;
+    }
+
+    printf("------- sleep while the callback runs --------------\n");
+    fflush(stdout);
+    sleep(NUM_SECONDS);
+
+
+    printf("input error = %d = %s\n",
+                loopbackData.inputError, AAudio_convertResultToText(loopbackData.inputError));
+
+    printf("AAudioStream_getXRunCount %d\n", AAudioStream_getXRunCount(outputStream));
+    printf("framesRead    = %d\n", (int) AAudioStream_getFramesRead(outputStream));
+    printf("framesWritten = %d\n", (int) AAudioStream_getFramesWritten(outputStream));
+
+    latency = loopbackData.loopbackProcessor.calculateAverageLatency(&deviation);
+    printf("measured peak    = %8.5f\n", loopbackData.loopbackProcessor.getMaxAmplitude());
+    printf("threshold        = %8.5f\n", INPUT_PEAK_THRESHOLD);
+    printf("measured average = %8.5f\n", loopbackData.loopbackProcessor.getAverageAmplitude());
+    printf("# latency measurements = %d\n", loopbackData.loopbackProcessor.getMeasurementCount());
+    printf("measured latency = %8.2f +/- %4.5f frames\n", latency, deviation);
+    printf("measured latency = %8.2f msec  <===== !!\n", (1000.0 * latency / actualSampleRate));
+
+    {
+        int written = loopbackData.audioRecorder.save(FILENAME);
+        printf("wrote %d samples to %s\n", written, FILENAME);
+    }
+
+finish:
+    AAudioStream_close(outputStream);
+    AAudioStream_close(loopbackData.inputStream);
+    delete[] loopbackData.conversionBuffer;
+    delete[] loopbackData.inputData;
+    delete[] outputData;
+    AAudioStreamBuilder_delete(builder);
+
+    printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+    return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
diff --git a/media/libaaudio/examples/write_sine/jni/Application.mk b/media/libaaudio/examples/write_sine/jni/Application.mk
index e74475c..ba44f37 100644
--- a/media/libaaudio/examples/write_sine/jni/Application.mk
+++ b/media/libaaudio/examples/write_sine/jni/Application.mk
@@ -1,3 +1 @@
-# TODO remove then when we support other architectures
-APP_ABI := arm64-v8a
 APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index 1a66f35..20a981b 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -82,15 +82,17 @@
         result = AAudio_createStreamBuilder(&mBuilder);
         if (result != AAUDIO_OK) return result;
 
+        //AAudioStreamBuilder_setSampleRate(mBuilder, 44100);
         AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
         AAudioStreamBuilder_setDataCallback(mBuilder, dataProc, userContext);
         AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_FLOAT);
- //       AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
+        //AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
         AAudioStreamBuilder_setBufferCapacityInFrames(mBuilder, 48 * 8);
 
-        //AAudioStreamBuilder_setPerformanceMode(mBuilder, AAUDIO_PERFORMANCE_MODE_NONE);
-        AAudioStreamBuilder_setPerformanceMode(mBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
-        //AAudioStreamBuilder_setPerformanceMode(mBuilder, AAUDIO_PERFORMANCE_MODE_POWER_SAVING);
+        //aaudio_performance_mode_t perfMode = AAUDIO_PERFORMANCE_MODE_NONE;
+        aaudio_performance_mode_t perfMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+        //aaudio_performance_mode_t perfMode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
+        AAudioStreamBuilder_setPerformanceMode(mBuilder, perfMode);
 
         // Open an AAudioStream using the Builder.
         result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
@@ -102,6 +104,8 @@
                AAudioStream_getBufferSizeInFrames(mStream));
         printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
                AAudioStream_getBufferCapacityInFrames(mStream));
+        printf("AAudioStream_getPerformanceMode() = %d, requested %d\n",
+               AAudioStream_getPerformanceMode(mStream), perfMode);
 
      finish1:
         AAudioStreamBuilder_delete(mBuilder);
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.cpp b/media/libaaudio/src/binding/AAudioBinderClient.cpp
index 3f1bba3..435b30f 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderClient.cpp
@@ -22,6 +22,7 @@
 #include <binder/IServiceManager.h>
 #include <utils/Mutex.h>
 #include <utils/RefBase.h>
+#include <utils/Singleton.h>
 
 #include <aaudio/AAudio.h>
 
@@ -47,6 +48,8 @@
 static android::Mutex gServiceLock;
 static sp<IAAudioService>  gAAudioService;
 
+ANDROID_SINGLETON_STATIC_INSTANCE(AAudioBinderClient);
+
 // TODO Share code with other service clients.
 // Helper function to get access to the "AAudioService" service.
 // This code was modeled after frameworks/av/media/libaudioclient/AudioSystem.cpp
@@ -81,7 +84,8 @@
 }
 
 AAudioBinderClient::AAudioBinderClient()
-        : AAudioServiceInterface() {}
+        : AAudioServiceInterface()
+        , Singleton<AAudioBinderClient>() {}
 
 AAudioBinderClient::~AAudioBinderClient() {}
 
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.h b/media/libaaudio/src/binding/AAudioBinderClient.h
index ca2da29..e223376 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.h
+++ b/media/libaaudio/src/binding/AAudioBinderClient.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_AAUDIO_AAUDIO_BINDER_CLIENT_H
 #define ANDROID_AAUDIO_AAUDIO_BINDER_CLIENT_H
 
+#include <utils/Singleton.h>
+
 #include <aaudio/AAudio.h>
 #include "AAudioServiceDefinitions.h"
 #include "AAudioServiceInterface.h"
@@ -30,7 +32,8 @@
 
 namespace aaudio {
 
-class AAudioBinderClient : public AAudioServiceInterface {
+class AAudioBinderClient : public AAudioServiceInterface
+        , public android::Singleton<AAudioBinderClient> {
 
 public:
 
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 027d66d..e6751c49 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -182,6 +182,15 @@
     mDownDataQueue->getEmptyRoomAvailable(wrappingBuffer);
 }
 
+int32_t AudioEndpoint::getEmptyFramesAvailable() {
+    return mDownDataQueue->getFifoControllerBase()->getEmptyFramesAvailable();
+}
+
+int32_t AudioEndpoint::getFullFramesAvailable()
+{
+    return mDownDataQueue->getFifoControllerBase()->getFullFramesAvailable();
+}
+
 void AudioEndpoint::advanceWriteIndex(int32_t deltaFrames) {
     mDownDataQueue->getFifoControllerBase()->advanceWriteIndex(deltaFrames);
 }
@@ -227,7 +236,3 @@
     return (int32_t)mDownDataQueue->getBufferCapacityInFrames();
 }
 
-int32_t AudioEndpoint::getFullFramesAvailable()
-{
-    return mDownDataQueue->getFifoControllerBase()->getFullFramesAvailable();
-}
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index 46a3fc5..3a2099f 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -56,6 +56,9 @@
 
     void getEmptyRoomAvailable(android::WrappingBuffer *wrappingBuffer);
 
+    int32_t getEmptyFramesAvailable();
+    int32_t getFullFramesAvailable();
+
     void advanceWriteIndex(int32_t deltaFrames);
 
     /**
@@ -81,8 +84,6 @@
 
     int32_t getBufferCapacityInFrames() const;
 
-    int32_t getFullFramesAvailable();
-
 private:
     android::FifoBuffer    *mUpCommandQueue;
     android::FifoBuffer    *mDownDataQueue;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index eee860e..143d4b7 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -18,6 +18,8 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
+#define ATRACE_TAG ATRACE_TAG_AUDIO
+
 #include <stdint.h>
 #include <assert.h>
 
@@ -25,6 +27,7 @@
 
 #include <aaudio/AAudio.h>
 #include <utils/String16.h>
+#include <utils/Trace.h>
 
 #include "AudioClock.h"
 #include "AudioEndpointParcelable.h"
@@ -188,11 +191,25 @@
     ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X",
              mServiceStreamHandle);
     if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
+        // Don't close a stream while it is running.
+        aaudio_stream_state_t currentState = getState();
+        if (isPlaying()) {
+            requestStop();
+            aaudio_stream_state_t nextState;
+            int64_t timeoutNanoseconds = MIN_TIMEOUT_NANOS;
+            aaudio_result_t result = waitForStateChange(currentState, &nextState,
+                                                       timeoutNanoseconds);
+            if (result != AAUDIO_OK) {
+                ALOGE("AudioStreamInternal::close() waitForStateChange() returned %d %s",
+                      result, AAudio_convertResultToText(result));
+            }
+        }
         aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
         mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
 
         mServiceInterface.closeStream(serviceStreamHandle);
         delete[] mCallbackBuffer;
+        mCallbackBuffer = nullptr;
         return mEndPointParcelable.close();
     } else {
         return AAUDIO_ERROR_INVALID_HANDLE;
@@ -524,6 +541,8 @@
 aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
                                          int64_t timeoutNanoseconds)
 {
+    const char * traceName = (mInService) ? "aaWrtS" : "aaWrtC";
+    ATRACE_BEGIN(traceName);
     aaudio_result_t result = AAUDIO_OK;
     int32_t loopCount = 0;
     uint8_t* source = (uint8_t*)buffer;
@@ -531,6 +550,12 @@
     int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
     int32_t framesLeft = numFrames;
 
+    int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
+    if (ATRACE_ENABLED()) {
+        const char * traceName = (mInService) ? "aaFullS" : "aaFullC";
+        ATRACE_INT(traceName, fullFrames);
+    }
+
     // Write until all the data has been written or until a timeout occurs.
     while (framesLeft > 0) {
         // The call to writeNow() will not block. It will just write as much as it can.
@@ -568,6 +593,7 @@
 
     // return error or framesWritten
     (void) loopCount;
+    ATRACE_END();
     return (result < 0) ? result : numFrames - framesLeft;
 }
 
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index f313b58..30e7eba 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -30,12 +30,6 @@
 #include "legacy/AudioStreamRecord.h"
 #include "legacy/AudioStreamTrack.h"
 
-// Enable a mixer in AAudio service that will mix streams to an ALSA MMAP buffer.
-#define MMAP_SHARED_ENABLED      0
-
-// Enable AAUDIO_SHARING_MODE_EXCLUSIVE that uses an ALSA MMAP buffer directly.
-#define MMAP_EXCLUSIVE_ENABLED   0
-
 using namespace aaudio;
 
 /*
@@ -53,6 +47,7 @@
                                          AudioStream **audioStreamPtr) {
     *audioStreamPtr = nullptr;
     aaudio_result_t result = AAUDIO_OK;
+
     switch (direction) {
 
         case AAUDIO_DIRECTION_INPUT:
@@ -66,9 +61,7 @@
 
         case AAUDIO_DIRECTION_OUTPUT:
             if (tryMMap) {
-                // TODO use a singleton for the AAudioBinderClient
-                AAudioBinderClient *aaudioClient = new AAudioBinderClient();
-                *audioStreamPtr = new AudioStreamInternal(*aaudioClient, false);
+                *audioStreamPtr = new AudioStreamInternal(AAudioBinderClient::getInstance(), false);
             } else {
                 *audioStreamPtr = new AudioStreamTrack();
             }
@@ -81,20 +74,30 @@
     return result;
 }
 
+// Try to open using MMAP path if that is enabled.
+// Fall back to the Legacy path if MMAP is not available.
 aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
-    aaudio_sharing_mode_t sharingMode = getSharingMode();
-    if ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE) && (MMAP_EXCLUSIVE_ENABLED == 0)) {
-        ALOGE("AudioStreamBuilder(): EXCLUSIVE sharing mode not supported");
-        return AAUDIO_ERROR_UNAVAILABLE;
-    }
-
     AudioStream *audioStream = nullptr;
     *streamPtr = nullptr;
 
-    bool tryMMap = ((sharingMode == AAUDIO_SHARING_MODE_SHARED) && MMAP_SHARED_ENABLED) ||
-            ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE) && MMAP_EXCLUSIVE_ENABLED);
+    int32_t mmapEnabled = AAudioProperty_getMMapEnabled();
+    int32_t mmapExclusiveEnabled = AAudioProperty_getMMapExclusiveEnabled();
+    ALOGD("AudioStreamBuilder(): mmapEnabled = %d, mmapExclusiveEnabled = %d",
+          mmapEnabled, mmapExclusiveEnabled);
+
+    aaudio_sharing_mode_t sharingMode = getSharingMode();
+    if ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE)
+        && (mmapExclusiveEnabled == AAUDIO_USE_NEVER)) {
+        ALOGW("AudioStreamBuilder(): EXCLUSIVE sharing mode not supported. Use SHARED.");
+        sharingMode = AAUDIO_SHARING_MODE_SHARED;
+        setSharingMode(sharingMode);
+    }
+
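+    // NEVER disables the MMAP path entirely; ALWAYS disables the Legacy fallback.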
+    bool allowMMap = mmapEnabled != AAUDIO_USE_NEVER;
+    bool allowLegacy = mmapEnabled != AAUDIO_USE_ALWAYS;
+
     aaudio_result_t result = builder_createStream(getDirection(), sharingMode,
-                                                  tryMMap, &audioStream);
+                                                  allowMMap, &audioStream);
     if (result == AAUDIO_OK) {
         // Open the stream using the parameters from the builder.
         result = audioStream->open(*this);
@@ -105,7 +108,7 @@
             delete audioStream;
             audioStream = nullptr;
 
-            if (isMMap) {
+            if (isMMap && allowLegacy) {
                 ALOGD("AudioStreamBuilder.build() MMAP stream did not open so try Legacy path");
                 // If MMAP stream failed to open then TRY using a legacy stream.
                 result = builder_createStream(getDirection(), sharingMode,
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index eb6bfd5..a74a030 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -60,15 +60,29 @@
                               ? 2 : getSamplesPerFrame();
     audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(samplesPerFrame);
 
-    audio_input_flags_t flags = (audio_input_flags_t) AUDIO_INPUT_FLAG_NONE;
-
     size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
                         : builder.getBufferCapacity();
+
     // TODO implement an unspecified Android format then use that.
     audio_format_t format = (getFormat() == AAUDIO_UNSPECIFIED)
             ? AUDIO_FORMAT_PCM_FLOAT
             : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
 
+    audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE;
+    switch(getPerformanceMode()) {
+        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
+            flags = (audio_input_flags_t) (AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW);
+            break;
+
+        case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
+        case AAUDIO_PERFORMANCE_MODE_NONE:
+        default:
+            // No flags.
+            break;
+    }
+
+    uint32_t notificationFrames = 0;
+
     // Setup the callback if there is one.
     AudioRecord::callback_t callback = nullptr;
     void *callbackData = nullptr;
@@ -77,11 +91,12 @@
         streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
         callback = getLegacyCallback();
         callbackData = this;
+        notificationFrames = builder.getFramesPerDataCallback();
     }
     mCallbackBufferSize = builder.getFramesPerDataCallback();
 
     mAudioRecord = new AudioRecord(
-            AUDIO_SOURCE_DEFAULT,
+            AUDIO_SOURCE_VOICE_RECOGNITION,
             getSampleRate(),
             format,
             channelMask,
@@ -89,7 +104,7 @@
             frameCount,
             callback,
             callbackData,
-            0,    //    uint32_t notificationFrames = 0,
+            notificationFrames,
             AUDIO_SESSION_ALLOCATE,
             streamTransferType,
             flags
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index f4a78e1..0af6457 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -29,7 +29,7 @@
 namespace aaudio {
 
 /**
- * Internal stream that uses the legacy AudioTrack path.
+ * Internal stream that uses the legacy AudioRecord path.
  */
 class AudioStreamRecord : public AudioStreamLegacy {
 public:
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index a7c7673..9c433cd 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -69,7 +69,8 @@
             samplesPerFrame, channelMask);
 
     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
-    switch(getPerformanceMode()) {
+    aaudio_performance_mode_t perfMode = getPerformanceMode();
+    switch(perfMode) {
         case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
             // Bypass the normal mixer and go straight to the FAST mixer.
             flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_RAW);
@@ -105,12 +106,14 @@
         callback = getLegacyCallback();
         callbackData = this;
 
-        notificationFrames = builder.getFramesPerDataCallback();
         // If the total buffer size is unspecified then base the size on the burst size.
-        if (frameCount == AAUDIO_UNSPECIFIED) {
+        if (frameCount == 0
+                && ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0)) {
             // Take advantage of a special trick that allows us to create a buffer
             // that is some multiple of the burst size.
             notificationFrames = 0 - DEFAULT_BURSTS_PER_BUFFER_CAPACITY;
+        } else {
+            notificationFrames = builder.getFramesPerDataCallback();
         }
     }
     mCallbackBufferSize = builder.getFramesPerDataCallback();
@@ -158,6 +161,26 @@
     setState(AAUDIO_STREAM_STATE_OPEN);
     setDeviceId(mAudioTrack->getRoutedDeviceId());
 
+    // Update performance mode based on the actual stream.
+    // For example, if the sample rate is not allowed then you won't get a FAST track.
+    audio_output_flags_t actualFlags = mAudioTrack->getFlags();
+    aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
+    if ((actualFlags & (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_RAW))
+        == (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_RAW)) {
+        actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+
+    } else if ((actualFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
+        actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
+    }
+    setPerformanceMode(actualPerformanceMode);
+    // Log warning if we did not get what we asked for.
+    ALOGW_IF(actualFlags != flags,
+             "AudioStreamTrack::open() flags changed from 0x%08X to 0x%08X",
+             flags, actualFlags);
+    ALOGW_IF(actualPerformanceMode != perfMode,
+             "AudioStreamTrack::open() perfMode changed from %d to %d",
+             perfMode, actualPerformanceMode);
+
     return AAUDIO_OK;
 }
 
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index be2bd10..168ed86 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -18,6 +18,7 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
+#include <cutils/properties.h>
 #include <stdint.h>
 #include <sys/types.h>
 #include <utils/Errors.h>
@@ -322,3 +323,52 @@
     *sizeInBytes = numFrames * bytesPerFrame;
     return AAUDIO_OK;
 }
+
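+// Read an aaudio.mmap_* system property and validate it against the AAUDIO_USE_*
+// values. Invalid values fall back to defaultValue.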
+static int32_t AAudioProperty_getMMapProperty(const char *propName,
+                                              int32_t defaultValue,
+                                              const char * caller) {
+    int32_t prop = property_get_int32(propName, defaultValue);
+    switch (prop) {
+        case AAUDIO_USE_NEVER:
+        case AAUDIO_USE_ALWAYS:
+        case AAUDIO_USE_AUTO:
+            break;
+        default:
+            ALOGE("%s: invalid = %d", caller, prop);
+            prop = defaultValue;
+            break;
+    }
+    return prop;
+}
+
+int32_t AAudioProperty_getMMapEnabled() {
+    return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_ENABLED,
+                                          AAUDIO_USE_NEVER, __func__);
+}
+
+int32_t AAudioProperty_getMMapExclusiveEnabled() {
+    return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_EXCLUSIVE_ENABLED,
+                                          AAUDIO_USE_NEVER, __func__);
+}
+
+int32_t AAudioProperty_getMixerBursts() {
+    const int32_t defaultBursts = 2; // arbitrary
+    const int32_t maxBursts = 1024; // arbitrary
+    int32_t prop = property_get_int32(AAUDIO_PROP_MIXER_BURSTS, defaultBursts); // use 2 for double buffered
+    if (prop < 1 || prop > maxBursts) {
+        ALOGE("AAudioProperty_getMixerBursts: invalid = %d", prop);
+        prop = defaultBursts;
+    }
+    return prop;
+}
+
+int32_t AAudioProperty_getHardwareBurstMinMicros() {
+    const int32_t defaultMicros = 1000; // arbitrary
+    const int32_t maxMicros = 1000 * 1000; // arbitrary
+    int32_t prop = property_get_int32(AAUDIO_PROP_HW_BURST_MIN_USEC, defaultMicros);
+    if (prop < 1 || prop > maxMicros) {
+        ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d", prop);
+        prop = defaultMicros;
+    }
+    return prop;
+}
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index 0078cbb..7c383c7 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -170,4 +170,54 @@
  */
 int32_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format);
 
+
+// Note that this code may be replaced by Settings or by some other system configuration tool.
+
+enum : int32_t {
+    // Related feature is disabled
+    AAUDIO_USE_NEVER = 0,
+    // If related feature works then use it. Otherwise fall back to something else.
+    AAUDIO_USE_AUTO = 1,
+    // Related feature must be used. If not available then fail.
+    AAUDIO_USE_ALWAYS = 2
+};
+
+#define AAUDIO_PROP_MMAP_ENABLED           "aaudio.mmap_enabled"
+
+/**
+ * Read system property.
+ * @return AAUDIO_USE_NEVER or AAUDIO_USE_AUTO or AAUDIO_USE_ALWAYS
+ */
+int32_t AAudioProperty_getMMapEnabled();
+
+#define AAUDIO_PROP_MMAP_EXCLUSIVE_ENABLED "aaudio.mmap_exclusive_enabled"
+
+/**
+ * Read system property.
+ * @return AAUDIO_USE_NEVER or AAUDIO_USE_AUTO or AAUDIO_USE_ALWAYS
+ */
+int32_t AAudioProperty_getMMapExclusiveEnabled();
+
+#define AAUDIO_PROP_MIXER_BURSTS           "aaudio.mixer_bursts"
+
+/**
+ * Read system property.
+ * @return number of bursts per mixer cycle
+ */
+int32_t AAudioProperty_getMixerBursts();
+
+#define AAUDIO_PROP_HW_BURST_MIN_USEC      "aaudio.hw_burst_min_usec"
+
+/**
+ * Read system property.
+ * This is handy in case the DMA is bursting too quickly for the CPU to keep up.
+ * For example, there may be a DMA burst every 100 usec but you only
+ * want to feed the MMAP buffer every 2000 usec.
+ *
+ * This will affect the framesPerBurst for an MMAP stream.
+ *
+ * @return minimum number of microseconds for a MMAP HW burst
+ */
+int32_t AAudioProperty_getHardwareBurstMinMicros();
+
 #endif //UTILITY_AAUDIO_UTILITIES_H
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 22b09d4..00a1f9c 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -98,6 +98,7 @@
     mBufferingMonitor->stop();
 
     mIsDrmProtected = false;
+    mIsDrmReleased = false;
     mIsSecure = false;
     mMimes.clear();
 }
@@ -690,6 +691,17 @@
           break;
       }
 
+      case kWhatReleaseDrm:
+      {
+          status_t status = onReleaseDrm();
+          sp<AMessage> response = new AMessage;
+          response->setInt32("status", status);
+          sp<AReplyToken> replyID;
+          CHECK(msg->senderAwaitsResponse(&replyID));
+          response->postReply(replyID);
+          break;
+      }
+
       default:
           Source::onMessageReceived(msg);
           break;
@@ -839,6 +851,13 @@
         return -EWOULDBLOCK;
     }
 
+    // If we have gone through the stop/releaseDrm sequence, we no longer send down any buffers b/c
+    // the codec's crypto object has gone away (b/37960096).
+    // Note: This will be unnecessary when stop() changes behavior and releases codec (b/35248283).
+    if (!mStarted && mIsDrmReleased) {
+        return -EWOULDBLOCK;
+    }
+
     Track *track = audio ? &mAudioTrack : &mVideoTrack;
 
     if (track->mSource == NULL) {
@@ -1897,11 +1916,31 @@
     return status;
 }
 
+status_t NuPlayer::GenericSource::releaseDrm()
+{
+    ALOGV("releaseDrm");
+
+    sp<AMessage> msg = new AMessage(kWhatReleaseDrm, this);
+
+    // synchronous call to update the source states before the player proceeds with crypto cleanup
+    sp<AMessage> response;
+    status_t status = msg->postAndAwaitResponse(&response);
+
+    if (status == OK && response != NULL) {
+        ALOGD("releaseDrm ret: OK ");
+    } else {
+        ALOGE("releaseDrm err: %d", status);
+    }
+
+    return status;
+}
+
 status_t NuPlayer::GenericSource::onPrepareDrm(const sp<AMessage> &msg)
 {
     ALOGV("onPrepareDrm ");
 
     mIsDrmProtected = false;
+    mIsDrmReleased = false;
     mIsSecure = false;
 
     uint8_t *uuid;
@@ -1949,8 +1988,26 @@
     return status;
 }
 
+status_t NuPlayer::GenericSource::onReleaseDrm()
+{
+    if (mIsDrmProtected) {
+        mIsDrmProtected = false;
+        // to prevent returning any more buffer after stop/releaseDrm (b/37960096)
+        mIsDrmReleased = true;
+        ALOGV("onReleaseDrm: mIsDrmProtected is reset.");
+    } else {
+        ALOGE("onReleaseDrm: mIsDrmProtected is already false.");
+    }
+
+    return OK;
+}
+
 status_t NuPlayer::GenericSource::checkDrmInfo()
 {
+    // clearing the flag at prepare in case the player is reused after stop/releaseDrm with the
+    // same source without being reset (called by prepareAsync/initFromDataSource)
+    mIsDrmReleased = false;
+
     if (mFileMeta == NULL) {
         ALOGI("checkDrmInfo: No metadata");
         return OK; // letting the caller responds accordingly
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 64f21a6..b0c6695 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -91,6 +91,8 @@
     virtual status_t prepareDrm(
             const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId, sp<ICrypto> *crypto);
 
+    virtual status_t releaseDrm();
+
 
 protected:
     virtual ~GenericSource();
@@ -119,6 +121,7 @@
         kWhatSecureDecodersInstantiated,
         // Modular DRM
         kWhatPrepareDrm,
+        kWhatReleaseDrm,
     };
 
     struct Track {
@@ -308,10 +311,12 @@
 
     // Modular DRM
     bool mIsDrmProtected;
+    bool mIsDrmReleased;
     Vector<String8> mMimes;
 
     status_t checkDrmInfo();
     status_t onPrepareDrm(const sp<AMessage> &msg);
+    status_t onReleaseDrm();
 
     DISALLOW_EVIL_CONSTRUCTORS(GenericSource);
 };
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 0d4c730..6ded392 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -2787,6 +2787,11 @@
 
     status_t status;
     if (mCrypto != NULL) {
+        // notifying the source first before removing crypto from codec
+        if (mSource != NULL) {
+            mSource->releaseDrm();
+        }
+
         status=OK;
         // first making sure the codecs have released their crypto reference
         const sp<DecoderBase> &videoDecoder = getDecoder(false/*audio*/);
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index f2a4d06..0bb4dbb 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -469,6 +469,22 @@
         const char *mime;
         CHECK(track->meta->findCString(kKeyMIMEType, &mime));
         if (!strncasecmp("video/", mime, 6)) {
+            // MPEG2 tracks do not provide CSD, so read the stream header
+            if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)) {
+                off64_t offset;
+                size_t size;
+                if (track->sampleTable->getMetaDataForSample(
+                            0 /* sampleIndex */, &offset, &size, NULL /* sampleTime */) == OK) {
+                    if (size > kMaxTrackHeaderSize) {
+                        size = kMaxTrackHeaderSize;
+                    }
+                    uint8_t header[kMaxTrackHeaderSize];
+                    if (mDataSource->readAt(offset, &header, size) == (ssize_t)size) {
+                        track->meta->setData(kKeyStreamHeader, 'mdat', header, size);
+                    }
+                }
+            }
+
             if (mMoofOffset > 0) {
                 int64_t duration;
                 if (track->meta->findInt64(kKeyDuration, &duration)) {
@@ -489,22 +505,6 @@
                             ((int64_t)sampleTime * 1000000) / track->timescale);
                 }
             }
-
-            // MPEG2 tracks do not provide CSD, so read the stream header
-            if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)) {
-                off64_t offset;
-                size_t size;
-                if (track->sampleTable->getMetaDataForSample(
-                            0 /* sampleIndex */, &offset, &size, NULL /* sampleTime */) == OK) {
-                    if (size > kMaxTrackHeaderSize) {
-                        size = kMaxTrackHeaderSize;
-                    }
-                    uint8_t header[kMaxTrackHeaderSize];
-                    if (mDataSource->readAt(offset, &header, size) == (ssize_t)size) {
-                        track->meta->setData(kKeyStreamHeader, 'mdat', header, size);
-                    }
-                }
-            }
         }
     }
 
@@ -1240,6 +1240,7 @@
             ALOGV("allocated pssh @ %p", pssh.data);
             ssize_t requested = (ssize_t) pssh.datalen;
             if (mDataSource->readAt(data_offset + 24, pssh.data, requested) < requested) {
+                delete[] pssh.data;
                 return ERROR_IO;
             }
             mPssh.push_back(pssh);
diff --git a/media/mtp/IMtpHandle.h b/media/mtp/IMtpHandle.h
index 9185255..0557596 100644
--- a/media/mtp/IMtpHandle.h
+++ b/media/mtp/IMtpHandle.h
@@ -27,7 +27,7 @@
     virtual int write(const void *data, int len) = 0;
 
     // Return 0 if send/receive is successful, or -1 and errno is set
-    virtual int receiveFile(mtp_file_range mfr) = 0;
+    virtual int receiveFile(mtp_file_range mfr, bool zero_packet) = 0;
     virtual int sendFile(mtp_file_range mfr) = 0;
     virtual int sendEvent(mtp_event me) = 0;
 
diff --git a/media/mtp/MtpDevHandle.cpp b/media/mtp/MtpDevHandle.cpp
index afc0525..9aa0aec 100644
--- a/media/mtp/MtpDevHandle.cpp
+++ b/media/mtp/MtpDevHandle.cpp
@@ -45,7 +45,7 @@
     int read(void *data, int len);
     int write(const void *data, int len);
 
-    int receiveFile(mtp_file_range mfr);
+    int receiveFile(mtp_file_range mfr, bool);
     int sendFile(mtp_file_range mfr);
     int sendEvent(mtp_event me);
 
@@ -68,7 +68,7 @@
     return ::write(mFd, data, len);
 }
 
-int MtpDevHandle::receiveFile(mtp_file_range mfr) {
+int MtpDevHandle::receiveFile(mtp_file_range mfr, bool) {
     return ioctl(mFd, MTP_RECEIVE_FILE, reinterpret_cast<unsigned long>(&mfr));
 }
 
diff --git a/media/mtp/MtpFfsHandle.cpp b/media/mtp/MtpFfsHandle.cpp
index c78002c..c50af2f 100644
--- a/media/mtp/MtpFfsHandle.cpp
+++ b/media/mtp/MtpFfsHandle.cpp
@@ -516,7 +516,7 @@
 }
 
 /* Read from USB and write to a local file. */
-int MtpFfsHandle::receiveFile(mtp_file_range mfr) {
+int MtpFfsHandle::receiveFile(mtp_file_range mfr, bool zero_packet) {
     // When receiving files, the incoming length is given in 32 bits.
     // A >4G file is given as 0xFFFFFFFF
     uint32_t file_length = mfr.length;
@@ -538,7 +538,7 @@
     aio.aio_fildes = mfr.fd;
     aio.aio_buf = nullptr;
     struct aiocb *aiol[] = {&aio};
-    int ret;
+    int ret = -1;
     size_t length;
     bool read = false;
     bool write = false;
@@ -590,11 +590,6 @@
             } else {
                 // Receive an empty packet if size is a multiple of the endpoint size.
                 file_length -= ret;
-                if (file_length == 0 && ret % packet_size == 0) {
-                    if (TEMP_FAILURE_RETRY(::read(mBulkOut, data, packet_size)) != 0) {
-                        return -1;
-                    }
-                }
             }
             // Enqueue a new write request
             aio.aio_buf = data;
@@ -610,6 +605,11 @@
             read = false;
         }
     }
+    if (ret % packet_size == 0 || zero_packet) {
+        if (TEMP_FAILURE_RETRY(::read(mBulkOut, data, packet_size)) != 0) {
+            return -1;
+        }
+    }
     return 0;
 }
 
@@ -660,10 +660,9 @@
                     sizeof(mtp_data_header), init_read_len, offset))
             != init_read_len) return -1;
     if (writeHandle(mBulkIn, data, sizeof(mtp_data_header) + init_read_len) == -1) return -1;
-    if (file_length == static_cast<unsigned>(init_read_len)) return 0;
     file_length -= init_read_len;
     offset += init_read_len;
-    ret = 0;
+    ret = init_read_len + sizeof(mtp_data_header);
 
     // Break down the file into pieces that fit in buffers
     while(file_length > 0) {
diff --git a/media/mtp/MtpFfsHandle.h b/media/mtp/MtpFfsHandle.h
index 7491a1b..98669ff 100644
--- a/media/mtp/MtpFfsHandle.h
+++ b/media/mtp/MtpFfsHandle.h
@@ -55,7 +55,7 @@
     int read(void *data, int len);
     int write(const void *data, int len);
 
-    int receiveFile(mtp_file_range mfr);
+    int receiveFile(mtp_file_range mfr, bool zero_packet);
     int sendFile(mtp_file_range mfr);
     int sendEvent(mtp_event me);
 
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 88dabff..5c33265 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -1052,23 +1052,22 @@
         ALOGE("failed to write initial data");
         result = MTP_RESPONSE_GENERAL_ERROR;
     } else {
-        if (mSendObjectFileSize - initialData > 0) {
-            mfr.offset = initialData;
-            if (mSendObjectFileSize == 0xFFFFFFFF) {
-                // tell driver to read until it receives a short packet
-                mfr.length = 0xFFFFFFFF;
-            } else {
-                mfr.length = mSendObjectFileSize - initialData;
-            }
+        mfr.offset = initialData;
+        if (mSendObjectFileSize == 0xFFFFFFFF) {
+            // tell driver to read until it receives a short packet
+            mfr.length = 0xFFFFFFFF;
+        } else {
+            mfr.length = mSendObjectFileSize - initialData;
+        }
 
-            mfr.command = 0;
-            mfr.transaction_id = 0;
+        mfr.command = 0;
+        mfr.transaction_id = 0;
 
-            // transfer the file
-            ret = sHandle->receiveFile(mfr);
-            if ((ret < 0) && (errno == ECANCELED)) {
-                isCanceled = true;
-            }
+        // transfer the file
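+        // Expect a trailing zero-length packet when no payload remains and the
+        // initial packet exactly filled the MTP buffer.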
+        ret = sHandle->receiveFile(mfr, mfr.length == 0 &&
+                initialData == MTP_BUFFER_SIZE - MTP_CONTAINER_HEADER_SIZE);
+        if ((ret < 0) && (errno == ECANCELED)) {
+            isCanceled = true;
         }
     }
     struct stat sstat;
@@ -1256,19 +1255,18 @@
     if (ret < 0) {
         ALOGE("failed to write initial data");
     } else {
-        if (length > 0) {
-            mtp_file_range  mfr;
-            mfr.fd = edit->mFD;
-            mfr.offset = offset;
-            mfr.length = length;
-            mfr.command = 0;
-            mfr.transaction_id = 0;
+        mtp_file_range  mfr;
+        mfr.fd = edit->mFD;
+        mfr.offset = offset;
+        mfr.length = length;
+        mfr.command = 0;
+        mfr.transaction_id = 0;
 
-            // transfer the file
-            ret = sHandle->receiveFile(mfr);
-            if ((ret < 0) && (errno == ECANCELED)) {
-                isCanceled = true;
-            }
+        // transfer the file
+        ret = sHandle->receiveFile(mfr, mfr.length == 0 &&
+                initialData == MTP_BUFFER_SIZE - MTP_CONTAINER_HEADER_SIZE);
+        if ((ret < 0) && (errno == ECANCELED)) {
+            isCanceled = true;
         }
     }
     if (ret < 0) {
diff --git a/media/mtp/tests/MtpFfsHandle_test.cpp b/media/mtp/tests/MtpFfsHandle_test.cpp
index e575148..554f867 100644
--- a/media/mtp/tests/MtpFfsHandle_test.cpp
+++ b/media/mtp/tests/MtpFfsHandle_test.cpp
@@ -116,7 +116,7 @@
         ss << dummyDataStr;
 
     EXPECT_EQ(write(bulk_out, ss.str().c_str(), size), size);
-    EXPECT_EQ(handle->receiveFile(mfr), 0);
+    EXPECT_EQ(handle->receiveFile(mfr, false), 0);
 
     EXPECT_EQ(read(dummy_file.fd, buf, size), size);
 
@@ -136,7 +136,7 @@
         ss << dummyDataStr;
 
     EXPECT_EQ(write(bulk_out, ss.str().c_str(), size), size);
-    EXPECT_EQ(handle->receiveFile(mfr), 0);
+    EXPECT_EQ(handle->receiveFile(mfr, false), 0);
 
     EXPECT_EQ(read(dummy_file.fd, buf, size), size);
 
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index d3e182a..a2e6d33 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -44,10 +44,6 @@
 // This is the maximum size in frames. The effective size can be tuned smaller at runtime.
 #define DEFAULT_BUFFER_CAPACITY   (48 * 8)
 
-// Use 2 for "double buffered"
-#define BUFFER_SIZE_IN_BURSTS     2
-#define BURSTS_PER_MIX_LOOP       1
-
 // The mStreamInternal will use a service interface that does not go through Binder.
 AAudioServiceEndpoint::AAudioServiceEndpoint(AAudioService &audioService)
         : mStreamInternal(audioService, true)
@@ -71,7 +67,13 @@
     if (result == AAUDIO_OK) {
         mMixer.allocate(mStreamInternal.getSamplesPerFrame(), mStreamInternal.getFramesPerBurst());
 
-        int32_t desiredBufferSize = BUFFER_SIZE_IN_BURSTS * mStreamInternal.getFramesPerBurst();
+        int32_t burstsPerBuffer = AAudioProperty_getMixerBursts();
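+        // Zero bursts selects automatic latency tuning (not yet implemented)
+        // and falls back to double buffering.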
+        if (burstsPerBuffer == 0) {
+            mLatencyTuningEnabled = true;
+            burstsPerBuffer = 2;
+        }
+        ALOGD("AAudioServiceEndpoint(): burstsPerBuffer = %d", burstsPerBuffer);
+        int32_t desiredBufferSize = burstsPerBuffer * mStreamInternal.getFramesPerBurst();
         mStreamInternal.setBufferSize(desiredBufferSize);
     }
     return result;
@@ -117,7 +119,6 @@
 
 static void *aaudio_mixer_thread_proc(void *context) {
     AAudioServiceEndpoint *stream = (AAudioServiceEndpoint *) context;
-    //LOGD("AudioStreamAAudio(): oboe_callback_thread, stream = %p", stream);
     if (stream != NULL) {
         return stream->callbackLoop();
     } else {
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index a4ceae6..d0c2f53 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -77,6 +77,7 @@
 
     std::atomic<bool>        mCallbackEnabled;
     int32_t                  mReferenceCount = 0;
+    bool                     mLatencyTuningEnabled = false; // TODO implement tuning
 
     std::mutex               mLockStreams;
     std::vector<AAudioServiceStreamShared *> mRegisteredStreams;
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index cadc2a4..78a1583 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -178,6 +178,21 @@
     mAudioFormat = AAudioConvert_androidToAAudioDataFormat(config.format);
     mSampleRate = config.sample_rate;
 
+    // Scale up the burst size to meet the minimum equivalent in microseconds.
+    // This is to avoid waking the CPU too often when the HW burst is very small
+    // or at high sample rates.
+    int32_t burstMinMicros = AAudioProperty_getHardwareBurstMinMicros();
+    int32_t burstMicros = 0;
+    do {
+        if (burstMicros > 0) {  // skip first loop
+            mFramesPerBurst *= 2;
+        }
+        burstMicros = mFramesPerBurst * static_cast<int64_t>(1000000) / mSampleRate;
+    } while (burstMicros < burstMinMicros);
+
+    ALOGD("AAudioServiceStreamMMAP::open() original burst = %d, minMicros = %d, final burst = %d\n",
+          mMmapBufferinfo.burst_size_frames, burstMinMicros, mFramesPerBurst);
+
     ALOGD("AAudioServiceStreamMMAP::open() got devId = %d, sRate = %d",
           deviceId, config.sample_rate);