Merge "AudioTrack: explain why FAST mixer denied" into oc-dr1-dev
diff --git a/camera/Android.bp b/camera/Android.bp
index 64185e1..849f560 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -67,7 +67,10 @@
         "system/media/private/camera/include",
         "frameworks/native/include/media/openmax",
     ],
-    export_include_dirs: ["include/camera"],
+    export_include_dirs: [
+         "include",
+         "include/camera"
+    ],
     export_shared_lib_headers: ["libcamera_metadata"],
 
     cflags: [
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index e56f675..386546f 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -411,8 +411,11 @@
     }
 
     setListener(NULL);
-    mPlugin->setListener(NULL);
+    if (mPlugin != NULL) {
+        mPlugin->setListener(NULL);
+    }
     mPlugin.clear();
+    mInitCheck = NO_INIT;
 
     return OK;
 }
@@ -960,8 +963,11 @@
 {
     Mutex::Autolock autoLock(mLock);
     setListener(NULL);
-    mPlugin->setListener(NULL);
+    if (mPlugin != NULL) {
+        mPlugin->setListener(NULL);
+    }
     mPlugin.clear();
+    mInitCheck = NO_INIT;
 }
 
 void DrmHal::writeByteArray(Parcel &obj, hidl_vec<uint8_t> const &vec)
diff --git a/drm/mediadrm/plugins/clearkey/Android.bp b/drm/mediadrm/plugins/clearkey/Android.bp
index 6af7cd8..f3ce65c 100644
--- a/drm/mediadrm/plugins/clearkey/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/Android.bp
@@ -38,12 +38,17 @@
     shared_libs: [
         "libcrypto",
         "liblog",
-        "libstagefright_foundation",
+        "libstagefright_foundation_vendor",
         "libutils",
     ],
 
     static_libs: ["libjsmn"],
 
+    include_dirs: [
+        "frameworks/native/include",
+        "frameworks/av/include",
+    ],
+
     export_include_dirs: ["."],
     export_static_lib_headers: ["libjsmn"],
 }
diff --git a/drm/mediadrm/plugins/clearkey/tests/Android.bp b/drm/mediadrm/plugins/clearkey/tests/Android.bp
index 1b208ad..976c590 100644
--- a/drm/mediadrm/plugins/clearkey/tests/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/tests/Android.bp
@@ -19,6 +19,7 @@
 
 cc_test {
     name: "ClearKeyDrmUnitTest",
+    vendor: true,
 
     srcs: [
         "AesCtrDecryptorUnittest.cpp",
@@ -30,7 +31,7 @@
         "libcrypto",
         "libdrmclearkeyplugin",
         "liblog",
-        "libstagefright_foundation",
+        "libstagefright_foundation_vendor",
         "libutils",
     ],
 }
diff --git a/media/libaaudio/examples/input_monitor/Android.mk b/media/libaaudio/examples/input_monitor/Android.mk
index b56328b..5053e7d 100644
--- a/media/libaaudio/examples/input_monitor/Android.mk
+++ b/media/libaaudio/examples/input_monitor/Android.mk
@@ -1,6 +1 @@
-# include $(call all-subdir-makefiles)
-
-# Just include static/ for now.
-LOCAL_PATH := $(call my-dir)
-#include $(LOCAL_PATH)/jni/Android.mk
-include $(LOCAL_PATH)/static/Android.mk
+include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/examples/input_monitor/jni/Android.mk b/media/libaaudio/examples/input_monitor/jni/Android.mk
index 3e24f9f..9b1ce2c 100644
--- a/media/libaaudio/examples/input_monitor/jni/Android.mk
+++ b/media/libaaudio/examples/input_monitor/jni/Android.mk
@@ -11,7 +11,7 @@
 # NDK recommends using this kind of relative path instead of an absolute path.
 LOCAL_SRC_FILES:= ../src/input_monitor.cpp
 LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := input_monitor_ndk
+LOCAL_MODULE := input_monitor
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
@@ -23,11 +23,11 @@
 
 LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
 LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := input_monitor_callback_ndk
+LOCAL_MODULE := input_monitor_callback
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
 LOCAL_MODULE := libaaudio_prebuilt
 LOCAL_SRC_FILES := libaaudio.so
 LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
-include $(PREBUILT_SHARED_LIBRARY)
\ No newline at end of file
+include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/input_monitor/static/Android.mk b/media/libaaudio/examples/input_monitor/static/Android.mk
deleted file mode 100644
index 80a3906..0000000
--- a/media/libaaudio/examples/input_monitor/static/Android.mk
+++ /dev/null
@@ -1,37 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := samples
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-# TODO reorganize folders to avoid using ../
-LOCAL_SRC_FILES:= ../src/input_monitor.cpp
-
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
-                          libbinder libcutils libutils \
-                          libaudioclient liblog libtinyalsa libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
-
-LOCAL_MODULE := input_monitor
-include $(BUILD_EXECUTABLE)
-
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
-
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
-                          libbinder libcutils libutils \
-                          libaudioclient liblog libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
-
-LOCAL_MODULE := input_monitor_callback
-include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/input_monitor/static/README.md b/media/libaaudio/examples/input_monitor/static/README.md
deleted file mode 100644
index 6e26d7b..0000000
--- a/media/libaaudio/examples/input_monitor/static/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-Makefile for building simple command line examples.
-They link with AAudio as a static library.
diff --git a/media/libaaudio/examples/write_sine/jni/Android.mk b/media/libaaudio/examples/write_sine/jni/Android.mk
index c306ed3..d630e76 100644
--- a/media/libaaudio/examples/write_sine/jni/Android.mk
+++ b/media/libaaudio/examples/write_sine/jni/Android.mk
@@ -11,7 +11,7 @@
 # NDK recommends using this kind of relative path instead of an absolute path.
 LOCAL_SRC_FILES:= ../src/write_sine.cpp
 LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := write_sine_ndk
+LOCAL_MODULE := write_sine
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
@@ -23,7 +23,7 @@
 
 LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
 LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := write_sine_callback_ndk
+LOCAL_MODULE := write_sine_callback
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
diff --git a/media/libaaudio/examples/write_sine/static/Android.mk b/media/libaaudio/examples/write_sine/static/Android.mk
deleted file mode 100644
index 1f8dcd9..0000000
--- a/media/libaaudio/examples/write_sine/static/Android.mk
+++ /dev/null
@@ -1,38 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := samples
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/src \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-# NDK recommends using this kind of relative path instead of an absolute path.
-LOCAL_SRC_FILES:= ../src/write_sine.cpp
-
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
-                          libbinder libcutils libutils \
-                          libaudioclient liblog libtinyalsa libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
-
-LOCAL_MODULE := write_sine
-include $(BUILD_EXECUTABLE)
-
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
-
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
-                          libbinder libcutils libutils \
-                          libaudioclient liblog libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
-
-LOCAL_MODULE := write_sine_callback
-include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/write_sine/static/README.md b/media/libaaudio/examples/write_sine/static/README.md
deleted file mode 100644
index 6e26d7b..0000000
--- a/media/libaaudio/examples/write_sine/static/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-Makefile for building simple command line examples.
-They link with AAudio as a static library.
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
index b8ef611..ef4a51f 100644
--- a/media/libaaudio/src/binding/IAAudioService.cpp
+++ b/media/libaaudio/src/binding/IAAudioService.cpp
@@ -45,11 +45,9 @@
         Parcel data, reply;
         // send command
         data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
-        ALOGV("BpAAudioService::client openStream --------------------");
         // request.dump();
         request.writeToParcel(&data);
         status_t err = remote()->transact(OPEN_STREAM, data, &reply);
-        ALOGV("BpAAudioService::client openStream returned %d", err);
         if (err != NO_ERROR) {
             ALOGE("BpAAudioService::client openStream transact failed %d", err);
             return AAudioConvert_androidToAAudioResult(err);
@@ -57,6 +55,7 @@
         // parse reply
         aaudio_handle_t stream;
         err = reply.readInt32(&stream);
+        ALOGD("BpAAudioService::client openStream returned stream = 0x%08x", stream);
         if (err != NO_ERROR) {
             ALOGE("BpAAudioService::client transact(OPEN_STREAM) readInt %d", err);
             return AAudioConvert_androidToAAudioResult(err);
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 5cb642b..0684ed6 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -246,3 +246,7 @@
     return (int32_t)mDataQueue->getBufferCapacityInFrames();
 }
 
+void AudioEndpoint::dump() const {
+    ALOGD("AudioEndpoint: data readCounter  = %lld", (long long) mDataQueue->getReadCounter());
+    ALOGD("AudioEndpoint: data writeCounter = %lld", (long long) mDataQueue->getWriteCounter());
+}
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index 53ba033..e7c6916 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -91,6 +91,8 @@
 
     int32_t getBufferCapacityInFrames() const;
 
+    void dump() const;
+
 private:
     android::FifoBuffer    *mUpCommandQueue;
     android::FifoBuffer    *mDataQueue;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 3a827f0..b59c445 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -62,8 +62,9 @@
         , mAudioEndpoint()
         , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
         , mFramesPerBurst(16)
-        , mServiceInterface(serviceInterface)
-        , mInService(inService) {
+        , mStreamVolume(1.0f)
+        , mInService(inService)
+        , mServiceInterface(serviceInterface) {
 }
 
 AudioStreamInternal::~AudioStreamInternal() {
@@ -153,13 +154,13 @@
         if (getDataCallbackProc()) {
             mCallbackFrames = builder.getFramesPerDataCallback();
             if (mCallbackFrames > getBufferCapacity() / 2) {
-                ALOGE("AudioStreamInternal.open(): framesPerCallback too large = %d, capacity = %d",
+                ALOGE("AudioStreamInternal::open(): framesPerCallback too big = %d, capacity = %d",
                       mCallbackFrames, getBufferCapacity());
                 mServiceInterface.closeStream(mServiceStreamHandle);
                 return AAUDIO_ERROR_OUT_OF_RANGE;
 
             } else if (mCallbackFrames < 0) {
-                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
+                ALOGE("AudioStreamInternal::open(): framesPerCallback negative");
                 mServiceInterface.closeStream(mServiceStreamHandle);
                 return AAUDIO_ERROR_OUT_OF_RANGE;
 
@@ -175,12 +176,16 @@
         }
 
         setState(AAUDIO_STREAM_STATE_OPEN);
+        // Only connect to AudioManager if this is a playback stream running in the client process.
+        if (!mInService && getDirection() == AAUDIO_DIRECTION_OUTPUT) {
+            init(android::PLAYER_TYPE_AAUDIO, AUDIO_USAGE_MEDIA);
+        }
     }
     return result;
 }
 
 aaudio_result_t AudioStreamInternal::close() {
-    ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X",
+    ALOGD("AudioStreamInternal::close(): mServiceStreamHandle = 0x%08X",
              mServiceStreamHandle);
     if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
         // Don't close a stream while it is running.
@@ -196,12 +201,14 @@
                 result, AAudio_convertResultToText(result));
             }
         }
+        setState(AAUDIO_STREAM_STATE_CLOSING);
         aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
         mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
 
         mServiceInterface.closeStream(serviceStreamHandle);
         delete[] mCallbackBuffer;
         mCallbackBuffer = nullptr;
+        setState(AAUDIO_STREAM_STATE_CLOSED);
         return mEndPointParcelable.close();
     } else {
         return AAUDIO_ERROR_INVALID_HANDLE;
@@ -223,15 +230,20 @@
 aaudio_result_t AudioStreamInternal::requestStart()
 {
     int64_t startTime;
-    ALOGD("AudioStreamInternal(): start()");
+    ALOGD("AudioStreamInternal()::requestStart()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
+    if (isActive()) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    aaudio_stream_state_t originalState = getState();
+
+    setState(AAUDIO_STREAM_STATE_STARTING);
+    aaudio_result_t result = AAudioConvert_androidToAAudioResult(startWithStatus());
 
     startTime = AudioClock::getNanoseconds();
     mClockModel.start(startTime);
-    setState(AAUDIO_STREAM_STATE_STARTING);
-    aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);;
 
     if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
         // Launch the callback loop thread.
@@ -241,6 +253,9 @@
         mCallbackEnabled.store(true);
         result = createThread(periodNanos, aaudio_callback_thread_proc, this);
     }
+    if (result != AAUDIO_OK) {
+        setState(originalState);
+    }
     return result;
 }
 
@@ -274,14 +289,14 @@
 aaudio_result_t AudioStreamInternal::requestPauseInternal()
 {
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
-        ALOGE("AudioStreamInternal(): requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
+        ALOGE("AudioStreamInternal::requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
               mServiceStreamHandle);
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
     mClockModel.stop(AudioClock::getNanoseconds());
     setState(AAUDIO_STREAM_STATE_PAUSING);
-    return mServiceInterface.pauseStream(mServiceStreamHandle);
+    return AAudioConvert_androidToAAudioResult(pauseWithStatus());
 }
 
 aaudio_result_t AudioStreamInternal::requestPause()
@@ -296,7 +311,7 @@
 
 aaudio_result_t AudioStreamInternal::requestFlush() {
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
-        ALOGE("AudioStreamInternal(): requestFlush() mServiceStreamHandle invalid = 0x%08X",
+        ALOGE("AudioStreamInternal::requestFlush() mServiceStreamHandle invalid = 0x%08X",
               mServiceStreamHandle);
         return AAUDIO_ERROR_INVALID_STATE;
     }
@@ -307,13 +322,14 @@
 
 // TODO for Play only
 void AudioStreamInternal::onFlushFromServer() {
-    ALOGD("AudioStreamInternal(): onFlushFromServer()");
     int64_t readCounter = mAudioEndpoint.getDataReadCounter();
     int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
 
     // Bump offset so caller does not see the retrograde motion in getFramesRead().
     int64_t framesFlushed = writeCounter - readCounter;
     mFramesOffsetFromService += framesFlushed;
+    ALOGD("AudioStreamInternal::onFlushFromServer() readN = %lld, writeN = %lld, offset = %lld",
+          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
 
     // Flush written frames by forcing writeCounter to readCounter.
     // This is because we cannot move the read counter in the hardware.
@@ -323,14 +339,14 @@
 aaudio_result_t AudioStreamInternal::requestStopInternal()
 {
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
-        ALOGE("AudioStreamInternal(): requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
+        ALOGE("AudioStreamInternal::requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
               mServiceStreamHandle);
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
     mClockModel.stop(AudioClock::getNanoseconds());
     setState(AAUDIO_STREAM_STATE_STOPPING);
-    return mServiceInterface.stopStream(mServiceStreamHandle);
+    return AAudioConvert_androidToAAudioResult(stopWithStatus());
 }
 
 aaudio_result_t AudioStreamInternal::requestStop()
@@ -365,7 +381,7 @@
                            int64_t *timeNanoseconds) {
     // TODO Generate in server and pass to client. Return latest.
     int64_t time = AudioClock::getNanoseconds();
-    *framePosition = mClockModel.convertTimeToPosition(time);
+    *framePosition = mClockModel.convertTimeToPosition(time) + mFramesOffsetFromService;
     // TODO Get a more accurate timestamp from the service. This code just adds a fudge factor.
     *timeNanoseconds = time + (6 * AAUDIO_NANOS_PER_MILLISECOND);
     return AAUDIO_OK;
@@ -378,31 +394,28 @@
     return processCommands();
 }
 
-#if LOG_TIMESTAMPS
-static void AudioStreamInternal_logTimestamp(AAudioServiceMessage &command) {
+void AudioStreamInternal::logTimestamp(AAudioServiceMessage &command) {
     static int64_t oldPosition = 0;
     static int64_t oldTime = 0;
     int64_t framePosition = command.timestamp.position;
     int64_t nanoTime = command.timestamp.timestamp;
-    ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %lld",
+    ALOGD("AudioStreamInternal: timestamp says framePosition = %08lld at nanoTime %lld",
          (long long) framePosition,
          (long long) nanoTime);
     int64_t nanosDelta = nanoTime - oldTime;
     if (nanosDelta > 0 && oldTime > 0) {
         int64_t framesDelta = framePosition - oldPosition;
         int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
-        ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
-        ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
-        ALOGD("AudioStreamInternal() - measured rate = %lld", (long long) rate);
+        ALOGD("AudioStreamInternal: framesDelta = %08lld, nanosDelta = %08lld, rate = %lld",
+              (long long) framesDelta, (long long) nanosDelta, (long long) rate);
     }
     oldPosition = framePosition;
     oldTime = nanoTime;
 }
-#endif
 
 aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
 #if LOG_TIMESTAMPS
-    AudioStreamInternal_logTimestamp(*message);
+    logTimestamp(*message);
 #endif
     processTimestamp(message->timestamp.position, message->timestamp.timestamp);
     return AAUDIO_OK;
@@ -412,46 +425,48 @@
     aaudio_result_t result = AAUDIO_OK;
     switch (message->event.event) {
         case AAUDIO_SERVICE_EVENT_STARTED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
+            ALOGD("AudioStreamInternal::onEventFromServergot() AAUDIO_SERVICE_EVENT_STARTED");
             if (getState() == AAUDIO_STREAM_STATE_STARTING) {
                 setState(AAUDIO_STREAM_STATE_STARTED);
             }
             break;
         case AAUDIO_SERVICE_EVENT_PAUSED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
+            ALOGD("AudioStreamInternal::onEventFromServergot() AAUDIO_SERVICE_EVENT_PAUSED");
             if (getState() == AAUDIO_STREAM_STATE_PAUSING) {
                 setState(AAUDIO_STREAM_STATE_PAUSED);
             }
             break;
         case AAUDIO_SERVICE_EVENT_STOPPED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STOPPED");
+            ALOGD("AudioStreamInternal::onEventFromServergot() AAUDIO_SERVICE_EVENT_STOPPED");
             if (getState() == AAUDIO_STREAM_STATE_STOPPING) {
                 setState(AAUDIO_STREAM_STATE_STOPPED);
             }
             break;
         case AAUDIO_SERVICE_EVENT_FLUSHED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
+            ALOGD("AudioStreamInternal::onEventFromServer() got AAUDIO_SERVICE_EVENT_FLUSHED");
             if (getState() == AAUDIO_STREAM_STATE_FLUSHING) {
                 setState(AAUDIO_STREAM_STATE_FLUSHED);
                 onFlushFromServer();
             }
             break;
         case AAUDIO_SERVICE_EVENT_CLOSED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
+            ALOGD("AudioStreamInternal::onEventFromServer() got AAUDIO_SERVICE_EVENT_CLOSED");
             setState(AAUDIO_STREAM_STATE_CLOSED);
             break;
         case AAUDIO_SERVICE_EVENT_DISCONNECTED:
             result = AAUDIO_ERROR_DISCONNECTED;
             setState(AAUDIO_STREAM_STATE_DISCONNECTED);
-            ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
+            ALOGW("WARNING - AudioStreamInternal::onEventFromServer()"
+                          " AAUDIO_SERVICE_EVENT_DISCONNECTED");
             break;
         case AAUDIO_SERVICE_EVENT_VOLUME:
-            mVolumeRamp.setTarget((float) message->event.dataDouble);
-            ALOGD("processCommands() AAUDIO_SERVICE_EVENT_VOLUME %lf",
+            mStreamVolume = (float)message->event.dataDouble;
+            doSetVolume();
+            ALOGD("AudioStreamInternal::onEventFromServer() AAUDIO_SERVICE_EVENT_VOLUME %lf",
                      message->event.dataDouble);
             break;
         default:
-            ALOGW("WARNING - processCommands() Unrecognized event = %d",
+            ALOGW("WARNING - AudioStreamInternal::onEventFromServer() Unrecognized event = %d",
                  (int) message->event.event);
             break;
     }
@@ -493,27 +508,27 @@
 {
     const char * traceName = (mInService) ? "aaWrtS" : "aaWrtC";
     ATRACE_BEGIN(traceName);
-    aaudio_result_t result = AAUDIO_OK;
-    int32_t loopCount = 0;
-    uint8_t* audioData = (uint8_t*)buffer;
-    int64_t currentTimeNanos = AudioClock::getNanoseconds();
-    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
-    int32_t framesLeft = numFrames;
-
     int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
     if (ATRACE_ENABLED()) {
         const char * traceName = (mInService) ? "aaFullS" : "aaFullC";
         ATRACE_INT(traceName, fullFrames);
     }
 
+    aaudio_result_t result = AAUDIO_OK;
+    int32_t loopCount = 0;
+    uint8_t* audioData = (uint8_t*)buffer;
+    int64_t currentTimeNanos = AudioClock::getNanoseconds();
+    const int64_t entryTimeNanos = currentTimeNanos;
+    const int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
+    int32_t framesLeft = numFrames;
+
     // Loop until all the data has been processed or until a timeout occurs.
     while (framesLeft > 0) {
-        // The call to processDataNow() will not block. It will just read as much as it can.
+        // The call to processDataNow() will not block. It will just process as much as it can.
         int64_t wakeTimeNanos = 0;
         aaudio_result_t framesProcessed = processDataNow(audioData, framesLeft,
                                                   currentTimeNanos, &wakeTimeNanos);
         if (framesProcessed < 0) {
-            ALOGE("AudioStreamInternal::processData() loop: framesProcessed = %d", framesProcessed);
             result = framesProcessed;
             break;
         }
@@ -531,12 +546,16 @@
             if (wakeTimeNanos > deadlineNanos) {
                 // If we time out, just return the framesWritten so far.
                 // TODO remove after we fix the deadline bug
-                ALOGE("AudioStreamInternal::processData(): timed out after %lld nanos",
+                ALOGW("AudioStreamInternal::processData(): entered at %lld nanos, currently %lld",
+                      (long long) entryTimeNanos, (long long) currentTimeNanos);
+                ALOGW("AudioStreamInternal::processData(): timed out after %lld nanos",
                       (long long) timeoutNanoseconds);
-                ALOGE("AudioStreamInternal::processData(): wakeTime = %lld, deadline = %lld nanos",
+                ALOGW("AudioStreamInternal::processData(): wakeTime = %lld, deadline = %lld nanos",
                       (long long) wakeTimeNanos, (long long) deadlineNanos);
-                ALOGE("AudioStreamInternal::processData(): past deadline by %d micros",
+                ALOGW("AudioStreamInternal::processData(): past deadline by %d micros",
                       (int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
+                mClockModel.dump();
+                mAudioEndpoint.dump();
                 break;
             }
 
@@ -588,3 +607,32 @@
 aaudio_result_t AudioStreamInternal::joinThread(void** returnArg) {
     return AudioStream::joinThread(returnArg, calculateReasonableTimeout(getFramesPerBurst()));
 }
+
+void AudioStreamInternal::doSetVolume() {
+    // No pan; only the left volume from the IPlayer interface is taken into account.
+    mVolumeRamp.setTarget(mStreamVolume * mVolumeMultiplierL /* * mPanMultiplierL */);
+}
+
+
+//------------------------------------------------------------------------------
+// Implementation of PlayerBase
+status_t AudioStreamInternal::playerStart() {
+    return AAudioConvert_aaudioToAndroidStatus(mServiceInterface.startStream(mServiceStreamHandle));
+}
+
+status_t AudioStreamInternal::playerPause() {
+    return AAudioConvert_aaudioToAndroidStatus(mServiceInterface.pauseStream(mServiceStreamHandle));
+}
+
+status_t AudioStreamInternal::playerStop() {
+    return AAudioConvert_aaudioToAndroidStatus(mServiceInterface.stopStream(mServiceStreamHandle));
+}
+
+status_t AudioStreamInternal::playerSetVolume() {
+    doSetVolume();
+    return NO_ERROR;
+}
+
+void AudioStreamInternal::destroy() {
+    baseDestroy();
+}
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index a11f309..c2c6419 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -18,6 +18,7 @@
 #define ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_H
 
 #include <stdint.h>
+#include <media/PlayerBase.h>
 #include <aaudio/AAudio.h>
 
 #include "binding/IAAudioService.h"
@@ -34,7 +35,7 @@
 namespace aaudio {
 
 // A stream that talks to the AAudioService or directly to a HAL.
-class AudioStreamInternal : public AudioStream {
+class AudioStreamInternal : public AudioStream, public android::PlayerBase  {
 
 public:
     AudioStreamInternal(AAudioServiceInterface  &serviceInterface, bool inService);
@@ -89,6 +90,9 @@
     // Calculate timeout based on framesPerBurst
     int64_t calculateReasonableTimeout();
 
+    //PlayerBase virtuals
+    virtual void destroy();
+
 protected:
 
     aaudio_result_t processData(void *buffer,
@@ -121,9 +125,19 @@
 
     aaudio_result_t onTimestampFromServer(AAudioServiceMessage *message);
 
+    void logTimestamp(AAudioServiceMessage &message);
+
     // Calculate timeout for an operation involving framesPerOperation.
     int64_t calculateReasonableTimeout(int32_t framesPerOperation);
 
+    void doSetVolume();
+
+    //PlayerBase virtuals
+    virtual status_t playerStart();
+    virtual status_t playerPause();
+    virtual status_t playerStop();
+    virtual status_t playerSetVolume();
+
     aaudio_format_t          mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
 
     IsochronousClockModel    mClockModel;      // timing model for chasing the HAL
@@ -135,6 +149,7 @@
     int32_t                  mXRunCount = 0;      // how many underrun events?
 
     LinearRamp               mVolumeRamp;
+    float                    mStreamVolume;
 
     // Offset from underlying frame position.
     int64_t                  mFramesOffsetFromService = 0; // offset for timestamps
@@ -142,6 +157,9 @@
     uint8_t                 *mCallbackBuffer = nullptr;
     int32_t                  mCallbackFrames = 0;
 
+    // The service uses this for SHARED mode.
+    bool                     mInService = false;  // Is this running in the client or the service?
+
 private:
     /*
      * Asynchronous write with data conversion.
@@ -159,8 +177,6 @@
     EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses
     AAudioServiceInterface  &mServiceInterface;   // abstract interface to the service
 
-    // The service uses this for SHARED mode.
-    bool                     mInService = false;  // Is this running in the client or the service?
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 93693bd..22f8bd1 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -14,10 +14,11 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "AAudio"
+#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
+#include <algorithm>
 #include <aaudio/AAudio.h>
 
 #include "client/AudioStreamInternalCapture.h"
@@ -155,29 +156,27 @@
 
     int32_t framesProcessed = numFrames - framesLeft;
     mAudioEndpoint.advanceReadIndex(framesProcessed);
-    incrementFramesRead(framesProcessed);
 
     //ALOGD("AudioStreamInternalCapture::readNowWithConversion() returns %d", framesProcessed);
     return framesProcessed;
 }
 
-int64_t AudioStreamInternalCapture::getFramesWritten()
-{
-    int64_t frames =
-            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
-            + mFramesOffsetFromService;
-    // Prevent retrograde motion.
-    if (frames < mLastFramesWritten) {
-        frames = mLastFramesWritten;
+int64_t AudioStreamInternalCapture::getFramesWritten() {
+    int64_t framesWrittenHardware;
+    if (isActive()) {
+        framesWrittenHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
     } else {
-        mLastFramesWritten = frames;
+        framesWrittenHardware = mAudioEndpoint.getDataWriteCounter();
     }
-    //ALOGD("AudioStreamInternalCapture::getFramesWritten() returns %lld", (long long)frames);
-    return frames;
+    // Prevent retrograde motion.
+    mLastFramesWritten = std::max(mLastFramesWritten,
+                                  framesWrittenHardware + mFramesOffsetFromService);
+    //ALOGD("AudioStreamInternalCapture::getFramesWritten() returns %lld",
+    //      (long long)mLastFramesWritten);
+    return mLastFramesWritten;
 }
 
-int64_t AudioStreamInternalCapture::getFramesRead()
-{
+int64_t AudioStreamInternalCapture::getFramesRead() {
     int64_t frames = mAudioEndpoint.getDataWriteCounter()
                                + mFramesOffsetFromService;
     //ALOGD("AudioStreamInternalCapture::getFramesRead() returns %lld", (long long)frames);
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index fc9766f..76ecbf9 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "AAudio"
+#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
@@ -51,15 +51,17 @@
     }
 
     if (mAudioEndpoint.isFreeRunning()) {
-        //ALOGD("AudioStreamInternal::processDataNow() - update read counter");
         // Update data queue based on the timing model.
         int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
         mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
     }
-    // TODO else query from endpoint cuz set by actual reader, maybe
 
     // If the read index passed the write index then consider it an underrun.
     if (mAudioEndpoint.getFullFramesAvailable() < 0) {
+        ALOGV("AudioStreamInternal::processDataNow() - XRun! write = %d, read = %d",
+              (int)mAudioEndpoint.getDataWriteCounter(),
+              (int)mAudioEndpoint.getDataReadCounter());
         mXRunCount++;
     }
 
@@ -201,9 +203,6 @@
     int32_t framesWritten = numFrames - framesLeft;
     mAudioEndpoint.advanceWriteIndex(framesWritten);
 
-    if (framesWritten > 0) {
-        incrementFramesWritten(framesWritten);
-    }
     // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
     return framesWritten;
 }
@@ -211,25 +210,29 @@
 
 int64_t AudioStreamInternalPlay::getFramesRead()
 {
-    int64_t framesRead =
-            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
-            + mFramesOffsetFromService;
+    int64_t framesReadHardware;
+    if (isActive()) {
+        framesReadHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
+    } else {
+        framesReadHardware = mAudioEndpoint.getDataReadCounter();
+    }
+    int64_t framesRead = framesReadHardware + mFramesOffsetFromService;
     // Prevent retrograde motion.
     if (framesRead < mLastFramesRead) {
         framesRead = mLastFramesRead;
     } else {
         mLastFramesRead = framesRead;
     }
-    ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+    //ALOGD("AudioStreamInternalPlay::getFramesRead() returns %lld", (long long)framesRead);
     return framesRead;
 }
 
 int64_t AudioStreamInternalPlay::getFramesWritten()
 {
-    int64_t getFramesWritten = mAudioEndpoint.getDataWriteCounter()
+    int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
                                + mFramesOffsetFromService;
-    ALOGD("AudioStreamInternal::getFramesWritten() returns %lld", (long long)getFramesWritten);
-    return getFramesWritten;
+    //ALOGD("AudioStreamInternalPlay::getFramesWritten() returns %lld", (long long)framesWritten);
+    return framesWritten;
 }
 
 
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 1de33bb..73f4c1d 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -41,6 +41,13 @@
 IsochronousClockModel::~IsochronousClockModel() {
 }
 
+void IsochronousClockModel::setPositionAndTime(int64_t framePosition, int64_t nanoTime) {
+    ALOGV("IsochronousClockModel::setPositionAndTime(%lld, %lld)",
+          (long long) framePosition, (long long) nanoTime);
+    mMarkerFramePosition = framePosition;
+    mMarkerNanoTime = nanoTime;
+}
+
 void IsochronousClockModel::start(int64_t nanoTime) {
     ALOGD("IsochronousClockModel::start(nanos = %lld)\n", (long long) nanoTime);
     mMarkerNanoTime = nanoTime;
@@ -49,8 +56,8 @@
 
 void IsochronousClockModel::stop(int64_t nanoTime) {
     ALOGD("IsochronousClockModel::stop(nanos = %lld)\n", (long long) nanoTime);
-    mMarkerNanoTime = nanoTime;
-    mMarkerFramePosition = convertTimeToPosition(nanoTime); // TODO should we do this?
+    setPositionAndTime(convertTimeToPosition(nanoTime), nanoTime);
+    // TODO should we set position?
     mState = STATE_STOPPED;
 }
 
@@ -79,15 +86,13 @@
     case STATE_STOPPED:
         break;
     case STATE_STARTING:
-        mMarkerFramePosition = framePosition;
-        mMarkerNanoTime = nanoTime;
+        setPositionAndTime(framePosition, nanoTime);
         mState = STATE_SYNCING;
         break;
     case STATE_SYNCING:
         // This will handle a burst of rapid transfer at the beginning.
         if (nanosDelta < expectedNanosDelta) {
-            mMarkerFramePosition = framePosition;
-            mMarkerNanoTime = nanoTime;
+            setPositionAndTime(framePosition, nanoTime);
         } else {
 //            ALOGD("processTimestamp() - advance to STATE_RUNNING");
             mState = STATE_RUNNING;
@@ -98,17 +103,15 @@
             // Earlier than expected timestamp.
             // This data is probably more accurate so use it.
             // or we may be drifting due to a slow HW clock.
-            mMarkerFramePosition = framePosition;
-            mMarkerNanoTime = nanoTime;
 //            ALOGD("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
 //                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
+            setPositionAndTime(framePosition, nanoTime);
         } else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
             // Later than expected timestamp.
-            mMarkerFramePosition = framePosition;
-            mMarkerNanoTime = nanoTime - mMaxLatenessInNanos;
 //            ALOGD("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
 //                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
 //                 (int) (mMaxLatenessInNanos / 1000));
+            setPositionAndTime(framePosition - mFramesPerBurst, nanoTime - mMaxLatenessInNanos);
         }
         break;
     default:
@@ -131,8 +134,7 @@
     mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
 }
 
-int64_t IsochronousClockModel::convertDeltaPositionToTime(
-        int64_t framesDelta) const {
+int64_t IsochronousClockModel::convertDeltaPositionToTime(int64_t framesDelta) const {
     return (AAUDIO_NANOS_PER_SECOND * framesDelta) / mSampleRate;
 }
 
@@ -171,3 +173,12 @@
 //         (long long) framesDelta, mFramesPerBurst);
     return position;
 }
+
+void IsochronousClockModel::dump() const {
+    ALOGD("IsochronousClockModel::mMarkerFramePosition = %lld", (long long) mMarkerFramePosition);
+    ALOGD("IsochronousClockModel::mMarkerNanoTime      = %lld", (long long) mMarkerNanoTime);
+    ALOGD("IsochronousClockModel::mSampleRate          = %6d", mSampleRate);
+    ALOGD("IsochronousClockModel::mFramesPerBurst      = %6d", mFramesPerBurst);
+    ALOGD("IsochronousClockModel::mMaxLatenessInNanos  = %6d", mMaxLatenessInNanos);
+    ALOGD("IsochronousClockModel::mState               = %6d", mState);
+}
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 0314f55..585f53a 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -43,6 +43,8 @@
      */
     void setSampleRate(int32_t sampleRate);
 
+    void setPositionAndTime(int64_t framePosition, int64_t nanoTime);
+
     int32_t getSampleRate() const {
         return mSampleRate;
     }
@@ -86,6 +88,8 @@
      */
     int64_t convertDeltaTimeToPosition(int64_t nanosDelta) const;
 
+    void dump() const;
+
 private:
     enum clock_model_state_t {
         STATE_STOPPED,
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 76f98fa..3f5de77 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -253,8 +253,10 @@
 AAUDIO_API aaudio_result_t  AAudioStream_requestStart(AAudioStream* stream)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
-    ALOGD("AAudioStream_requestStart(%p)", stream);
-    return audioStream->requestStart();
+    ALOGD("AAudioStream_requestStart(%p) called --------------", stream);
+    aaudio_result_t result = audioStream->requestStart();
+    ALOGD("AAudioStream_requestStart(%p) returned ------------", stream);
+    return result;
 }
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestPause(AAudioStream* stream)
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index e1e3c55..19b08c4 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -36,8 +36,30 @@
     setPeriodNanoseconds(0);
 }
 
+static const char *AudioStream_convertSharingModeToShortText(aaudio_sharing_mode_t sharingMode) {
+    const char *result;
+    switch (sharingMode) {
+        case AAUDIO_SHARING_MODE_EXCLUSIVE:
+            result = "EX";
+            break;
+        case AAUDIO_SHARING_MODE_SHARED:
+            result = "SH";
+            break;
+        default:
+            result = "?!";
+            break;
+    }
+    return result;
+}
+
 aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
 {
+    // Call here as well because the AAudioService will call this without calling build().
+    aaudio_result_t result = builder.validate();
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+
     // Copy parameters from the Builder because the Builder may be deleted after this call.
     mSamplesPerFrame = builder.getSamplesPerFrame();
     mSampleRate = builder.getSampleRate();
@@ -56,44 +78,13 @@
     mErrorCallbackUserData = builder.getErrorCallbackUserData();
 
     // This is very helpful for debugging in the future. Please leave it in.
-    ALOGI("AudioStream::open() rate = %d, channels = %d, format = %d, sharing = %d, dir = %s",
-          mSampleRate, mSamplesPerFrame, mFormat, mSharingMode,
+    ALOGI("AudioStream::open() rate = %d, channels = %d, format = %d, sharing = %s, dir = %s",
+          mSampleRate, mSamplesPerFrame, mFormat,
+          AudioStream_convertSharingModeToShortText(mSharingMode),
           (getDirection() == AAUDIO_DIRECTION_OUTPUT) ? "OUTPUT" : "INPUT");
     ALOGI("AudioStream::open() device = %d, perfMode = %d, callbackFrames = %d",
           mDeviceId, mPerformanceMode, mFramesPerDataCallback);
 
-    // Check for values that are ridiculously out of range to prevent math overflow exploits.
-    // The service will do a better check.
-    if (mSamplesPerFrame < 0 || mSamplesPerFrame > 128) {
-        ALOGE("AudioStream::open(): samplesPerFrame out of range = %d", mSamplesPerFrame);
-        return AAUDIO_ERROR_OUT_OF_RANGE;
-    }
-
-    switch(mFormat) {
-        case AAUDIO_FORMAT_UNSPECIFIED:
-        case AAUDIO_FORMAT_PCM_I16:
-        case AAUDIO_FORMAT_PCM_FLOAT:
-            break; // valid
-        default:
-            ALOGE("AudioStream::open(): audioFormat not valid = %d", mFormat);
-            return AAUDIO_ERROR_INVALID_FORMAT;
-            // break;
-    }
-
-    if (mSampleRate != AAUDIO_UNSPECIFIED && (mSampleRate < 8000 || mSampleRate > 1000000)) {
-        ALOGE("AudioStream::open(): mSampleRate out of range = %d", mSampleRate);
-        return AAUDIO_ERROR_INVALID_RATE;
-    }
-
-    switch(mPerformanceMode) {
-        case AAUDIO_PERFORMANCE_MODE_NONE:
-        case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
-        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
-            break;
-        default:
-            ALOGE("AudioStream::open(): illegal performanceMode %d", mPerformanceMode);
-            return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
-    }
 
     return AAUDIO_OK;
 }
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 39c9f9c..7c7fcc5 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -186,13 +186,9 @@
         return AAudioConvert_formatToSizeInBytes(mFormat);
     }
 
-    virtual int64_t getFramesWritten() {
-        return mFramesWritten.get();
-    }
+    virtual int64_t getFramesWritten() = 0;
 
-    virtual int64_t getFramesRead() {
-        return mFramesRead.get();
-    }
+    virtual int64_t getFramesRead() = 0;
 
     AAudioStream_dataCallback getDataCallbackProc() const {
         return mDataCallbackProc;
@@ -232,13 +228,6 @@
 
 protected:
 
-    virtual int64_t incrementFramesWritten(int32_t frames) {
-        return mFramesWritten.increment(frames);
-    }
-
-    virtual int64_t incrementFramesRead(int32_t frames) {
-        return mFramesRead.increment(frames);
-    }
 
     /**
      * This should not be called after the open() call.
@@ -281,8 +270,6 @@
     std::atomic<bool>    mCallbackEnabled;
 
 protected:
-    MonotonicCounter     mFramesWritten;
-    MonotonicCounter     mFramesRead;
 
     void setPeriodNanoseconds(int64_t periodNanoseconds) {
         mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 4262f27..8a481c0 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -37,6 +37,19 @@
 #define AAUDIO_MMAP_POLICY_DEFAULT             AAUDIO_POLICY_NEVER
 #define AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT   AAUDIO_POLICY_NEVER
 
+// These values are for a pre-check before we ask the lower-level service to open a stream.
+// So they are just outside the maximum conceivable range of values,
+// on the edge of being ridiculous.
+// TODO These defines should be moved to a central place in audio.
+#define SAMPLES_PER_FRAME_MIN        1
+// TODO Remove 8 channel limitation.
+#define SAMPLES_PER_FRAME_MAX        FCC_8
+#define SAMPLE_RATE_HZ_MIN           8000
+// HDMI supports up to 32 channels at 1536000 Hz.
+#define SAMPLE_RATE_HZ_MAX           1600000
+#define FRAMES_PER_DATA_CALLBACK_MIN 1
+#define FRAMES_PER_DATA_CALLBACK_MAX (1024 * 1024)
+
 /*
  * AudioStreamBuilder
  */
@@ -85,8 +98,17 @@
 // Exact behavior is controlled by MMapPolicy.
 aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
     AudioStream *audioStream = nullptr;
+    if (streamPtr == nullptr) {
+        ALOGE("AudioStreamBuilder::build() streamPtr is null");
+        return AAUDIO_ERROR_NULL;
+    }
     *streamPtr = nullptr;
 
+    aaudio_result_t result = validate();
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+
     // The API setting is the highest priority.
     aaudio_policy_t mmapPolicy = AAudio_getMMapPolicy();
     // If not specified then get from a system property.
@@ -116,8 +138,7 @@
     bool allowMMap = mmapPolicy != AAUDIO_POLICY_NEVER;
     bool allowLegacy = mmapPolicy != AAUDIO_POLICY_ALWAYS;
 
-    aaudio_result_t result = builder_createStream(getDirection(), sharingMode,
-                                                  allowMMap, &audioStream);
+    result = builder_createStream(getDirection(), sharingMode, allowMMap, &audioStream);
     if (result == AAUDIO_OK) {
         // Open the stream using the parameters from the builder.
         result = audioStream->open(*this);
@@ -147,3 +168,83 @@
 
     return result;
 }
+
+aaudio_result_t AudioStreamBuilder::validate() const {
+
+    // Check for values that are ridiculously out of range to prevent math overflow exploits.
+    // The service will do a better check.
+    if (mSamplesPerFrame != AAUDIO_UNSPECIFIED
+        && (mSamplesPerFrame < SAMPLES_PER_FRAME_MIN || mSamplesPerFrame > SAMPLES_PER_FRAME_MAX)) {
+        ALOGE("AudioStreamBuilder: channelCount out of range = %d", mSamplesPerFrame);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+
+    if (mDeviceId < 0) {
+        ALOGE("AudioStreamBuilder: deviceId out of range = %d", mDeviceId);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+
+    switch (mSharingMode) {
+        case AAUDIO_SHARING_MODE_EXCLUSIVE:
+        case AAUDIO_SHARING_MODE_SHARED:
+            break;
+        default:
+            ALOGE("AudioStreamBuilder: illegal sharingMode = %d", mSharingMode);
+            return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+            // break;
+    }
+
+    switch (mFormat) {
+        case AAUDIO_FORMAT_UNSPECIFIED:
+        case AAUDIO_FORMAT_PCM_I16:
+        case AAUDIO_FORMAT_PCM_FLOAT:
+            break; // valid
+        default:
+            ALOGE("AudioStreamBuilder: audioFormat not valid = %d", mFormat);
+            return AAUDIO_ERROR_INVALID_FORMAT;
+            // break;
+    }
+
+    switch (mDirection) {
+        case AAUDIO_DIRECTION_INPUT:
+        case AAUDIO_DIRECTION_OUTPUT:
+            break; // valid
+        default:
+            ALOGE("AudioStreamBuilder: direction not valid = %d", mDirection);
+            return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+            // break;
+    }
+
+    if (mSampleRate != AAUDIO_UNSPECIFIED
+        && (mSampleRate < SAMPLE_RATE_HZ_MIN || mSampleRate > SAMPLE_RATE_HZ_MAX)) {
+        ALOGE("AudioStreamBuilder: sampleRate out of range = %d", mSampleRate);
+        return AAUDIO_ERROR_INVALID_RATE;
+    }
+
+    if (mBufferCapacity < 0) {
+        ALOGE("AudioStreamBuilder: bufferCapacity out of range = %d", mBufferCapacity);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+
+    switch (mPerformanceMode) {
+        case AAUDIO_PERFORMANCE_MODE_NONE:
+        case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
+        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
+            break;
+        default:
+            ALOGE("AudioStreamBuilder: illegal performanceMode = %d", mPerformanceMode);
+            return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+            // break;
+    }
+
+    // Prevent ridiculous values from causing problems.
+    if (mFramesPerDataCallback != AAUDIO_UNSPECIFIED
+        && (mFramesPerDataCallback < FRAMES_PER_DATA_CALLBACK_MIN
+            || mFramesPerDataCallback > FRAMES_PER_DATA_CALLBACK_MAX)) {
+        ALOGE("AudioStreamBuilder: framesPerDataCallback out of range = %d",
+              mFramesPerDataCallback);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+
+    return AAUDIO_OK;
+}
\ No newline at end of file
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index fd416c4..d757592 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -165,6 +165,8 @@
 
     aaudio_result_t build(AudioStream **streamPtr);
 
+    aaudio_result_t validate() const;
+
 private:
     int32_t                    mSamplesPerFrame = AAUDIO_UNSPECIFIED;
     int32_t                    mSampleRate = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index 0ded8e1..d2ef3c7 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -74,6 +74,15 @@
 
     virtual int64_t incrementClientFrameCounter(int32_t frames)  = 0;
 
+
+    virtual int64_t getFramesWritten() override {
+        return mFramesWritten.get();
+    }
+
+    virtual int64_t getFramesRead() override {
+        return mFramesRead.get();
+    }
+
 protected:
 
     class StreamDeviceCallback : public android::AudioSystem::AudioDeviceCallback
@@ -103,6 +112,17 @@
     void onStart() { mCallbackEnabled.store(true); }
     void onStop() { mCallbackEnabled.store(false); }
 
+    int64_t incrementFramesWritten(int32_t frames) {
+        return mFramesWritten.increment(frames);
+    }
+
+    int64_t incrementFramesRead(int32_t frames) {
+        return mFramesRead.increment(frames);
+    }
+
+    MonotonicCounter           mFramesWritten;
+    MonotonicCounter           mFramesRead;
+
     FixedBlockAdapter         *mBlockAdapter = nullptr;
     aaudio_wrapping_frames_t   mPositionWhenStarting = 0;
     int32_t                    mCallbackBufferSize = 0;
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 7e39908..77f31e2 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -423,7 +423,7 @@
     default:
         break;
     }
-    return AudioStream::getFramesRead();
+    return AudioStreamLegacy::getFramesRead();
 }
 
 aaudio_result_t AudioStreamTrack::getTimestamp(clockid_t clockId,
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 164784d..2d8ac6e 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -208,9 +208,12 @@
     status_t status;
     switch (result) {
     case AAUDIO_ERROR_DISCONNECTED:
-    case AAUDIO_ERROR_INVALID_HANDLE:
+    case AAUDIO_ERROR_NO_SERVICE:
         status = DEAD_OBJECT;
         break;
+    case AAUDIO_ERROR_INVALID_HANDLE:
+        status = BAD_TYPE;
+        break;
     case AAUDIO_ERROR_INVALID_STATE:
         status = INVALID_OPERATION;
         break;
@@ -233,7 +236,6 @@
     case AAUDIO_ERROR_NO_FREE_HANDLES:
     case AAUDIO_ERROR_NO_MEMORY:
     case AAUDIO_ERROR_TIMEOUT:
-    case AAUDIO_ERROR_NO_SERVICE:
     default:
         status = UNKNOWN_ERROR;
         break;
@@ -257,12 +259,12 @@
     case INVALID_OPERATION:
         result = AAUDIO_ERROR_INVALID_STATE;
         break;
-        case UNEXPECTED_NULL:
-            result = AAUDIO_ERROR_NULL;
-            break;
-        case BAD_VALUE:
-            result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
-            break;
+    case UNEXPECTED_NULL:
+        result = AAUDIO_ERROR_NULL;
+        break;
+    case BAD_VALUE:
+        result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+        break;
     case WOULD_BLOCK:
         result = AAUDIO_ERROR_WOULD_BLOCK;
         break;
diff --git a/media/libaaudio/tests/Android.mk b/media/libaaudio/tests/Android.mk
index afcdebf..e6c779b 100644
--- a/media/libaaudio/tests/Android.mk
+++ b/media/libaaudio/tests/Android.mk
@@ -6,9 +6,7 @@
     frameworks/av/media/libaaudio/include \
     frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_handle_tracker.cpp
-LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
-                          libcutils liblog libmedia libutils libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_SHARED_LIBRARIES := libaaudio
 LOCAL_MODULE := test_handle_tracker
 include $(BUILD_NATIVE_TEST)
 
@@ -18,9 +16,7 @@
     frameworks/av/media/libaaudio/include \
     frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_marshalling.cpp
-LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
-                          libcutils liblog libmedia libutils libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_SHARED_LIBRARIES := libaaudio libbinder libcutils libutils
 LOCAL_MODULE := test_aaudio_marshalling
 include $(BUILD_NATIVE_TEST)
 
@@ -30,9 +26,7 @@
     frameworks/av/media/libaaudio/include \
     frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_block_adapter.cpp
-LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
-                          libcutils liblog libmedia libutils libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_SHARED_LIBRARIES := libaaudio
 LOCAL_MODULE := test_block_adapter
 include $(BUILD_NATIVE_TEST)
 
@@ -42,9 +36,7 @@
     frameworks/av/media/libaaudio/include \
     frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_linear_ramp.cpp
-LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
-                          libcutils liblog libmedia libutils libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_SHARED_LIBRARIES := libaaudio
 LOCAL_MODULE := test_linear_ramp
 include $(BUILD_NATIVE_TEST)
 
@@ -54,8 +46,6 @@
     frameworks/av/media/libaaudio/include \
     frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_open_params.cpp
-LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
-                          libcutils liblog libmedia libutils libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_SHARED_LIBRARIES := libaaudio libbinder libcutils libutils
 LOCAL_MODULE := test_open_params
 include $(BUILD_NATIVE_TEST)
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 9ef1db7..ed1e6d1 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -1225,6 +1225,13 @@
     return aps->getMasterMono(mono);
 }
 
+float AudioSystem::getStreamVolumeDB(audio_stream_type_t stream, int index, audio_devices_t device)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return NAN;
+    return aps->getStreamVolumeDB(stream, index, device);
+}
+
 // ---------------------------------------------------------------------------
 
 int AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index f0f413d..b72047b 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -19,6 +19,7 @@
 #include <utils/Log.h>
 
 #include <stdint.h>
+#include <math.h>
 #include <sys/types.h>
 
 #include <binder/Parcel.h>
@@ -77,6 +78,7 @@
     SET_AUDIO_PORT_CALLBACK_ENABLED,
     SET_MASTER_MONO,
     GET_MASTER_MONO,
+    GET_STREAM_VOLUME_DB
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -815,6 +817,20 @@
         }
         return status;
     }
+
+    virtual float getStreamVolumeDB(audio_stream_type_t stream, int index, audio_devices_t device)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.writeInt32(static_cast <int32_t>(stream));
+        data.writeInt32(static_cast <int32_t>(index));
+        data.writeUint32(static_cast <uint32_t>(device));
+        status_t status = remote()->transact(GET_STREAM_VOLUME_DB, data, &reply);
+        if (status != NO_ERROR) {
+            return NAN;
+        }
+        return reply.readFloat();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -1405,6 +1421,17 @@
             return NO_ERROR;
         } break;
 
+        case GET_STREAM_VOLUME_DB: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_stream_type_t stream =
+                    static_cast <audio_stream_type_t>(data.readInt32());
+            int index = static_cast <int>(data.readInt32());
+            audio_devices_t device =
+                    static_cast <audio_devices_t>(data.readUint32());
+            reply->writeFloat(getStreamVolumeDB(stream, index, device));
+            return NO_ERROR;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
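
Note on the volume query added above: GET_STREAM_VOLUME_DB is plumbed through the Bp proxy and BnAudioPolicyService::onTransact(), and AudioSystem::getStreamVolumeDB() returns NAN whenever the policy service is unreachable, so callers should test for that. A minimal caller sketch, assuming a native client linked against libaudioclient (the helper below is illustrative, not part of this change):

    #define LOG_TAG "StreamVolumeDbExample"
    #include <cmath>
    #include <media/AudioSystem.h>
    #include <system/audio.h>
    #include <utils/Log.h>

    // Query the dB attenuation for a stream/index/device triple via the new API.
    static void logMusicVolumeDb(int index) {
        float db = android::AudioSystem::getStreamVolumeDB(
                AUDIO_STREAM_MUSIC, index, AUDIO_DEVICE_OUT_SPEAKER);
        if (std::isnan(db)) {
            // NAN means the audio policy service is unavailable.
            ALOGW("getStreamVolumeDB(): audio policy service unavailable");
            return;
        }
        ALOGI("music index %d -> %.2f dB on speaker", index, db);
    }
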
diff --git a/media/libaudioclient/PlayerBase.cpp b/media/libaudioclient/PlayerBase.cpp
index cbef1b3..7868318 100644
--- a/media/libaudioclient/PlayerBase.cpp
+++ b/media/libaudioclient/PlayerBase.cpp
@@ -79,7 +79,7 @@
     }
 }
 
-//FIXME temporary method while some AudioTrack state is outside of this class
+//FIXME temporary method while some player state is outside of this class
 void PlayerBase::reportEvent(player_state_t event) {
     servicePlayerEvent(event);
 }
@@ -87,10 +87,30 @@
 status_t PlayerBase::startWithStatus() {
     status_t status = playerStart();
     if (status == NO_ERROR) {
-        ALOGD("PlayerBase::start() from IPlayer");
         servicePlayerEvent(PLAYER_STATE_STARTED);
     } else {
-        ALOGD("PlayerBase::start() no AudioTrack to start from IPlayer");
+        ALOGW("PlayerBase::start() error %d", status);
+    }
+    return status;
+}
+
+status_t PlayerBase::pauseWithStatus() {
+    status_t status = playerPause();
+    if (status == NO_ERROR) {
+        servicePlayerEvent(PLAYER_STATE_PAUSED);
+    } else {
+        ALOGW("PlayerBase::pause() error %d", status);
+    }
+    return status;
+}
+
+
+status_t PlayerBase::stopWithStatus() {
+    status_t status = playerStop();
+    if (status == NO_ERROR) {
+        servicePlayerEvent(PLAYER_STATE_STOPPED);
+    } else {
+        ALOGW("PlayerBase::stop() error %d", status);
     }
     return status;
 }
@@ -98,42 +118,36 @@
 //------------------------------------------------------------------------------
 // Implementation of IPlayer
 void PlayerBase::start() {
+    ALOGD("PlayerBase::start() from IPlayer");
     (void)startWithStatus();
 }
 
 void PlayerBase::pause() {
-    if (playerPause() == NO_ERROR) {
-        ALOGD("PlayerBase::pause() from IPlayer");
-        servicePlayerEvent(PLAYER_STATE_PAUSED);
-    } else {
-        ALOGD("PlayerBase::pause() no AudioTrack to pause from IPlayer");
-    }
+    ALOGD("PlayerBase::pause() from IPlayer");
+    (void)pauseWithStatus();
 }
 
 
 void PlayerBase::stop() {
-    if (playerStop() == NO_ERROR) {
-        ALOGD("PlayerBase::stop() from IPlayer");
-        servicePlayerEvent(PLAYER_STATE_STOPPED);
-    } else {
-        ALOGD("PlayerBase::stop() no AudioTrack to stop from IPlayer");
-    }
+    ALOGD("PlayerBase::stop() from IPlayer");
+    (void)stopWithStatus();
 }
 
 void PlayerBase::setVolume(float vol) {
+    ALOGD("PlayerBase::setVolume() from IPlayer");
     {
         Mutex::Autolock _l(mSettingsLock);
         mVolumeMultiplierL = vol;
         mVolumeMultiplierR = vol;
     }
-    if (playerSetVolume() == NO_ERROR) {
-        ALOGD("PlayerBase::setVolume() from IPlayer");
-    } else {
-        ALOGD("PlayerBase::setVolume() no AudioTrack for volume control from IPlayer");
+    status_t status = playerSetVolume();
+    if (status != NO_ERROR) {
+        ALOGW("PlayerBase::setVolume() error %d", status);
     }
 }
 
 void PlayerBase::setPan(float pan) {
+    ALOGD("PlayerBase::setPan() from IPlayer");
     {
         Mutex::Autolock _l(mSettingsLock);
         pan = min(max(-1.0f, pan), 1.0f);
@@ -145,10 +159,9 @@
             mPanMultiplierR = 1.0f + pan;
         }
     }
-    if (playerSetVolume() == NO_ERROR) {
-        ALOGD("PlayerBase::setPan() from IPlayer");
-    } else {
-        ALOGD("PlayerBase::setPan() no AudioTrack for volume control from IPlayer");
+    status_t status = playerSetVolume();
+    if (status != NO_ERROR) {
+        ALOGW("PlayerBase::setPan() error %d", status);
     }
 }
 
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 853d318..2875794 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -338,6 +338,9 @@
     static status_t setMasterMono(bool mono);
     static status_t getMasterMono(bool *mono);
 
+    static float    getStreamVolumeDB(
+            audio_stream_type_t stream, int index, audio_devices_t device);
+
     // ----------------------------------------------------------------------------
 
     class AudioPortCallback : public RefBase
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index d111fd2..ac71873 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -169,6 +169,8 @@
 
     virtual status_t setMasterMono(bool mono) = 0;
     virtual status_t getMasterMono(bool *mono) = 0;
+    virtual float    getStreamVolumeDB(
+            audio_stream_type_t stream, int index, audio_devices_t device) = 0;
 };
 
 
diff --git a/media/libaudioclient/include/media/PlayerBase.h b/media/libaudioclient/include/media/PlayerBase.h
index fe1db7b..e63090b 100644
--- a/media/libaudioclient/include/media/PlayerBase.h
+++ b/media/libaudioclient/include/media/PlayerBase.h
@@ -48,6 +48,8 @@
 
 
             status_t startWithStatus();
+            status_t pauseWithStatus();
+            status_t stopWithStatus();
 
             //FIXME temporary method while some player state is outside of this class
             void reportEvent(player_state_t event);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index ab33cea..bdeecb0 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1039,10 +1039,7 @@
         array[i] = {mBuffers[portIndex][i].mData, mBuffers[portIndex][i].mBufferID};
     }
     if (portIndex == kPortIndexInput) {
-        err = mBufferChannel->setInputBufferArray(array);
-        if (err != OK) {
-            return err;
-        }
+        mBufferChannel->setInputBufferArray(array);
     } else if (portIndex == kPortIndexOutput) {
         mBufferChannel->setOutputBufferArray(array);
     } else {
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index 796d3dc..0d9696f 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -284,7 +284,7 @@
     return dealer;
 }
 
-status_t ACodecBufferChannel::setInputBufferArray(const std::vector<BufferAndId> &array) {
+void ACodecBufferChannel::setInputBufferArray(const std::vector<BufferAndId> &array) {
     if (hasCryptoOrDescrambler()) {
         size_t totalSize = std::accumulate(
                 array.begin(), array.end(), 0u,
@@ -311,15 +311,11 @@
         if (hasCryptoOrDescrambler()) {
             sharedEncryptedBuffer = mDealer->allocate(elem.mBuffer->capacity());
         }
-        if (elem.mBuffer->data() == NULL && sharedEncryptedBuffer == NULL) {
-            return BAD_VALUE;
-        }
         inputBuffers.emplace_back(elem.mBuffer, elem.mBufferId, sharedEncryptedBuffer);
     }
     std::atomic_store(
             &mInputBuffers,
             std::make_shared<const std::vector<const BufferInfo>>(inputBuffers));
-    return OK;
 }
 
 void ACodecBufferChannel::setOutputBufferArray(const std::vector<BufferAndId> &array) {
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 6a5a229..f2b1f10 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -66,7 +66,8 @@
       mNumFramesReceived(0),
       mNumFramesSkipped(0),
       mNumFramesLost(0),
-      mNumClientOwnedBuffers(0) {
+      mNumClientOwnedBuffers(0),
+      mNoMoreFramesToRead(false) {
     ALOGV("sampleRate: %u, outSampleRate: %u, channelCount: %u",
             sampleRate, outSampleRate, channelCount);
     CHECK(channelCount == 1 || channelCount == 2);
@@ -178,6 +179,7 @@
 
     mStarted = false;
     mStopSystemTimeUs = -1;
+    mNoMoreFramesToRead = false;
     mFrameAvailableCondition.signal();
 
     mRecord->stop();
@@ -246,6 +248,9 @@
 
     while (mStarted && mBuffersReceived.empty()) {
         mFrameAvailableCondition.wait(mLock);
+        if (mNoMoreFramesToRead) {
+            return OK;
+        }
     }
     if (!mStarted) {
         return OK;
@@ -359,6 +364,8 @@
     if (mStopSystemTimeUs != -1 && timeUs >= mStopSystemTimeUs) {
         ALOGV("Drop Audio frame at %lld  stop time: %lld us",
                 (long long)timeUs, (long long)mStopSystemTimeUs);
+        mNoMoreFramesToRead = true;
+        mFrameAvailableCondition.signal();
         return OK;
     }
 
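
The AudioSource change above adds mNoMoreFramesToRead so that once the stop time is reached in the capture callback, a reader blocked in read() is woken and returns instead of waiting forever on an empty queue. A generic sketch of that producer/consumer wake-up pattern, assuming the usual libutils Mutex/Condition (names below are illustrative, not the AudioSource members):

    #include <utils/Condition.h>
    #include <utils/Mutex.h>

    struct FrameQueue {
        android::Mutex mLock;
        android::Condition mFrameAvailable;
        bool mStarted = true;
        bool mNoMoreFrames = false;      // set once the stop time has passed

        // Producer side: no further frames will arrive, wake any waiter.
        void markEndOfStream() {
            android::Mutex::Autolock l(mLock);
            mNoMoreFrames = true;
            mFrameAvailable.signal();
        }

        // Consumer side: wait for data, but bail out on end of stream.
        void waitForFrame() {
            android::Mutex::Autolock l(mLock);
            while (mStarted /* && no buffered frames */) {
                mFrameAvailable.wait(mLock);
                if (mNoMoreFrames) {
                    return;              // mirrors read() returning OK early
                }
            }
        }
    };
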
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index bbdcf0b..ecd62b0 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -3125,6 +3125,13 @@
         }
         case FOURCC('y', 'r', 'r', 'c'):
         {
+            if (size < 6) {
+                delete[] buffer;
+                buffer = NULL;
+                ALOGE("b/62133227");
+                android_errorWriteLog(0x534e4554, "62133227");
+                return ERROR_MALFORMED;
+            }
             char tmp[5];
             uint16_t year = U16_AT(&buffer[4]);
 
@@ -3147,6 +3154,8 @@
 
         // smallest possible valid UTF-16 string w BOM: 0xfe 0xff 0x00 0x00
         if (size < 6) {
+            delete[] buffer;
+            buffer = NULL;
             return ERROR_MALFORMED;
         }
 
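
The extractor fix above rejects metadata boxes shorter than the fields about to be read, and frees the scratch buffer on the error path (with a SafetyNet log for b/62133227). A generic sketch of that bounds check, not the extractor code itself:

    #include <cstddef>
    #include <cstdint>

    // Reject a payload that cannot hold 4 header bytes plus a 2-byte year.
    static bool parseYearBox(const uint8_t* buffer, size_t size, uint16_t* outYear) {
        if (buffer == nullptr || size < 6) {
            return false;                               // ERROR_MALFORMED upstream
        }
        // Big-endian 16-bit read, equivalent to U16_AT(&buffer[4]).
        *outYear = static_cast<uint16_t>((buffer[4] << 8) | buffer[5]);
        return true;
    }
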
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
old mode 100755
new mode 100644
index ed952e9..52e1626
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -343,6 +343,7 @@
 
     void dumpTimeStamps();
 
+    int64_t getStartTimeOffsetScaledTimeUs() const;
     int32_t getStartTimeOffsetScaledTime() const;
 
     static void *ThreadWrapper(void *me);
@@ -3162,7 +3163,7 @@
 }
 
 int64_t MPEG4Writer::Track::getDurationUs() const {
-    return mTrackDurationUs;
+    return mTrackDurationUs + getStartTimeOffsetScaledTimeUs();
 }
 
 int64_t MPEG4Writer::Track::getEstimatedTrackSizeBytes() const {
@@ -3646,14 +3647,18 @@
     mOwner->endBox();  // pasp
 }
 
-int32_t MPEG4Writer::Track::getStartTimeOffsetScaledTime() const {
+int64_t MPEG4Writer::Track::getStartTimeOffsetScaledTimeUs() const {
     int64_t trackStartTimeOffsetUs = 0;
     int64_t moovStartTimeUs = mOwner->getStartTimestampUs();
     if (mStartTimestampUs != moovStartTimeUs) {
         CHECK_GT(mStartTimestampUs, moovStartTimeUs);
         trackStartTimeOffsetUs = mStartTimestampUs - moovStartTimeUs;
     }
-    return (trackStartTimeOffsetUs *  mTimeScale + 500000LL) / 1000000LL;
+    return trackStartTimeOffsetUs;
+}
+
+int32_t MPEG4Writer::Track::getStartTimeOffsetScaledTime() const {
+    return (getStartTimeOffsetScaledTimeUs() *  mTimeScale + 500000LL) / 1000000LL;
 }
 
 void MPEG4Writer::Track::writeSttsBox() {
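
getStartTimeOffsetScaledTimeUs() now exposes the raw microsecond offset so getDurationUs() can add it directly, while getStartTimeOffsetScaledTime() keeps the rounded conversion into the track timescale. A worked example of that conversion, with illustrative numbers:

    #include <cstdint>

    // A track that starts 23 ms after the moov start time, audio timescale 44100.
    int64_t trackStartTimeOffsetUs = 23000;
    int32_t mTimeScale = 44100;
    // Same rounding as getStartTimeOffsetScaledTime():
    int32_t scaledTicks = (trackStartTimeOffsetUs * mTimeScale + 500000LL) / 1000000LL;
    // scaledTicks == 1014 (1014.3 ticks rounded to the nearest tick), while
    // getDurationUs() adds the unrounded 23000 us to mTrackDurationUs.
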
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 280dd95..bd71632 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -735,8 +735,7 @@
             mAnalyticsItem->setInt32(kCodecCrypto, 1);
         }
     } else if (mFlags & kFlagIsSecure) {
-        ALOGE("Crypto or descrambler should be given for secure codec");
-        return BAD_VALUE;
+        ALOGW("Crypto or descrambler should be given for secure codec");
     }
 
     // save msg for reset
@@ -2596,6 +2595,10 @@
     if (csd->size() > codecInputData->capacity()) {
         return -EINVAL;
     }
+    if (codecInputData->data() == NULL) {
+        ALOGV("Input buffer %zu is not properly allocated", bufferIndex);
+        return -EINVAL;
+    }
 
     memcpy(codecInputData->data(), csd->data(), csd->size());
 
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
index 8a569c9..6e70ded 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
@@ -62,6 +62,7 @@
             kProfileLevels, ARRAY_SIZE(kProfileLevels),
             320 /* width */, 240 /* height */, callbacks,
             appData, component),
+      mCodecCtx(NULL),
       mMemRecords(NULL),
       mFlushOutBuffer(NULL),
       mOmxColorFormat(OMX_COLOR_FormatYUV420Planar),
@@ -69,18 +70,22 @@
       mNewWidth(mWidth),
       mNewHeight(mHeight),
       mChangingResolution(false),
+      mSignalledError(false),
       mStride(mWidth) {
     initPorts(kNumBuffers, INPUT_BUF_SIZE, kNumBuffers, CODEC_MIME_TYPE);
 
     // If input dump is enabled, then open create an empty file
     GENERATE_FILE_NAMES();
     CREATE_DUMP_FILE(mInFile);
-
-    CHECK_EQ(initDecoder(), (status_t)OK);
 }
 
 SoftMPEG2::~SoftMPEG2() {
-    CHECK_EQ(deInitDecoder(), (status_t)OK);
+    if (OK != deInitDecoder()) {
+        ALOGE("Failed to deinit decoder");
+        notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
+        mSignalledError = true;
+        return;
+    }
 }
 
 
@@ -198,6 +203,7 @@
     setNumCores();
 
     mStride = 0;
+    mSignalledError = false;
 
     return OK;
 }
@@ -427,6 +433,7 @@
 
     mInitNeeded = true;
     mChangingResolution = false;
+    mCodecCtx = NULL;
 
     return OK;
 }
@@ -438,10 +445,11 @@
 
     ret = initDecoder();
     if (OK != ret) {
-        ALOGE("Create failure");
+        ALOGE("Failed to initialize decoder");
         deInitDecoder();
-        return NO_MEMORY;
+        return ret;
     }
+    mSignalledError = false;
     return OK;
 }
 
@@ -580,10 +588,22 @@
 void SoftMPEG2::onQueueFilled(OMX_U32 portIndex) {
     UNUSED(portIndex);
 
+    if (mSignalledError) {
+        return;
+    }
     if (mOutputPortSettingsChange != NONE) {
         return;
     }
 
+    if (NULL == mCodecCtx) {
+        if (OK != initDecoder()) {
+            ALOGE("Failed to initialize decoder");
+            notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
+            mSignalledError = true;
+            return;
+        }
+    }
+
     List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
     List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
 
@@ -636,7 +656,9 @@
             bool portWillReset = false;
             handlePortSettingsChange(&portWillReset, mNewWidth, mNewHeight);
 
-            CHECK_EQ(reInitDecoder(), (status_t)OK);
+            if (OK != reInitDecoder()) {
+                ALOGE("Failed to reinitialize decoder");
+            }
             return;
         }
 
@@ -709,7 +731,10 @@
                 bool portWillReset = false;
                 handlePortSettingsChange(&portWillReset, s_dec_op.u4_pic_wd, s_dec_op.u4_pic_ht);
 
-                CHECK_EQ(reInitDecoder(), (status_t)OK);
+                if (OK != reInitDecoder()) {
+                    ALOGE("Failed to reinitialize decoder");
+                    return;
+                }
 
                 if (setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx)) {
                     ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
index 1921a23..6729a54 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
@@ -106,6 +106,7 @@
     // codec. So the codec is switching to decode the new resolution.
     bool mChangingResolution;
     bool mFlushNeeded;
+    bool mSignalledError;
     bool mWaitForI;
     size_t mStride;
 
diff --git a/media/libstagefright/include/ACodecBufferChannel.h b/media/libstagefright/include/ACodecBufferChannel.h
index 349f1f8..0da2e81 100644
--- a/media/libstagefright/include/ACodecBufferChannel.h
+++ b/media/libstagefright/include/ACodecBufferChannel.h
@@ -87,9 +87,8 @@
      *
      * @param array     Newly allocated buffers. Empty if buffers are
      *                  deallocated.
-     * @return OK if no error.
      */
-    status_t setInputBufferArray(const std::vector<BufferAndId> &array);
+    void setInputBufferArray(const std::vector<BufferAndId> &array);
     /**
      * Set output buffer array.
      *
diff --git a/media/libstagefright/include/media/stagefright/AudioSource.h b/media/libstagefright/include/media/stagefright/AudioSource.h
index 07a51bf..1595be4 100644
--- a/media/libstagefright/include/media/stagefright/AudioSource.h
+++ b/media/libstagefright/include/media/stagefright/AudioSource.h
@@ -95,6 +95,7 @@
     int64_t mNumFramesSkipped;
     int64_t mNumFramesLost;
     int64_t mNumClientOwnedBuffers;
+    bool mNoMoreFramesToRead;
 
     List<MediaBuffer * > mBuffersReceived;
 
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 4c48e8b..9763bf2 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -411,6 +411,21 @@
         mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
                 mFrameSize, !isExternalTrack(), sampleRate);
     } else {
+        // Is the shared buffer of sufficient size?
+        // (frameCount * mFrameSize) is <= SIZE_MAX, checked in TrackBase.
+        if (sharedBuffer->size() < frameCount * mFrameSize) {
+            // Workaround: clear out mCblk to indicate track hasn't been properly created.
+            mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
+            if (mClient == 0) {
+                free(mCblk);
+            }
+            mCblk = NULL;
+
+            mSharedBuffer.clear(); // release shared buffer early
+            android_errorWriteLog(0x534e4554, "38340117");
+            return;
+        }
+
         mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
                 mFrameSize);
     }
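
The guard above refuses to build a StaticAudioTrackServerProxy when the client's shared memory is smaller than the frames it claims to hold, tearing the cblk back down so track creation fails cleanly (SafetyNet log for b/38340117). The required size is simply frameCount * frameSize; illustrative numbers:

    #include <cstddef>
    #include <cstdint>

    // Minimum shared-memory size a static track must provide.
    size_t frameCount = 4096;                               // frames in the buffer
    size_t frameSize  = 2 /* channels */ * sizeof(int16_t); // PCM16 stereo = 4 bytes
    size_t requiredBytes = frameCount * frameSize;          // 16384 bytes
    // A sharedBuffer smaller than requiredBytes is now rejected instead of
    // letting the server proxy read past the end of the client allocation.
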
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 60ed1d6..84e9426 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -241,6 +241,9 @@
 
     virtual status_t setMasterMono(bool mono) = 0;
     virtual status_t getMasterMono(bool *mono) = 0;
+
+    virtual float    getStreamVolumeDB(
+                audio_stream_type_t stream, int index, audio_devices_t device) = 0;
 };
 
 
diff --git a/services/audiopolicy/config/usb_audio_policy_configuration.xml b/services/audiopolicy/config/usb_audio_policy_configuration.xml
index 1630a94..a487ecb 100644
--- a/services/audiopolicy/config/usb_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/usb_audio_policy_configuration.xml
@@ -30,14 +30,18 @@
                      samplingRates="44100" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
         </devicePort>
         <devicePort tagName="USB Device Out" type="AUDIO_DEVICE_OUT_USB_DEVICE" role="sink"/>
+        <devicePort tagName="USB Headset Out" type="AUDIO_DEVICE_OUT_USB_HEADSET" role="sink"/>
         <devicePort tagName="USB Device In" type="AUDIO_DEVICE_IN_USB_DEVICE" role="source"/>
+        <devicePort tagName="USB Headset In" type="AUDIO_DEVICE_IN_USB_HEADSET" role="source"/>
     </devicePorts>
     <routes>
         <route type="mix" sink="USB Host Out"
                sources="usb_accessory output"/>
         <route type="mix" sink="USB Device Out"
                sources="usb_device output"/>
+        <route type="mix" sink="USB Headset Out"
+               sources="usb_device output"/>
         <route type="mix" sink="usb_device input"
-               sources="USB Device In"/>
+               sources="USB Device In,USB Headset In"/>
     </routes>
 </module>
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index e0ce2d6..72924b8 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1595,6 +1595,10 @@
         } else {
             halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
         }
+    } else if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION &&
+               getPhoneState() == AUDIO_MODE_IN_COMMUNICATION &&
+               audio_is_linear_pcm(format)) {
+        flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX);
     }
 
     // find a compatible input profile (not necessarily identical in parameters)
@@ -3372,6 +3376,12 @@
     return NO_ERROR;
 }
 
+float AudioPolicyManager::getStreamVolumeDB(
+        audio_stream_type_t stream, int index, audio_devices_t device)
+{
+    return computeVolume(stream, index, device);
+}
+
 status_t AudioPolicyManager::disconnectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc)
 {
     ALOGV("%s handle %d", __FUNCTION__, sourceDesc->getHandle());
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index c831d46..c62ba25 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -234,6 +234,8 @@
 
         virtual status_t setMasterMono(bool mono);
         virtual status_t getMasterMono(bool *mono);
+        virtual float    getStreamVolumeDB(
+                    audio_stream_type_t stream, int index, audio_devices_t device);
 
         // return the strategy corresponding to a given stream type
         routing_strategy getStrategy(audio_stream_type_t stream) const;
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 1e63a05..8a2f1dc 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -769,4 +769,16 @@
     return mAudioPolicyManager->getMasterMono(mono);
 }
 
+
+float AudioPolicyService::getStreamVolumeDB(
+            audio_stream_type_t stream, int index, audio_devices_t device)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NAN;
+    }
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->getStreamVolumeDB(stream, index, device);
+}
+
+
 }; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 9a083f4..2cd26ac 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -202,6 +202,9 @@
     virtual status_t setMasterMono(bool mono);
     virtual status_t getMasterMono(bool *mono);
 
+    virtual float    getStreamVolumeDB(
+                audio_stream_type_t stream, int index, audio_devices_t device);
+
             status_t doStopOutput(audio_io_handle_t output,
                                   audio_stream_type_t stream,
                                   audio_session_t session);
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 16c7f2d..f1cdea3 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -54,7 +54,8 @@
     gui/RingBufferConsumer.cpp \
     utils/CameraTraces.cpp \
     utils/AutoConditionLock.cpp \
-    utils/TagMonitor.cpp
+    utils/TagMonitor.cpp \
+    utils/LatencyHistogram.cpp
 
 LOCAL_SHARED_LIBRARIES:= \
     libui \
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index d6ed3ff..a11f4e2 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -613,6 +613,11 @@
     }
     write(fd, lines.string(), lines.size());
 
+    if (mRequestThread != NULL) {
+        mRequestThread->dumpCaptureRequestLatency(fd,
+                "    ProcessCaptureRequest latency histogram:");
+    }
+
     {
         lines = String8("    Last request sent:\n");
         write(fd, lines.string(), lines.size());
@@ -3426,7 +3431,8 @@
         mCurrentPreCaptureTriggerId(0),
         mRepeatingLastFrameNumber(
             hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES),
-        mPrepareVideoStream(false) {
+        mPrepareVideoStream(false),
+        mRequestLatency(kRequestLatencyBinSize) {
     mStatusId = statusTracker->addComponent();
 }
 
@@ -3651,6 +3657,9 @@
     // The exit from any possible waits
     mDoPauseSignal.signal();
     mRequestSignal.signal();
+
+    mRequestLatency.log("ProcessCaptureRequest latency histogram");
+    mRequestLatency.reset();
 }
 
 void Camera3Device::RequestThread::checkAndStopRepeatingRequest() {
@@ -3867,11 +3876,14 @@
             mNextRequests.size());
 
     bool submitRequestSuccess = false;
+    nsecs_t tRequestStart = systemTime(SYSTEM_TIME_MONOTONIC);
     if (mInterface->supportBatchRequest()) {
         submitRequestSuccess = sendRequestsBatch();
     } else {
         submitRequestSuccess = sendRequestsOneByOne();
     }
+    nsecs_t tRequestEnd = systemTime(SYSTEM_TIME_MONOTONIC);
+    mRequestLatency.add(tRequestStart, tRequestEnd);
 
     if (useFlushLock) {
         mFlushLock.unlock();
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 1ca6811..bfb58c6 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -40,6 +40,7 @@
 #include "device3/StatusTracker.h"
 #include "device3/Camera3BufferManager.h"
 #include "utils/TagMonitor.h"
+#include "utils/LatencyHistogram.h"
 #include <camera_metadata_hidden.h>
 
 /**
@@ -699,6 +700,11 @@
          */
         bool isStreamPending(sp<camera3::Camera3StreamInterface>& stream);
 
+        // dump processCaptureRequest latency
+        void dumpCaptureRequestLatency(int fd, const char* name) {
+            mRequestLatency.dump(fd, name);
+        }
+
       protected:
 
         virtual bool threadLoop();
@@ -820,6 +826,9 @@
 
         // Flag indicating if we should prepare video stream for video requests.
         bool               mPrepareVideoStream;
+
+        static const int32_t kRequestLatencyBinSize = 40; // in ms
+        CameraLatencyHistogram mRequestLatency;
     };
     sp<RequestThread> mRequestThread;
 
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index cb39244..7ad2300 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -85,6 +85,8 @@
     lines.appendFormat("      Total buffers: %zu, currently dequeued: %zu\n",
             mTotalBufferCount, mHandoutTotalBufferCount);
     write(fd, lines.string(), lines.size());
+
+    Camera3Stream::dump(fd, args);
 }
 
 status_t Camera3IOStreamBase::configureQueueLocked() {
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index ec0f508..e15aa43 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -43,7 +43,8 @@
         mTraceFirstBuffer(true),
         mUseBufferManager(false),
         mTimestampOffset(timestampOffset),
-        mConsumerUsage(0) {
+        mConsumerUsage(0),
+        mDequeueBufferLatency(kDequeueLatencyBinSize) {
 
     if (mConsumer == NULL) {
         ALOGE("%s: Consumer is NULL!", __FUNCTION__);
@@ -68,7 +69,8 @@
         mUseMonoTimestamp(false),
         mUseBufferManager(false),
         mTimestampOffset(timestampOffset),
-        mConsumerUsage(0) {
+        mConsumerUsage(0),
+        mDequeueBufferLatency(kDequeueLatencyBinSize) {
 
     if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
         ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__,
@@ -97,7 +99,8 @@
         mTraceFirstBuffer(true),
         mUseBufferManager(false),
         mTimestampOffset(timestampOffset),
-        mConsumerUsage(consumerUsage) {
+        mConsumerUsage(consumerUsage),
+        mDequeueBufferLatency(kDequeueLatencyBinSize) {
     // Deferred consumer only support preview surface format now.
     if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
         ALOGE("%s: Deferred consumer only supports IMPLEMENTATION_DEFINED format now!",
@@ -134,7 +137,8 @@
         mUseMonoTimestamp(false),
         mUseBufferManager(false),
         mTimestampOffset(timestampOffset),
-        mConsumerUsage(consumerUsage) {
+        mConsumerUsage(consumerUsage),
+        mDequeueBufferLatency(kDequeueLatencyBinSize) {
 
     if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
         mBufferReleasedListener = new BufferReleasedListener(this);
@@ -290,6 +294,9 @@
     write(fd, lines.string(), lines.size());
 
     Camera3IOStreamBase::dump(fd, args);
+
+    mDequeueBufferLatency.dump(fd,
+        "      DequeueBuffer latency histogram:");
 }
 
 status_t Camera3OutputStream::setTransform(int transform) {
@@ -529,7 +536,11 @@
         sp<ANativeWindow> currentConsumer = mConsumer;
         mLock.unlock();
 
+        nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);
         res = currentConsumer->dequeueBuffer(currentConsumer.get(), anb, fenceFd);
+        nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
+        mDequeueBufferLatency.add(dequeueStart, dequeueEnd);
+
         mLock.lock();
         if (res != OK) {
             ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
@@ -611,6 +622,9 @@
 
     mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
                                            : STATE_CONSTRUCTED;
+
+    mDequeueBufferLatency.log("Stream %d dequeueBuffer latency histogram", mId);
+    mDequeueBufferLatency.reset();
     return OK;
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 98ffb73..97aa7d4 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -21,6 +21,7 @@
 #include <gui/IProducerListener.h>
 #include <gui/Surface.h>
 
+#include "utils/LatencyHistogram.h"
 #include "Camera3Stream.h"
 #include "Camera3IOStreamBase.h"
 #include "Camera3OutputStreamInterface.h"
@@ -269,6 +270,9 @@
     void onBuffersRemovedLocked(const std::vector<sp<GraphicBuffer>>&);
     status_t detachBufferLocked(sp<GraphicBuffer>* buffer, int* fenceFd);
 
+    static const int32_t kDequeueLatencyBinSize = 5; // in ms
+    CameraLatencyHistogram mDequeueBufferLatency;
+
 }; // class Camera3OutputStream
 
 } // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index b45ef77..ba352c4 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -60,7 +60,8 @@
     mOldMaxBuffers(0),
     mPrepared(false),
     mPreparedBufferIdx(0),
-    mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX) {
+    mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX),
+    mBufferLimitLatency(kBufferLimitLatencyBinSize) {
 
     camera3_stream::stream_type = type;
     camera3_stream::width = width;
@@ -459,8 +460,11 @@
     // Wait for new buffer returned back if we are running into the limit.
     if (getHandoutOutputBufferCountLocked() == camera3_stream::max_buffers) {
         ALOGV("%s: Already dequeued max output buffers (%d), wait for next returned one.",
-                __FUNCTION__, camera3_stream::max_buffers);
+                        __FUNCTION__, camera3_stream::max_buffers);
+        nsecs_t waitStart = systemTime(SYSTEM_TIME_MONOTONIC);
         res = mOutputBufferReturnedSignal.waitRelative(mLock, kWaitForBufferDuration);
+        nsecs_t waitEnd = systemTime(SYSTEM_TIME_MONOTONIC);
+        mBufferLimitLatency.add(waitStart, waitEnd);
         if (res != OK) {
             if (res == TIMED_OUT) {
                 ALOGE("%s: wait for output buffer return timed out after %lldms (max_buffers %d)",
@@ -655,6 +659,9 @@
     ALOGV("%s: Stream %d: Disconnecting...", __FUNCTION__, mId);
     status_t res = disconnectLocked();
 
+    mBufferLimitLatency.log("Stream %d latency histogram for wait on max_buffers", mId);
+    mBufferLimitLatency.reset();
+
     if (res == -ENOTCONN) {
         // "Already disconnected" -- not an error
         return OK;
@@ -663,6 +670,13 @@
     }
 }
 
+void Camera3Stream::dump(int fd, const Vector<String16> &args) const
+{
+    (void)args;
+    mBufferLimitLatency.dump(fd,
+            "      Latency histogram for wait on max_buffers");
+}
+
 status_t Camera3Stream::getBufferLocked(camera3_stream_buffer *,
         const std::vector<size_t>&) {
     ALOGE("%s: This type of stream does not support output", __FUNCTION__);
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 9cdc1b3..b5a9c5d 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -25,6 +25,7 @@
 
 #include "hardware/camera3.h"
 
+#include "utils/LatencyHistogram.h"
 #include "Camera3StreamBufferListener.h"
 #include "Camera3StreamInterface.h"
 
@@ -349,7 +350,7 @@
     /**
      * Debug dump of the stream's state.
      */
-    virtual void     dump(int fd, const Vector<String16> &args) const = 0;
+    virtual void     dump(int fd, const Vector<String16> &args) const;
 
     /**
      * Add a camera3 buffer listener. Adding the same listener twice has
@@ -502,6 +503,10 @@
     // Outstanding buffers dequeued from the stream's buffer queue.
     List<buffer_handle_t> mOutstandingBuffers;
 
+    // Latency histogram of the wait time for handout buffer count to drop below
+    // max_buffers.
+    static const int32_t kBufferLimitLatencyBinSize = 33; //in ms
+    CameraLatencyHistogram mBufferLimitLatency;
 }; // class Camera3Stream
 
 }; // namespace camera3
diff --git a/services/camera/libcameraservice/utils/LatencyHistogram.cpp b/services/camera/libcameraservice/utils/LatencyHistogram.cpp
new file mode 100644
index 0000000..538bb6e
--- /dev/null
+++ b/services/camera/libcameraservice/utils/LatencyHistogram.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraLatencyHistogram"
+#include <inttypes.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+#include "LatencyHistogram.h"
+
+namespace android {
+
+CameraLatencyHistogram::CameraLatencyHistogram(int32_t binSizeMs, int32_t binCount) :
+        mBinSizeMs(binSizeMs),
+        mBinCount(binCount),
+        mBins(binCount),
+        mTotalCount(0) {
+}
+
+void CameraLatencyHistogram::add(nsecs_t start, nsecs_t end) {
+    nsecs_t duration = end - start;
+    int32_t durationMs = static_cast<int32_t>(duration / 1000000LL);
+    int32_t binIndex = durationMs / mBinSizeMs;
+
+    if (binIndex < 0) {
+        binIndex = 0;
+    } else if (binIndex >= mBinCount) {
+        binIndex = mBinCount-1;
+    }
+
+    mBins[binIndex]++;
+    mTotalCount++;
+}
+
+void CameraLatencyHistogram::reset() {
+    mBins.clear();
+    mTotalCount = 0;
+}
+
+void CameraLatencyHistogram::dump(int fd, const char* name) const {
+    if (mTotalCount == 0) {
+        return;
+    }
+
+    String8 lines;
+    lines.appendFormat("%s (%" PRId64 ") samples\n", name, mTotalCount);
+
+    String8 lineBins, lineBinCounts;
+    formatHistogramText(lineBins, lineBinCounts);
+
+    lineBins.append("\n");
+    lineBinCounts.append("\n");
+    lines.append(lineBins);
+    lines.append(lineBinCounts);
+
+    write(fd, lines.string(), lines.size());
+}
+
+void CameraLatencyHistogram::log(const char* fmt, ...) {
+    if (mTotalCount == 0) {
+        return;
+    }
+
+    va_list args;
+    va_start(args, fmt);
+    String8 histogramName = String8::formatV(fmt, args);
+    ALOGI("%s (%" PRId64 ") samples:", histogramName.string(), mTotalCount);
+    va_end(args);
+
+    String8 lineBins, lineBinCounts;
+    formatHistogramText(lineBins, lineBinCounts);
+
+    ALOGI("%s", lineBins.c_str());
+    ALOGI("%s", lineBinCounts.c_str());
+}
+
+void CameraLatencyHistogram::formatHistogramText(
+        String8& lineBins, String8& lineBinCounts) const {
+    lineBins = "  ";
+    lineBinCounts = "  ";
+
+    for (int32_t i = 0; i < mBinCount; i++) {
+        if (i == mBinCount - 1) {
+            lineBins.append("    inf (max ms)");
+        } else {
+            lineBins.appendFormat("%7d", mBinSizeMs*(i+1));
+        }
+        lineBinCounts.appendFormat("   %02.2f", 100.0*mBins[i]/mTotalCount);
+    }
+    lineBinCounts.append(" (%)");
+}
+
+}; //namespace android
diff --git a/services/camera/libcameraservice/utils/LatencyHistogram.h b/services/camera/libcameraservice/utils/LatencyHistogram.h
new file mode 100644
index 0000000..bfd9b1b
--- /dev/null
+++ b/services/camera/libcameraservice/utils/LatencyHistogram.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_LATENCY_HISTOGRAM_H_
+#define ANDROID_SERVERS_CAMERA_LATENCY_HISTOGRAM_H_
+
+#include <vector>
+
+#include <utils/Timers.h>
+#include <utils/Mutex.h>
+
+namespace android {
+
+// Histogram for camera latency characteristic
+class CameraLatencyHistogram {
+public:
+    CameraLatencyHistogram() = delete;
+    CameraLatencyHistogram(int32_t binSizeMs, int32_t binCount=10);
+    void add(nsecs_t start, nsecs_t end);
+    void reset();
+
+    void dump(int fd, const char* name) const;
+    void log(const char* format, ...);
+private:
+    int32_t mBinSizeMs;
+    int32_t mBinCount;
+    std::vector<int64_t> mBins;
+    uint64_t mTotalCount;
+
+    void formatHistogramText(String8& lineBins, String8& lineBinCounts) const;
+}; // class CameraLatencyHistogram
+
+}; // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA_LATENCY_HISTOGRAM_H_
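
CameraLatencyHistogram, introduced above, is wired into the request thread, output streams, and Camera3Stream with different bin sizes (40 / 5 / 33 ms). A minimal usage sketch for the class as declared in this header; the timed operation and file descriptor are stand-ins:

    #include <unistd.h>
    #include <utils/Timers.h>
    #include "utils/LatencyHistogram.h"

    static void timedOperation() { usleep(20 * 1000); }   // placeholder work

    void measureOnce(int dumpFd) {
        android::CameraLatencyHistogram histogram(40 /* ms per bin */); // 10 bins by default
        nsecs_t start = systemTime(SYSTEM_TIME_MONOTONIC);
        timedOperation();
        nsecs_t end = systemTime(SYSTEM_TIME_MONOTONIC);
        histogram.add(start, end);                   // out-of-range samples clamp to the last bin
        histogram.dump(dumpFd, "    example latency histogram:");
        histogram.log("example latency histogram");  // same data to logcat
        histogram.reset();
    }
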
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index d3df52c..39a83c7 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -30,7 +30,7 @@
     libgui \
     liblog \
     libbase \
-    libavservices_minijail \
+    libavservices_minijail_vendor \
     libcutils \
     libhwbinder \
     libhidltransport \
diff --git a/services/minijail/Android.mk b/services/minijail/Android.mk
index 3e63f97..b6fcacc 100644
--- a/services/minijail/Android.mk
+++ b/services/minijail/Android.mk
@@ -8,6 +8,14 @@
 LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
 include $(BUILD_SHARED_LIBRARY)
 
+# Small library for media.extractor and media.codec sandboxing.
+include $(CLEAR_VARS)
+LOCAL_MODULE := libavservices_minijail_vendor
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := minijail.cpp
+LOCAL_SHARED_LIBRARIES := libbase libminijail_vendor
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
+include $(BUILD_SHARED_LIBRARY)
 
 # Unit tests.
 include $(CLEAR_VARS)
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index 3dc1feb..5c6825d 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -52,15 +52,17 @@
             assert(false); // There are only two possible directions.
             break;
     }
-    ALOGD("AAudioEndpointManager::openEndpoint(), found %p for device = %d, dir = %d",
-          endpoint, deviceId, (int)direction);
 
     // If we can't find an existing one then open a new one.
-    if (endpoint == nullptr) {
+    if (endpoint != nullptr) {
+        ALOGD("AAudioEndpointManager::openEndpoint(), found %p for device = %d, dir = %d",
+              endpoint, deviceId, (int)direction);
+
+    } else {
         if (direction == AAUDIO_DIRECTION_INPUT) {
             AAudioServiceEndpointCapture *capture = new AAudioServiceEndpointCapture(audioService);
             if (capture->open(deviceId) != AAUDIO_OK) {
-                ALOGE("AAudioEndpointManager::openEndpoint(), open failed");
+                ALOGE("AAudioEndpointManager::openEndpoint(), open input failed");
                 delete capture;
             } else {
                 mInputs[deviceId] = capture;
@@ -69,17 +71,20 @@
         } else if (direction == AAUDIO_DIRECTION_OUTPUT) {
             AAudioServiceEndpointPlay *player = new AAudioServiceEndpointPlay(audioService);
             if (player->open(deviceId) != AAUDIO_OK) {
-                ALOGE("AAudioEndpointManager::openEndpoint(), open failed");
+                ALOGE("AAudioEndpointManager::openEndpoint(), open output failed");
                 delete player;
             } else {
                 mOutputs[deviceId] = player;
                 endpoint = player;
             }
         }
-
+        ALOGD("AAudioEndpointManager::openEndpoint(), created %p for device = %d, dir = %d",
+              endpoint, deviceId, (int)direction);
     }
 
     if (endpoint != nullptr) {
+        ALOGD("AAudioEndpointManager::openEndpoint(), sampleRate = %d, framesPerBurst = %d",
+              endpoint->getSampleRate(), endpoint->getFramesPerBurst());
         // Increment the reference count under this lock.
         endpoint->setReferenceCount(endpoint->getReferenceCount() + 1);
     }
@@ -95,9 +100,15 @@
     // Decrement the reference count under this lock.
     int32_t newRefCount = serviceEndpoint->getReferenceCount() - 1;
     serviceEndpoint->setReferenceCount(newRefCount);
+    ALOGD("AAudioEndpointManager::closeEndpoint(%p) newRefCount = %d",
+          serviceEndpoint, newRefCount);
+
+    // If no longer in use then close and delete it.
     if (newRefCount <= 0) {
         aaudio_direction_t direction = serviceEndpoint->getDirection();
-        int32_t deviceId = serviceEndpoint->getDeviceId();
+        // Track endpoints based on requested deviceId because UNSPECIFIED
+        // can change to a specific device after opening.
+        int32_t deviceId = serviceEndpoint->getRequestedDeviceId();
 
         switch (direction) {
             case AAUDIO_DIRECTION_INPUT:
@@ -109,6 +120,8 @@
         }
 
         serviceEndpoint->close();
+        ALOGD("AAudioEndpointManager::closeEndpoint() delete %p for device %d, dir = %d",
+              serviceEndpoint, deviceId, (int)direction);
         delete serviceEndpoint;
     }
 }
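
The endpoint manager now logs reuse vs. creation, reports the endpoint's sample rate and burst size, and (the functional fix) removes the cached entry by the requested device id rather than the resolved one, since an AAUDIO_UNSPECIFIED request can resolve to a concrete device after open(). A generic, hedged sketch of that reference-counted sharing scheme, not the service code:

    #include <cstdint>
    #include <map>

    struct Endpoint {
        explicit Endpoint(int32_t requested) : requestedDeviceId(requested) {}
        int32_t requestedDeviceId;   // id that was asked for, possibly "unspecified"
        int32_t refCount = 0;
    };

    static std::map<int32_t, Endpoint*> gOutputs;    // keyed by *requested* device id

    Endpoint* openEndpoint(int32_t requestedDeviceId) {
        Endpoint*& ep = gOutputs[requestedDeviceId];
        if (ep == nullptr) {
            ep = new Endpoint(requestedDeviceId);    // first stream creates it
        }
        ep->refCount++;                              // every stream on this device shares it
        return ep;
    }

    void closeEndpoint(Endpoint* ep) {
        if (--ep->refCount <= 0) {
            // Erase by the requested id: the resolved device id may differ
            // once an unspecified request has been bound to real hardware.
            gOutputs.erase(ep->requestedDeviceId);
            delete ep;
        }
    }
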
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
index db1103d..899ea35 100644
--- a/services/oboeservice/AAudioEndpointManager.h
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -35,7 +35,7 @@
 
     /**
      * Find a service endpoint for the given deviceId and direction.
-     * If an endpoint does not already exist then it will try to create one.
+     * If an endpoint does not already exist then try to create one.
      *
      * @param deviceId
      * @param direction
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index c9b9065..b0e0a74 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -89,7 +89,7 @@
         return result;
     } else {
         aaudio_handle_t handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
-        ALOGV("AAudioService::openStream(): handle = 0x%08X", handle);
+        ALOGD("AAudioService::openStream(): handle = 0x%08X", handle);
         if (handle < 0) {
             ALOGE("AAudioService::openStream(): handle table full");
             delete serviceStream;
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index d8ae284..cc2cb44 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -46,6 +46,7 @@
 
 // Set up an EXCLUSIVE MMAP stream that will be shared.
 aaudio_result_t AAudioServiceEndpoint::open(int32_t deviceId) {
+    mRequestedDeviceId = deviceId;
     mStreamInternal = getStreamInternal();
 
     AudioStreamBuilder builder;
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 50bf049..c271dbd 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -48,6 +48,7 @@
     aaudio_result_t stopStream(AAudioServiceStreamShared *sharedStream);
     aaudio_result_t close();
 
+    int32_t getRequestedDeviceId() const { return mRequestedDeviceId; }
     int32_t getDeviceId() const { return mStreamInternal->getDeviceId(); }
 
     aaudio_direction_t getDirection() const { return mStreamInternal->getDirection(); }
@@ -81,6 +82,7 @@
 
     AudioStreamInternal     *mStreamInternal = nullptr;
     int32_t                  mReferenceCount = 0;
+    int32_t                  mRequestedDeviceId = 0;
 };
 
 } /* namespace aaudio */
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.h b/services/oboeservice/AAudioServiceEndpointPlay.h
index b977960..89935ae 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.h
+++ b/services/oboeservice/AAudioServiceEndpointPlay.h
@@ -32,6 +32,9 @@
 
 namespace aaudio {
 
+/**
+ * Contains a mixer and a stream for writing the result of the mix.
+ */
 class AAudioServiceEndpointPlay : public AAudioServiceEndpoint {
 public:
     explicit AAudioServiceEndpointPlay(android::AAudioService &audioService);
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 8f0abc2..ee0e7ed 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -71,12 +71,11 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::pause() {
-
     sendCurrentTimestamp();
     mThreadEnabled.store(false);
     aaudio_result_t result = mAAudioThread.stop();
     if (result != AAUDIO_OK) {
-        processError();
+        processFatalError();
         return result;
     }
     sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
@@ -90,7 +89,7 @@
     mThreadEnabled.store(false);
     aaudio_result_t result = mAAudioThread.stop();
     if (result != AAUDIO_OK) {
-        processError();
+        processFatalError();
         return result;
     }
     sendServiceEvent(AAUDIO_SERVICE_EVENT_STOPPED);
@@ -126,7 +125,7 @@
     ALOGD("AAudioServiceStreamBase::run() exiting ----------------");
 }
 
-void AAudioServiceStreamBase::processError() {
+void AAudioServiceStreamBase::processFatalError() {
     sendServiceEvent(AAUDIO_SERVICE_EVENT_DISCONNECTED);
 }
 
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index ee52c39..46ceeae 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -111,7 +111,7 @@
 
     void run() override; // to implement Runnable
 
-    void processError();
+    void processFatalError();
 
 protected:
     aaudio_result_t writeUpMessageQueue(AAudioServiceMessage *command);
@@ -122,16 +122,16 @@
 
     virtual aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) = 0;
 
-    aaudio_stream_state_t               mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+    aaudio_stream_state_t   mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
 
     pid_t              mRegisteredClientThread = ILLEGAL_THREAD_ID;
 
     SharedRingBuffer*  mUpMessageQueue;
     std::mutex         mLockUpMessageQueue;
 
-    AAudioThread        mAAudioThread;
+    AAudioThread       mAAudioThread;
     // This is used by one thread to tell another thread to exit. So it must be atomic.
-    std::atomic<bool>   mThreadEnabled;
+    std::atomic<bool>  mThreadEnabled;
 
     aaudio_format_t    mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
     int32_t            mFramesPerBurst = 0;
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 97b9937..2f3ec27 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -138,15 +138,19 @@
         return AAUDIO_ERROR_UNAVAILABLE;
     }
 
+    if (deviceId == AAUDIO_UNSPECIFIED) {
+        ALOGW("AAudioServiceStreamMMAP::open() - openMmapStream() failed to set deviceId");
+    }
+
     // Create MMAP/NOIRQ buffer.
     int32_t minSizeFrames = configurationInput.getBufferCapacity();
-    if (minSizeFrames == 0) { // zero will get rejected
+    if (minSizeFrames <= 0) { // zero will get rejected
         minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
     }
     status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
     if (status != OK) {
-        ALOGE("%s: createMmapBuffer() returned status %d, return AAUDIO_ERROR_UNAVAILABLE",
-              __FILE__, status);
+        ALOGE("AAudioServiceStreamMMAP::open() - createMmapBuffer() returned status %d",
+              status);
         return AAUDIO_ERROR_UNAVAILABLE;
     } else {
         ALOGD("createMmapBuffer status %d shared_address = %p buffer_size %d burst_size %d",
@@ -181,6 +185,9 @@
     ALOGD("AAudioServiceStreamMMAP::open() original burst = %d, minMicros = %d, final burst = %d\n",
           mMmapBufferinfo.burst_size_frames, burstMinMicros, mFramesPerBurst);
 
+    ALOGD("AAudioServiceStreamMMAP::open() actual rate = %d, channels = %d, deviceId = %d\n",
+          mSampleRate, mSamplesPerFrame, deviceId);
+
     // Fill in AAudioStreamConfiguration
     configurationOutput.setSampleRate(mSampleRate);
     configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
@@ -199,7 +206,7 @@
     status_t status = mMmapStream->start(mMmapClient, &mPortHandle);
     if (status != OK) {
         ALOGE("AAudioServiceStreamMMAP::start() mMmapStream->start() returned %d", status);
-        processError();
+        processFatalError();
         result = AAudioConvert_androidToAAudioResult(status);
     } else {
         result = AAudioServiceStreamBase::start();
@@ -234,8 +241,6 @@
 aaudio_result_t AAudioServiceStreamMMAP::flush() {
     if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
     // TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
-    sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
-    mState = AAUDIO_STREAM_STATE_FLUSHED;
     return AAudioServiceStreamBase::flush();;
 }
 
@@ -244,13 +249,13 @@
                                                                 int64_t *timeNanos) {
     struct audio_mmap_position position;
     if (mMmapStream == nullptr) {
-        processError();
+        processFatalError();
         return AAUDIO_ERROR_NULL;
     }
     status_t status = mMmapStream->getMmapPosition(&position);
     if (status != OK) {
         ALOGE("sendCurrentTimestamp(): getMmapPosition() returned %d", status);
-        processError();
+        processFatalError();
         return AAudioConvert_androidToAAudioResult(status);
     } else {
         mFramesRead.update32(position.position_frames);
@@ -295,4 +300,4 @@
     parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
     parcelable.mDownDataQueueParcelable.setCapacityInFrames(mCapacityInFrames);
     return AAUDIO_OK;
-}
\ No newline at end of file
+}
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index 494b18e..f246fc02 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -34,8 +34,10 @@
 using namespace android;
 using namespace aaudio;
 
-#define MIN_BURSTS_PER_BUFFER   2
-#define MAX_BURSTS_PER_BUFFER   32
+#define MIN_BURSTS_PER_BUFFER       2
+#define DEFAULT_BURSTS_PER_BUFFER   16
+// This is an arbitrary range. TODO review.
+#define MAX_FRAMES_PER_BUFFER       (32 * 1024)
 
 AAudioServiceStreamShared::AAudioServiceStreamShared(AAudioService &audioService)
     : mAudioService(audioService)
@@ -46,12 +48,58 @@
     close();
 }
 
+int32_t AAudioServiceStreamShared::calculateBufferCapacity(int32_t requestedCapacityFrames,
+                                                           int32_t framesPerBurst) {
+
+    if (requestedCapacityFrames > MAX_FRAMES_PER_BUFFER) {
+        ALOGE("AAudioServiceStreamShared::open(), requested capacity %d > max %d",
+              requestedCapacityFrames, MAX_FRAMES_PER_BUFFER);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+
+    // Determine how many bursts will fit in the buffer.
+    int32_t numBursts;
+    if (requestedCapacityFrames == AAUDIO_UNSPECIFIED) {
+        // Use fewer bursts if default is too many.
+        if ((DEFAULT_BURSTS_PER_BUFFER * framesPerBurst) > MAX_FRAMES_PER_BUFFER) {
+            numBursts = MAX_FRAMES_PER_BUFFER / framesPerBurst;
+        } else {
+            numBursts = DEFAULT_BURSTS_PER_BUFFER;
+        }
+    } else {
+        // round up to nearest burst boundary
+        numBursts = (requestedCapacityFrames + framesPerBurst - 1) / framesPerBurst;
+    }
+
+    // Clip to bare minimum.
+    if (numBursts < MIN_BURSTS_PER_BUFFER) {
+        numBursts = MIN_BURSTS_PER_BUFFER;
+    }
+    // Check for numeric overflow.
+    if (numBursts > 0x8000 || framesPerBurst > 0x8000) {
+        ALOGE("AAudioServiceStreamShared::open(), numeric overflow, capacity = %d * %d",
+              numBursts, framesPerBurst);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    int32_t capacityInFrames = numBursts * framesPerBurst;
+
+    // Final sanity check.
+    if (capacityInFrames > MAX_FRAMES_PER_BUFFER) {
+        ALOGE("AAudioServiceStreamShared::open(), calculated capacity %d > max %d",
+              capacityInFrames, MAX_FRAMES_PER_BUFFER);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    ALOGD("AAudioServiceStreamShared::open(), requested capacity = %d frames, actual = %d",
+          requestedCapacityFrames, capacityInFrames);
+    return capacityInFrames;
+}
+
 aaudio_result_t AAudioServiceStreamShared::open(const aaudio::AAudioStreamRequest &request,
                      aaudio::AAudioStreamConfiguration &configurationOutput)  {
 
     aaudio_result_t result = AAudioServiceStreamBase::open(request, configurationOutput);
     if (result != AAUDIO_OK) {
-        ALOGE("AAudioServiceStreamBase open returned %d", result);
+        ALOGE("AAudioServiceStreamBase open() returned %d", result);
         return result;
     }
 
@@ -72,16 +120,18 @@
         mAudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
     } else if (mAudioFormat != AAUDIO_FORMAT_PCM_FLOAT) {
         ALOGE("AAudioServiceStreamShared::open(), mAudioFormat = %d, need FLOAT", mAudioFormat);
-        return AAUDIO_ERROR_INVALID_FORMAT;
+        result = AAUDIO_ERROR_INVALID_FORMAT;
+        goto error;
     }
 
     mSampleRate = configurationInput.getSampleRate();
     if (mSampleRate == AAUDIO_UNSPECIFIED) {
         mSampleRate = mServiceEndpoint->getSampleRate();
     } else if (mSampleRate != mServiceEndpoint->getSampleRate()) {
-        ALOGE("AAudioServiceStreamShared::open(), mAudioFormat = %d, need %d",
+        ALOGE("AAudioServiceStreamShared::open(), mSampleRate = %d, need %d",
               mSampleRate, mServiceEndpoint->getSampleRate());
-        return AAUDIO_ERROR_INVALID_RATE;
+        result = AAUDIO_ERROR_INVALID_RATE;
+        goto error;
     }
 
     mSamplesPerFrame = configurationInput.getSamplesPerFrame();
@@ -90,37 +140,51 @@
     } else if (mSamplesPerFrame != mServiceEndpoint->getSamplesPerFrame()) {
         ALOGE("AAudioServiceStreamShared::open(), mSamplesPerFrame = %d, need %d",
               mSamplesPerFrame, mServiceEndpoint->getSamplesPerFrame());
-        return AAUDIO_ERROR_OUT_OF_RANGE;
+        result = AAUDIO_ERROR_OUT_OF_RANGE;
+        goto error;
     }
 
-    // Determine this stream's shared memory buffer capacity.
     mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
-    int32_t minCapacityFrames = configurationInput.getBufferCapacity();
-    int32_t numBursts = MAX_BURSTS_PER_BUFFER;
-    if (minCapacityFrames != AAUDIO_UNSPECIFIED) {
-        numBursts = (minCapacityFrames + mFramesPerBurst - 1) / mFramesPerBurst;
-        if (numBursts < MIN_BURSTS_PER_BUFFER) {
-            numBursts = MIN_BURSTS_PER_BUFFER;
-        } else if (numBursts > MAX_BURSTS_PER_BUFFER) {
-            numBursts = MAX_BURSTS_PER_BUFFER;
-        }
+    ALOGD("AAudioServiceStreamShared::open(), mSampleRate = %d, mFramesPerBurst = %d",
+          mSampleRate, mFramesPerBurst);
+
+    mCapacityInFrames = calculateBufferCapacity(configurationInput.getBufferCapacity(),
+                                                mFramesPerBurst);
+    if (mCapacityInFrames < 0) {
+        result = mCapacityInFrames; // negative error code
+        mCapacityInFrames = 0;
+        goto error;
     }
-    mCapacityInFrames = numBursts * mFramesPerBurst;
-    ALOGD("AAudioServiceStreamShared::open(), mCapacityInFrames = %d", mCapacityInFrames);
 
     // Create audio data shared memory buffer for client.
     mAudioDataQueue = new SharedRingBuffer();
-    mAudioDataQueue->allocate(calculateBytesPerFrame(), mCapacityInFrames);
+    result = mAudioDataQueue->allocate(calculateBytesPerFrame(), mCapacityInFrames);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamShared::open(), could not allocate FIFO with %d frames",
+              mCapacityInFrames);
+        result = AAUDIO_ERROR_NO_MEMORY;
+        goto error;
+    }
+
+    ALOGD("AAudioServiceStreamShared::open() actual rate = %d, channels = %d, deviceId = %d",
+          mSampleRate, mSamplesPerFrame, mServiceEndpoint->getDeviceId());
 
     // Fill in configuration for client.
     configurationOutput.setSampleRate(mSampleRate);
     configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
     configurationOutput.setAudioFormat(mAudioFormat);
-    configurationOutput.setDeviceId(deviceId);
+    configurationOutput.setDeviceId(mServiceEndpoint->getDeviceId());
 
-    mServiceEndpoint->registerStream(this);
+    result = mServiceEndpoint->registerStream(this);
+    if (result != AAUDIO_OK) {
+        goto error;
+    }
 
     return AAUDIO_OK;
+
+error:
+    close();
+    return result;
 }
 
 /**
@@ -137,11 +201,11 @@
     aaudio_result_t result = endpoint->startStream(this);
     if (result != AAUDIO_OK) {
         ALOGE("AAudioServiceStreamShared::start() mServiceEndpoint returned %d", result);
-        processError();
+        processFatalError();
     } else {
         result = AAudioServiceStreamBase::start();
     }
-    return AAUDIO_OK;
+    return result;
 }
 
 /**
@@ -154,11 +218,10 @@
     if (endpoint == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    // Add this stream to the mixer.
     aaudio_result_t result = endpoint->stopStream(this);
     if (result != AAUDIO_OK) {
         ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
-        processError();
+        processFatalError();
     }
     return AAudioServiceStreamBase::pause();
 }
@@ -168,11 +231,10 @@
     if (endpoint == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    // Add this stream to the mixer.
     aaudio_result_t result = endpoint->stopStream(this);
     if (result != AAUDIO_OK) {
         ALOGE("AAudioServiceStreamShared::stop() mServiceEndpoint returned %d", result);
-        processError();
+        processFatalError();
     }
     return AAudioServiceStreamBase::stop();
 }
@@ -183,9 +245,17 @@
  * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
  */
 aaudio_result_t AAudioServiceStreamShared::flush()  {
-    // TODO make sure we are paused
-    // TODO actually flush the data
-    return AAudioServiceStreamBase::flush() ;
+    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+    if (endpoint == nullptr) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    if (mState != AAUDIO_STREAM_STATE_PAUSED) {
+        ALOGE("AAudioServiceStreamShared::flush() stream not paused, state = %s",
+            AAudio_convertStreamStateToText(mState));
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    // Data will get flushed when the client receives the FLUSHED event.
+    return AAudioServiceStreamBase::flush();
 }
 
 aaudio_result_t AAudioServiceStreamShared::close()  {
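
To make the new buffer sizing concrete, here is a self-contained sketch of the arithmetic in calculateBufferCapacity(). The constants mirror the #defines in the hunk; the burst size of 192 frames, the helper name, and the use of -1 for errors are illustrative only, and AAUDIO_UNSPECIFIED is assumed to be 0 as in the AAudio headers.

    #include <cstdint>
    #include <cstdio>

    // Self-contained restatement of the arithmetic in calculateBufferCapacity();
    // errors are reduced to returning -1 instead of an AAUDIO_ERROR_* code.
    static const int32_t kMinBurstsPerBuffer     = 2;         // MIN_BURSTS_PER_BUFFER
    static const int32_t kDefaultBurstsPerBuffer = 16;        // DEFAULT_BURSTS_PER_BUFFER
    static const int32_t kMaxFramesPerBuffer     = 32 * 1024; // MAX_FRAMES_PER_BUFFER
    static const int32_t kUnspecified            = 0;         // AAUDIO_UNSPECIFIED (assumed 0)

    static int32_t capacitySketch(int32_t requestedCapacityFrames, int32_t framesPerBurst) {
        if (requestedCapacityFrames > kMaxFramesPerBuffer) return -1;
        int32_t numBursts;
        if (requestedCapacityFrames == kUnspecified) {
            // Use fewer bursts than the default if the default would exceed the maximum.
            numBursts = (kDefaultBurstsPerBuffer * framesPerBurst > kMaxFramesPerBuffer)
                    ? (kMaxFramesPerBuffer / framesPerBurst)
                    : kDefaultBurstsPerBuffer;
        } else {
            // Round the request up to a whole number of bursts.
            numBursts = (requestedCapacityFrames + framesPerBurst - 1) / framesPerBurst;
        }
        if (numBursts < kMinBurstsPerBuffer) numBursts = kMinBurstsPerBuffer;
        if (numBursts > 0x8000 || framesPerBurst > 0x8000) return -1;   // overflow guard
        int32_t capacityInFrames = numBursts * framesPerBurst;
        return (capacityInFrames > kMaxFramesPerBuffer) ? -1 : capacityInFrames;
    }

    int main() {
        // With an illustrative burst size of 192 frames:
        printf("%d\n", capacitySketch(kUnspecified, 192)); // default 16 bursts -> 3072
        printf("%d\n", capacitySketch(1000, 192));         // rounds up to 6 bursts -> 1152
        printf("%d\n", capacitySketch(100, 192));          // clipped up to 2 bursts -> 384
        printf("%d\n", capacitySketch(40000, 192));        // above 32 * 1024 -> -1
        return 0;
    }

On any failure, open() now zeroes mCapacityInFrames where needed and jumps to the error: label, which calls close() before returning the error, so a rejected capacity or a failed FIFO allocation cannot leave a half-initialized stream registered with the endpoint.
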
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index dfdbbb3..35af434 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -97,6 +97,14 @@
 
     aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
 
+    /**
+     * Calculate the shared buffer capacity in frames, rounded up to a whole number of bursts.
+     *
+     * @param requestedCapacityFrames requested capacity in frames, or AAUDIO_UNSPECIFIED
+     * @param framesPerBurst frames in one burst of the shared endpoint
+     * @return capacity in frames, or a negative AAUDIO_ERROR_* code
+     */
+    static int32_t calculateBufferCapacity(int32_t requestedCapacityFrames,
+                                            int32_t framesPerBurst);
+
 private:
     android::AAudioService  &mAudioService;
     AAudioServiceEndpoint   *mServiceEndpoint = nullptr;
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
index b447725..7f7d465 100644
--- a/services/oboeservice/Android.mk
+++ b/services/oboeservice/Android.mk
@@ -46,6 +46,7 @@
 LOCAL_SHARED_LIBRARIES :=  \
     libaaudio \
     libaudioflinger \
+    libaudioclient \
     libbinder \
     libcutils \
     libmediautils \
diff --git a/services/radio/RadioHalHidl.cpp b/services/radio/RadioHalHidl.cpp
index bfc5500..f637275 100644
--- a/services/radio/RadioHalHidl.cpp
+++ b/services/radio/RadioHalHidl.cpp
@@ -318,7 +318,7 @@
 {
     ALOGV("%s IN", __FUNCTION__);
     radio_hal_event_t event = {};
-    RadioMetadataWrapper metadataWrapper(&event.info.metadata);
+    RadioMetadataWrapper metadataWrapper(&event.metadata);
 
     event.type = RADIO_EVENT_METADATA;
     HidlUtils::convertMetaDataFromHal(&event.metadata, metadata, channel, subChannel);
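
The RadioHalHidl fix stores the converted metadata in the event's own metadata member rather than in the program-info member. Assuming radio_hal_event_t keeps both members in a union (which is what lets either spelling compile), the corrected flow is sketched below; only the identifiers visible in the hunk are taken from the source, and the comments are interpretation rather than documented behavior.

    radio_hal_event_t event = {};
    // RAII holder for the event's metadata pointer (assumed to allocate here
    // and release it when the wrapper goes out of scope).
    RadioMetadataWrapper metadataWrapper(&event.metadata);

    event.type = RADIO_EVENT_METADATA;
    // For a METADATA event the payload lives in event.metadata;
    // event.info.metadata belongs to the program-info payload used by other event types.
    HidlUtils::convertMetaDataFromHal(&event.metadata, metadata, channel, subChannel);
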