Merge changes from topic 'fix-omx-dependencies' into oc-dr1-dev

* changes:
  Fix OMX dependencies.
  Use vendor variant of libstagefright_foundation.
diff --git a/media/libaaudio/examples/input_monitor/Android.mk b/media/libaaudio/examples/input_monitor/Android.mk
index b56328b..5053e7d 100644
--- a/media/libaaudio/examples/input_monitor/Android.mk
+++ b/media/libaaudio/examples/input_monitor/Android.mk
@@ -1,6 +1 @@
-# include $(call all-subdir-makefiles)
-
-# Just include static/ for now.
-LOCAL_PATH := $(call my-dir)
-#include $(LOCAL_PATH)/jni/Android.mk
-include $(LOCAL_PATH)/static/Android.mk
+include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/examples/input_monitor/jni/Android.mk b/media/libaaudio/examples/input_monitor/jni/Android.mk
index 3e24f9f..9b1ce2c 100644
--- a/media/libaaudio/examples/input_monitor/jni/Android.mk
+++ b/media/libaaudio/examples/input_monitor/jni/Android.mk
@@ -11,7 +11,7 @@
 # NDK recommends using this kind of relative path instead of an absolute path.
 LOCAL_SRC_FILES:= ../src/input_monitor.cpp
 LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := input_monitor_ndk
+LOCAL_MODULE := input_monitor
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
@@ -23,11 +23,11 @@
 
 LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
 LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := input_monitor_callback_ndk
+LOCAL_MODULE := input_monitor_callback
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
 LOCAL_MODULE := libaaudio_prebuilt
 LOCAL_SRC_FILES := libaaudio.so
 LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
-include $(PREBUILT_SHARED_LIBRARY)
\ No newline at end of file
+include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/input_monitor/static/Android.mk b/media/libaaudio/examples/input_monitor/static/Android.mk
deleted file mode 100644
index 80a3906..0000000
--- a/media/libaaudio/examples/input_monitor/static/Android.mk
+++ /dev/null
@@ -1,37 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := samples
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-# TODO reorganize folders to avoid using ../
-LOCAL_SRC_FILES:= ../src/input_monitor.cpp
-
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
-                          libbinder libcutils libutils \
-                          libaudioclient liblog libtinyalsa libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
-
-LOCAL_MODULE := input_monitor
-include $(BUILD_EXECUTABLE)
-
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
-
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
-                          libbinder libcutils libutils \
-                          libaudioclient liblog libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
-
-LOCAL_MODULE := input_monitor_callback
-include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/input_monitor/static/README.md b/media/libaaudio/examples/input_monitor/static/README.md
deleted file mode 100644
index 6e26d7b..0000000
--- a/media/libaaudio/examples/input_monitor/static/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-Makefile for building simple command line examples.
-They link with AAudio as a static library.
diff --git a/media/libaaudio/examples/write_sine/jni/Android.mk b/media/libaaudio/examples/write_sine/jni/Android.mk
index c306ed3..d630e76 100644
--- a/media/libaaudio/examples/write_sine/jni/Android.mk
+++ b/media/libaaudio/examples/write_sine/jni/Android.mk
@@ -11,7 +11,7 @@
 # NDK recommends using this kind of relative path instead of an absolute path.
 LOCAL_SRC_FILES:= ../src/write_sine.cpp
 LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := write_sine_ndk
+LOCAL_MODULE := write_sine
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
@@ -23,7 +23,7 @@
 
 LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
 LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := write_sine_callback_ndk
+LOCAL_MODULE := write_sine_callback
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
diff --git a/media/libaaudio/examples/write_sine/static/Android.mk b/media/libaaudio/examples/write_sine/static/Android.mk
deleted file mode 100644
index 1f8dcd9..0000000
--- a/media/libaaudio/examples/write_sine/static/Android.mk
+++ /dev/null
@@ -1,38 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := samples
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/src \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-# NDK recommends using this kind of relative path instead of an absolute path.
-LOCAL_SRC_FILES:= ../src/write_sine.cpp
-
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
-                          libbinder libcutils libutils \
-                          libaudioclient liblog libtinyalsa libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
-
-LOCAL_MODULE := write_sine
-include $(BUILD_EXECUTABLE)
-
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/media/libaaudio/include \
-    frameworks/av/media/libaaudio/examples/utils
-
-LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
-
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
-                          libbinder libcutils libutils \
-                          libaudioclient liblog libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
-
-LOCAL_MODULE := write_sine_callback
-include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/write_sine/static/README.md b/media/libaaudio/examples/write_sine/static/README.md
deleted file mode 100644
index 6e26d7b..0000000
--- a/media/libaaudio/examples/write_sine/static/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-Makefile for building simple command line examples.
-They link with AAudio as a static library.
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
index b8ef611..ef4a51f 100644
--- a/media/libaaudio/src/binding/IAAudioService.cpp
+++ b/media/libaaudio/src/binding/IAAudioService.cpp
@@ -45,11 +45,9 @@
         Parcel data, reply;
         // send command
         data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
-        ALOGV("BpAAudioService::client openStream --------------------");
         // request.dump();
         request.writeToParcel(&data);
         status_t err = remote()->transact(OPEN_STREAM, data, &reply);
-        ALOGV("BpAAudioService::client openStream returned %d", err);
         if (err != NO_ERROR) {
             ALOGE("BpAAudioService::client openStream transact failed %d", err);
             return AAudioConvert_androidToAAudioResult(err);
@@ -57,6 +55,7 @@
         // parse reply
         aaudio_handle_t stream;
         err = reply.readInt32(&stream);
+        ALOGD("BpAAudioService::client openStream returned stream = 0x%08x", stream);
         if (err != NO_ERROR) {
             ALOGE("BpAAudioService::client transact(OPEN_STREAM) readInt %d", err);
             return AAudioConvert_androidToAAudioResult(err);
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 5cb642b..0684ed6 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -246,3 +246,7 @@
     return (int32_t)mDataQueue->getBufferCapacityInFrames();
 }
 
+void AudioEndpoint::dump() const {
+    ALOGD("AudioEndpoint: data readCounter  = %lld", (long long) mDataQueue->getReadCounter());
+    ALOGD("AudioEndpoint: data writeCounter = %lld", (long long) mDataQueue->getWriteCounter());
+}
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index 53ba033..e7c6916 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -91,6 +91,8 @@
 
     int32_t getBufferCapacityInFrames() const;
 
+    void dump() const;
+
 private:
     android::FifoBuffer    *mUpCommandQueue;
     android::FifoBuffer    *mDataQueue;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 3a827f0..b59c445 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -62,8 +62,9 @@
         , mAudioEndpoint()
         , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
         , mFramesPerBurst(16)
-        , mServiceInterface(serviceInterface)
-        , mInService(inService) {
+        , mStreamVolume(1.0f)
+        , mInService(inService)
+        , mServiceInterface(serviceInterface) {
 }
 
 AudioStreamInternal::~AudioStreamInternal() {
@@ -153,13 +154,13 @@
         if (getDataCallbackProc()) {
             mCallbackFrames = builder.getFramesPerDataCallback();
             if (mCallbackFrames > getBufferCapacity() / 2) {
-                ALOGE("AudioStreamInternal.open(): framesPerCallback too large = %d, capacity = %d",
+                ALOGE("AudioStreamInternal::open(): framesPerCallback too big = %d, capacity = %d",
                       mCallbackFrames, getBufferCapacity());
                 mServiceInterface.closeStream(mServiceStreamHandle);
                 return AAUDIO_ERROR_OUT_OF_RANGE;
 
             } else if (mCallbackFrames < 0) {
-                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
+                ALOGE("AudioStreamInternal::open(): framesPerCallback negative");
                 mServiceInterface.closeStream(mServiceStreamHandle);
                 return AAUDIO_ERROR_OUT_OF_RANGE;
 
@@ -175,12 +176,16 @@
         }
 
         setState(AAUDIO_STREAM_STATE_OPEN);
+        // only connect to AudioManager if this is a playback stream running in client process
+        if (!mInService && getDirection() == AAUDIO_DIRECTION_OUTPUT) {
+            init(android::PLAYER_TYPE_AAUDIO, AUDIO_USAGE_MEDIA);
+        }
     }
     return result;
 }
 
 aaudio_result_t AudioStreamInternal::close() {
-    ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X",
+    ALOGD("AudioStreamInternal::close(): mServiceStreamHandle = 0x%08X",
              mServiceStreamHandle);
     if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
         // Don't close a stream while it is running.
@@ -196,12 +201,14 @@
                 result, AAudio_convertResultToText(result));
             }
         }
+        setState(AAUDIO_STREAM_STATE_CLOSING);
         aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
         mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
 
         mServiceInterface.closeStream(serviceStreamHandle);
         delete[] mCallbackBuffer;
         mCallbackBuffer = nullptr;
+        setState(AAUDIO_STREAM_STATE_CLOSED);
         return mEndPointParcelable.close();
     } else {
         return AAUDIO_ERROR_INVALID_HANDLE;
@@ -223,15 +230,20 @@
 aaudio_result_t AudioStreamInternal::requestStart()
 {
     int64_t startTime;
-    ALOGD("AudioStreamInternal(): start()");
+    ALOGD("AudioStreamInternal()::requestStart()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
+    if (isActive()) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    aaudio_stream_state_t originalState = getState();
+
+    setState(AAUDIO_STREAM_STATE_STARTING);
+    aaudio_result_t result = AAudioConvert_androidToAAudioResult(startWithStatus());
 
     startTime = AudioClock::getNanoseconds();
     mClockModel.start(startTime);
-    setState(AAUDIO_STREAM_STATE_STARTING);
-    aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);;
 
     if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
         // Launch the callback loop thread.
@@ -241,6 +253,9 @@
         mCallbackEnabled.store(true);
         result = createThread(periodNanos, aaudio_callback_thread_proc, this);
     }
+    if (result != AAUDIO_OK) {
+        setState(originalState);
+    }
     return result;
 }
 
@@ -274,14 +289,14 @@
 aaudio_result_t AudioStreamInternal::requestPauseInternal()
 {
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
-        ALOGE("AudioStreamInternal(): requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
+        ALOGE("AudioStreamInternal::requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
               mServiceStreamHandle);
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
     mClockModel.stop(AudioClock::getNanoseconds());
     setState(AAUDIO_STREAM_STATE_PAUSING);
-    return mServiceInterface.pauseStream(mServiceStreamHandle);
+    return AAudioConvert_androidToAAudioResult(pauseWithStatus());
 }
 
 aaudio_result_t AudioStreamInternal::requestPause()
@@ -296,7 +311,7 @@
 
 aaudio_result_t AudioStreamInternal::requestFlush() {
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
-        ALOGE("AudioStreamInternal(): requestFlush() mServiceStreamHandle invalid = 0x%08X",
+        ALOGE("AudioStreamInternal::requestFlush() mServiceStreamHandle invalid = 0x%08X",
               mServiceStreamHandle);
         return AAUDIO_ERROR_INVALID_STATE;
     }
@@ -307,13 +322,14 @@
 
 // TODO for Play only
 void AudioStreamInternal::onFlushFromServer() {
-    ALOGD("AudioStreamInternal(): onFlushFromServer()");
     int64_t readCounter = mAudioEndpoint.getDataReadCounter();
     int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
 
     // Bump offset so caller does not see the retrograde motion in getFramesRead().
     int64_t framesFlushed = writeCounter - readCounter;
     mFramesOffsetFromService += framesFlushed;
+    ALOGD("AudioStreamInternal::onFlushFromServer() readN = %lld, writeN = %lld, offset = %lld",
+          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
 
     // Flush written frames by forcing writeCounter to readCounter.
     // This is because we cannot move the read counter in the hardware.
@@ -323,14 +339,14 @@
 aaudio_result_t AudioStreamInternal::requestStopInternal()
 {
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
-        ALOGE("AudioStreamInternal(): requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
+        ALOGE("AudioStreamInternal::requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
               mServiceStreamHandle);
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
     mClockModel.stop(AudioClock::getNanoseconds());
     setState(AAUDIO_STREAM_STATE_STOPPING);
-    return mServiceInterface.stopStream(mServiceStreamHandle);
+    return AAudioConvert_androidToAAudioResult(stopWithStatus());
 }
 
 aaudio_result_t AudioStreamInternal::requestStop()
@@ -365,7 +381,7 @@
                            int64_t *timeNanoseconds) {
     // TODO Generate in server and pass to client. Return latest.
     int64_t time = AudioClock::getNanoseconds();
-    *framePosition = mClockModel.convertTimeToPosition(time);
+    *framePosition = mClockModel.convertTimeToPosition(time) + mFramesOffsetFromService;
     // TODO Get a more accurate timestamp from the service. This code just adds a fudge factor.
     *timeNanoseconds = time + (6 * AAUDIO_NANOS_PER_MILLISECOND);
     return AAUDIO_OK;
@@ -378,31 +394,28 @@
     return processCommands();
 }
 
-#if LOG_TIMESTAMPS
-static void AudioStreamInternal_logTimestamp(AAudioServiceMessage &command) {
+void AudioStreamInternal::logTimestamp(AAudioServiceMessage &command) {
     static int64_t oldPosition = 0;
     static int64_t oldTime = 0;
     int64_t framePosition = command.timestamp.position;
     int64_t nanoTime = command.timestamp.timestamp;
-    ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %lld",
+    ALOGD("AudioStreamInternal: timestamp says framePosition = %08lld at nanoTime %lld",
          (long long) framePosition,
          (long long) nanoTime);
     int64_t nanosDelta = nanoTime - oldTime;
     if (nanosDelta > 0 && oldTime > 0) {
         int64_t framesDelta = framePosition - oldPosition;
         int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
-        ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
-        ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
-        ALOGD("AudioStreamInternal() - measured rate = %lld", (long long) rate);
+        ALOGD("AudioStreamInternal: framesDelta = %08lld, nanosDelta = %08lld, rate = %lld",
+              (long long) framesDelta, (long long) nanosDelta, (long long) rate);
     }
     oldPosition = framePosition;
     oldTime = nanoTime;
 }
-#endif
 
 aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
 #if LOG_TIMESTAMPS
-    AudioStreamInternal_logTimestamp(*message);
+    logTimestamp(*message);
 #endif
     processTimestamp(message->timestamp.position, message->timestamp.timestamp);
     return AAUDIO_OK;
@@ -412,46 +425,48 @@
     aaudio_result_t result = AAUDIO_OK;
     switch (message->event.event) {
         case AAUDIO_SERVICE_EVENT_STARTED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
+            ALOGD("AudioStreamInternal::onEventFromServergot() AAUDIO_SERVICE_EVENT_STARTED");
             if (getState() == AAUDIO_STREAM_STATE_STARTING) {
                 setState(AAUDIO_STREAM_STATE_STARTED);
             }
             break;
         case AAUDIO_SERVICE_EVENT_PAUSED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
+            ALOGD("AudioStreamInternal::onEventFromServergot() AAUDIO_SERVICE_EVENT_PAUSED");
             if (getState() == AAUDIO_STREAM_STATE_PAUSING) {
                 setState(AAUDIO_STREAM_STATE_PAUSED);
             }
             break;
         case AAUDIO_SERVICE_EVENT_STOPPED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STOPPED");
+            ALOGD("AudioStreamInternal::onEventFromServergot() AAUDIO_SERVICE_EVENT_STOPPED");
             if (getState() == AAUDIO_STREAM_STATE_STOPPING) {
                 setState(AAUDIO_STREAM_STATE_STOPPED);
             }
             break;
         case AAUDIO_SERVICE_EVENT_FLUSHED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
+            ALOGD("AudioStreamInternal::onEventFromServer() got AAUDIO_SERVICE_EVENT_FLUSHED");
             if (getState() == AAUDIO_STREAM_STATE_FLUSHING) {
                 setState(AAUDIO_STREAM_STATE_FLUSHED);
                 onFlushFromServer();
             }
             break;
         case AAUDIO_SERVICE_EVENT_CLOSED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
+            ALOGD("AudioStreamInternal::onEventFromServer() got AAUDIO_SERVICE_EVENT_CLOSED");
             setState(AAUDIO_STREAM_STATE_CLOSED);
             break;
         case AAUDIO_SERVICE_EVENT_DISCONNECTED:
             result = AAUDIO_ERROR_DISCONNECTED;
             setState(AAUDIO_STREAM_STATE_DISCONNECTED);
-            ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
+            ALOGW("WARNING - AudioStreamInternal::onEventFromServer()"
+                          " AAUDIO_SERVICE_EVENT_DISCONNECTED");
             break;
         case AAUDIO_SERVICE_EVENT_VOLUME:
-            mVolumeRamp.setTarget((float) message->event.dataDouble);
-            ALOGD("processCommands() AAUDIO_SERVICE_EVENT_VOLUME %lf",
+            mStreamVolume = (float)message->event.dataDouble;
+            doSetVolume();
+            ALOGD("AudioStreamInternal::onEventFromServer() AAUDIO_SERVICE_EVENT_VOLUME %lf",
                      message->event.dataDouble);
             break;
         default:
-            ALOGW("WARNING - processCommands() Unrecognized event = %d",
+            ALOGW("WARNING - AudioStreamInternal::onEventFromServer() Unrecognized event = %d",
                  (int) message->event.event);
             break;
     }
@@ -493,27 +508,27 @@
 {
     const char * traceName = (mInService) ? "aaWrtS" : "aaWrtC";
     ATRACE_BEGIN(traceName);
-    aaudio_result_t result = AAUDIO_OK;
-    int32_t loopCount = 0;
-    uint8_t* audioData = (uint8_t*)buffer;
-    int64_t currentTimeNanos = AudioClock::getNanoseconds();
-    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
-    int32_t framesLeft = numFrames;
-
     int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
     if (ATRACE_ENABLED()) {
         const char * traceName = (mInService) ? "aaFullS" : "aaFullC";
         ATRACE_INT(traceName, fullFrames);
     }
 
+    aaudio_result_t result = AAUDIO_OK;
+    int32_t loopCount = 0;
+    uint8_t* audioData = (uint8_t*)buffer;
+    int64_t currentTimeNanos = AudioClock::getNanoseconds();
+    const int64_t entryTimeNanos = currentTimeNanos;
+    const int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
+    int32_t framesLeft = numFrames;
+
     // Loop until all the data has been processed or until a timeout occurs.
     while (framesLeft > 0) {
-        // The call to processDataNow() will not block. It will just read as much as it can.
+        // The call to processDataNow() will not block. It will just process as much as it can.
         int64_t wakeTimeNanos = 0;
         aaudio_result_t framesProcessed = processDataNow(audioData, framesLeft,
                                                   currentTimeNanos, &wakeTimeNanos);
         if (framesProcessed < 0) {
-            ALOGE("AudioStreamInternal::processData() loop: framesProcessed = %d", framesProcessed);
             result = framesProcessed;
             break;
         }
@@ -531,12 +546,16 @@
             if (wakeTimeNanos > deadlineNanos) {
                 // If we time out, just return the framesWritten so far.
                 // TODO remove after we fix the deadline bug
-                ALOGE("AudioStreamInternal::processData(): timed out after %lld nanos",
+                ALOGW("AudioStreamInternal::processData(): entered at %lld nanos, currently %lld",
+                      (long long) entryTimeNanos, (long long) currentTimeNanos);
+                ALOGW("AudioStreamInternal::processData(): timed out after %lld nanos",
                       (long long) timeoutNanoseconds);
-                ALOGE("AudioStreamInternal::processData(): wakeTime = %lld, deadline = %lld nanos",
+                ALOGW("AudioStreamInternal::processData(): wakeTime = %lld, deadline = %lld nanos",
                       (long long) wakeTimeNanos, (long long) deadlineNanos);
-                ALOGE("AudioStreamInternal::processData(): past deadline by %d micros",
+                ALOGW("AudioStreamInternal::processData(): past deadline by %d micros",
                       (int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
+                mClockModel.dump();
+                mAudioEndpoint.dump();
                 break;
             }
 
@@ -588,3 +607,32 @@
 aaudio_result_t AudioStreamInternal::joinThread(void** returnArg) {
     return AudioStream::joinThread(returnArg, calculateReasonableTimeout(getFramesPerBurst()));
 }
+
+void AudioStreamInternal::doSetVolume() {
+    // No pan for now; only the left volume from the IPlayer interface is taken into account.
+    mVolumeRamp.setTarget(mStreamVolume * mVolumeMultiplierL /* * mPanMultiplierL */);
+}
+
+
+//------------------------------------------------------------------------------
+// Implementation of PlayerBase
+status_t AudioStreamInternal::playerStart() {
+    return AAudioConvert_aaudioToAndroidStatus(mServiceInterface.startStream(mServiceStreamHandle));
+}
+
+status_t AudioStreamInternal::playerPause() {
+    return AAudioConvert_aaudioToAndroidStatus(mServiceInterface.pauseStream(mServiceStreamHandle));
+}
+
+status_t AudioStreamInternal::playerStop() {
+    return AAudioConvert_aaudioToAndroidStatus(mServiceInterface.stopStream(mServiceStreamHandle));
+}
+
+status_t AudioStreamInternal::playerSetVolume() {
+    doSetVolume();
+    return NO_ERROR;
+}
+
+void AudioStreamInternal::destroy() {
+    baseDestroy();
+}
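
The hunk above completes the PlayerBase integration for AudioStreamInternal: requestStart(), requestPauseInternal() and requestStopInternal() now route through startWithStatus()/pauseWithStatus()/stopWithStatus(), and the playerStart()/playerPause()/playerStop() overrides forward to the AAudio service interface. A minimal standalone sketch of that wrapper pattern, with stub types standing in for the real android::PlayerBase API; the event is reported only when the backend call succeeds:

    // Illustrative sketch only; types and names are stand-ins, not the real API.
    #include <cstdio>

    using status_t = int;
    constexpr status_t NO_ERROR = 0;

    class PlayerSketch {
    public:
        virtual ~PlayerSketch() = default;

        // Report the state change only if the backend call succeeded,
        // mirroring the new PlayerBase::startWithStatus() in this patch.
        status_t startWithStatus() {
            status_t status = playerStart();
            if (status == NO_ERROR) {
                reportEvent("PLAYER_STATE_STARTED");
            } else {
                std::printf("start() error %d\n", status);
            }
            return status;
        }

    protected:
        virtual status_t playerStart() = 0;   // backend-specific, e.g. a service call
        void reportEvent(const char* event) { std::printf("event: %s\n", event); }
    };

    // An AAudio-like backend: delegate to a (stubbed) service interface.
    class AAudioPlayerSketch : public PlayerSketch {
    protected:
        status_t playerStart() override { return NO_ERROR; /* e.g. startStream(handle) */ }
    };

    int main() {
        AAudioPlayerSketch p;
        return p.startWithStatus();
    }
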
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index a11f309..c2c6419 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -18,6 +18,7 @@
 #define ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_H
 
 #include <stdint.h>
+#include <media/PlayerBase.h>
 #include <aaudio/AAudio.h>
 
 #include "binding/IAAudioService.h"
@@ -34,7 +35,7 @@
 namespace aaudio {
 
 // A stream that talks to the AAudioService or directly to a HAL.
-class AudioStreamInternal : public AudioStream {
+class AudioStreamInternal : public AudioStream, public android::PlayerBase  {
 
 public:
     AudioStreamInternal(AAudioServiceInterface  &serviceInterface, bool inService);
@@ -89,6 +90,9 @@
     // Calculate timeout based on framesPerBurst
     int64_t calculateReasonableTimeout();
 
+    //PlayerBase virtuals
+    virtual void destroy();
+
 protected:
 
     aaudio_result_t processData(void *buffer,
@@ -121,9 +125,19 @@
 
     aaudio_result_t onTimestampFromServer(AAudioServiceMessage *message);
 
+    void logTimestamp(AAudioServiceMessage &message);
+
     // Calculate timeout for an operation involving framesPerOperation.
     int64_t calculateReasonableTimeout(int32_t framesPerOperation);
 
+    void doSetVolume();
+
+    //PlayerBase virtuals
+    virtual status_t playerStart();
+    virtual status_t playerPause();
+    virtual status_t playerStop();
+    virtual status_t playerSetVolume();
+
     aaudio_format_t          mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
 
     IsochronousClockModel    mClockModel;      // timing model for chasing the HAL
@@ -135,6 +149,7 @@
     int32_t                  mXRunCount = 0;      // how many underrun events?
 
     LinearRamp               mVolumeRamp;
+    float                    mStreamVolume;
 
     // Offset from underlying frame position.
     int64_t                  mFramesOffsetFromService = 0; // offset for timestamps
@@ -142,6 +157,9 @@
     uint8_t                 *mCallbackBuffer = nullptr;
     int32_t                  mCallbackFrames = 0;
 
+    // The service uses this for SHARED mode.
+    bool                     mInService = false;  // Is this running in the client or the service?
+
 private:
     /*
      * Asynchronous write with data conversion.
@@ -159,8 +177,6 @@
     EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses
     AAudioServiceInterface  &mServiceInterface;   // abstract interface to the service
 
-    // The service uses this for SHARED mode.
-    bool                     mInService = false;  // Is this running in the client or the service?
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 93693bd..22f8bd1 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -14,10 +14,11 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "AAudio"
+#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
+#include <algorithm>
 #include <aaudio/AAudio.h>
 
 #include "client/AudioStreamInternalCapture.h"
@@ -155,29 +156,27 @@
 
     int32_t framesProcessed = numFrames - framesLeft;
     mAudioEndpoint.advanceReadIndex(framesProcessed);
-    incrementFramesRead(framesProcessed);
 
     //ALOGD("AudioStreamInternalCapture::readNowWithConversion() returns %d", framesProcessed);
     return framesProcessed;
 }
 
-int64_t AudioStreamInternalCapture::getFramesWritten()
-{
-    int64_t frames =
-            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
-            + mFramesOffsetFromService;
-    // Prevent retrograde motion.
-    if (frames < mLastFramesWritten) {
-        frames = mLastFramesWritten;
+int64_t AudioStreamInternalCapture::getFramesWritten() {
+    int64_t framesWrittenHardware;
+    if (isActive()) {
+        framesWrittenHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
     } else {
-        mLastFramesWritten = frames;
+        framesWrittenHardware = mAudioEndpoint.getDataWriteCounter();
     }
-    //ALOGD("AudioStreamInternalCapture::getFramesWritten() returns %lld", (long long)frames);
-    return frames;
+    // Prevent retrograde motion.
+    mLastFramesWritten = std::max(mLastFramesWritten,
+                                  framesWrittenHardware + mFramesOffsetFromService);
+    //ALOGD("AudioStreamInternalCapture::getFramesWritten() returns %lld",
+    //      (long long)mLastFramesWritten);
+    return mLastFramesWritten;
 }
 
-int64_t AudioStreamInternalCapture::getFramesRead()
-{
+int64_t AudioStreamInternalCapture::getFramesRead() {
     int64_t frames = mAudioEndpoint.getDataWriteCounter()
                                + mFramesOffsetFromService;
     //ALOGD("AudioStreamInternalCapture::getFramesRead() returns %lld", (long long)frames);
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index fc9766f..76ecbf9 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "AAudio"
+#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
@@ -51,15 +51,17 @@
     }
 
     if (mAudioEndpoint.isFreeRunning()) {
-        //ALOGD("AudioStreamInternal::processDataNow() - update read counter");
         // Update data queue based on the timing model.
         int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
         mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
     }
-    // TODO else query from endpoint cuz set by actual reader, maybe
 
     // If the read index passed the write index then consider it an underrun.
     if (mAudioEndpoint.getFullFramesAvailable() < 0) {
+        ALOGV("AudioStreamInternal::processDataNow() - XRun! write = %d, read = %d",
+              (int)mAudioEndpoint.getDataWriteCounter(),
+              (int)mAudioEndpoint.getDataReadCounter());
         mXRunCount++;
     }
 
@@ -201,9 +203,6 @@
     int32_t framesWritten = numFrames - framesLeft;
     mAudioEndpoint.advanceWriteIndex(framesWritten);
 
-    if (framesWritten > 0) {
-        incrementFramesWritten(framesWritten);
-    }
     // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
     return framesWritten;
 }
@@ -211,25 +210,29 @@
 
 int64_t AudioStreamInternalPlay::getFramesRead()
 {
-    int64_t framesRead =
-            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
-            + mFramesOffsetFromService;
+    int64_t framesReadHardware;
+    if (isActive()) {
+        framesReadHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
+    } else {
+        framesReadHardware = mAudioEndpoint.getDataReadCounter();
+    }
+    int64_t framesRead = framesReadHardware + mFramesOffsetFromService;
     // Prevent retrograde motion.
     if (framesRead < mLastFramesRead) {
         framesRead = mLastFramesRead;
     } else {
         mLastFramesRead = framesRead;
     }
-    ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+    //ALOGD("AudioStreamInternalPlay::getFramesRead() returns %lld", (long long)framesRead);
     return framesRead;
 }
 
 int64_t AudioStreamInternalPlay::getFramesWritten()
 {
-    int64_t getFramesWritten = mAudioEndpoint.getDataWriteCounter()
+    int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
                                + mFramesOffsetFromService;
-    ALOGD("AudioStreamInternal::getFramesWritten() returns %lld", (long long)getFramesWritten);
-    return getFramesWritten;
+    //ALOGD("AudioStreamInternalPlay::getFramesWritten() returns %lld", (long long)framesWritten);
+    return framesWritten;
 }
 
 
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 1de33bb..73f4c1d 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -41,6 +41,13 @@
 IsochronousClockModel::~IsochronousClockModel() {
 }
 
+void IsochronousClockModel::setPositionAndTime(int64_t framePosition, int64_t nanoTime) {
+    ALOGV("IsochronousClockModel::setPositionAndTime(%lld, %lld)",
+          (long long) framePosition, (long long) nanoTime);
+    mMarkerFramePosition = framePosition;
+    mMarkerNanoTime = nanoTime;
+}
+
 void IsochronousClockModel::start(int64_t nanoTime) {
     ALOGD("IsochronousClockModel::start(nanos = %lld)\n", (long long) nanoTime);
     mMarkerNanoTime = nanoTime;
@@ -49,8 +56,8 @@
 
 void IsochronousClockModel::stop(int64_t nanoTime) {
     ALOGD("IsochronousClockModel::stop(nanos = %lld)\n", (long long) nanoTime);
-    mMarkerNanoTime = nanoTime;
-    mMarkerFramePosition = convertTimeToPosition(nanoTime); // TODO should we do this?
+    setPositionAndTime(convertTimeToPosition(nanoTime), nanoTime);
+    // TODO should we set position?
     mState = STATE_STOPPED;
 }
 
@@ -79,15 +86,13 @@
     case STATE_STOPPED:
         break;
     case STATE_STARTING:
-        mMarkerFramePosition = framePosition;
-        mMarkerNanoTime = nanoTime;
+        setPositionAndTime(framePosition, nanoTime);
         mState = STATE_SYNCING;
         break;
     case STATE_SYNCING:
         // This will handle a burst of rapid transfer at the beginning.
         if (nanosDelta < expectedNanosDelta) {
-            mMarkerFramePosition = framePosition;
-            mMarkerNanoTime = nanoTime;
+            setPositionAndTime(framePosition, nanoTime);
         } else {
 //            ALOGD("processTimestamp() - advance to STATE_RUNNING");
             mState = STATE_RUNNING;
@@ -98,17 +103,15 @@
             // Earlier than expected timestamp.
             // This data is probably more accurate so use it.
             // or we may be drifting due to a slow HW clock.
-            mMarkerFramePosition = framePosition;
-            mMarkerNanoTime = nanoTime;
 //            ALOGD("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
 //                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
+            setPositionAndTime(framePosition, nanoTime);
         } else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
             // Later than expected timestamp.
-            mMarkerFramePosition = framePosition;
-            mMarkerNanoTime = nanoTime - mMaxLatenessInNanos;
 //            ALOGD("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
 //                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
 //                 (int) (mMaxLatenessInNanos / 1000));
+            setPositionAndTime(framePosition - mFramesPerBurst,  nanoTime - mMaxLatenessInNanos);
         }
         break;
     default:
@@ -131,8 +134,7 @@
     mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
 }
 
-int64_t IsochronousClockModel::convertDeltaPositionToTime(
-        int64_t framesDelta) const {
+int64_t IsochronousClockModel::convertDeltaPositionToTime(int64_t framesDelta) const {
     return (AAUDIO_NANOS_PER_SECOND * framesDelta) / mSampleRate;
 }
 
@@ -171,3 +173,12 @@
 //         (long long) framesDelta, mFramesPerBurst);
     return position;
 }
+
+void IsochronousClockModel::dump() const {
+    ALOGD("IsochronousClockModel::mMarkerFramePosition = %lld", (long long) mMarkerFramePosition);
+    ALOGD("IsochronousClockModel::mMarkerNanoTime      = %lld", (long long) mMarkerNanoTime);
+    ALOGD("IsochronousClockModel::mSampleRate          = %6d", mSampleRate);
+    ALOGD("IsochronousClockModel::mFramesPerBurst      = %6d", mFramesPerBurst);
+    ALOGD("IsochronousClockModel::mMaxLatenessInNanos  = %6d", mMaxLatenessInNanos);
+    ALOGD("IsochronousClockModel::mState               = %6d", mState);
+}
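
IsochronousClockModel keeps a (frame position, nanoTime) marker and converts deltas between frames and nanoseconds using the sample rate, as in convertDeltaPositionToTime() above. The two conversions in isolation (the constant is a stand-in for AAUDIO_NANOS_PER_SECOND):

    #include <cstdint>
    #include <cstdio>

    constexpr int64_t kNanosPerSecond = 1000000000LL;

    int64_t convertDeltaPositionToTime(int64_t framesDelta, int32_t sampleRate) {
        return (kNanosPerSecond * framesDelta) / sampleRate;
    }

    int64_t convertDeltaTimeToPosition(int64_t nanosDelta, int32_t sampleRate) {
        return (nanosDelta * sampleRate) / kNanosPerSecond;
    }

    int main() {
        // 480 frames at 48 kHz is 10 ms.
        std::printf("%lld ns\n", (long long) convertDeltaPositionToTime(480, 48000));
        return 0;
    }
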
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 0314f55..585f53a 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -43,6 +43,8 @@
      */
     void setSampleRate(int32_t sampleRate);
 
+    void setPositionAndTime(int64_t framePosition, int64_t nanoTime);
+
     int32_t getSampleRate() const {
         return mSampleRate;
     }
@@ -86,6 +88,8 @@
      */
     int64_t convertDeltaTimeToPosition(int64_t nanosDelta) const;
 
+    void dump() const;
+
 private:
     enum clock_model_state_t {
         STATE_STOPPED,
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 76f98fa..3f5de77 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -253,8 +253,10 @@
 AAUDIO_API aaudio_result_t  AAudioStream_requestStart(AAudioStream* stream)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
-    ALOGD("AAudioStream_requestStart(%p)", stream);
-    return audioStream->requestStart();
+    ALOGD("AAudioStream_requestStart(%p) called --------------", stream);
+    aaudio_result_t result = audioStream->requestStart();
+    ALOGD("AAudioStream_requestStart(%p) returned ------------", stream);
+    return result;
 }
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestPause(AAudioStream* stream)
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 5a05c0e..19b08c4 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -36,6 +36,22 @@
     setPeriodNanoseconds(0);
 }
 
+static const char *AudioStream_convertSharingModeToShortText(aaudio_sharing_mode_t sharingMode) {
+    const char *result;
+    switch (sharingMode) {
+        case AAUDIO_SHARING_MODE_EXCLUSIVE:
+            result = "EX";
+            break;
+        case AAUDIO_SHARING_MODE_SHARED:
+            result = "SH";
+            break;
+        default:
+            result = "?!";
+            break;
+    }
+    return result;
+}
+
 aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
 {
     // Call here as well because the AAudioService will call this without calling build().
@@ -62,8 +78,9 @@
     mErrorCallbackUserData = builder.getErrorCallbackUserData();
 
     // This is very helpful for debugging in the future. Please leave it in.
-    ALOGI("AudioStream::open() rate = %d, channels = %d, format = %d, sharing = %d, dir = %s",
-          mSampleRate, mSamplesPerFrame, mFormat, mSharingMode,
+    ALOGI("AudioStream::open() rate = %d, channels = %d, format = %d, sharing = %s, dir = %s",
+          mSampleRate, mSamplesPerFrame, mFormat,
+          AudioStream_convertSharingModeToShortText(mSharingMode),
           (getDirection() == AAUDIO_DIRECTION_OUTPUT) ? "OUTPUT" : "INPUT");
     ALOGI("AudioStream::open() device = %d, perfMode = %d, callbackFrames = %d",
           mDeviceId, mPerformanceMode, mFramesPerDataCallback);
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 39c9f9c..7c7fcc5 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -186,13 +186,9 @@
         return AAudioConvert_formatToSizeInBytes(mFormat);
     }
 
-    virtual int64_t getFramesWritten() {
-        return mFramesWritten.get();
-    }
+    virtual int64_t getFramesWritten() = 0;
 
-    virtual int64_t getFramesRead() {
-        return mFramesRead.get();
-    }
+    virtual int64_t getFramesRead() = 0;
 
     AAudioStream_dataCallback getDataCallbackProc() const {
         return mDataCallbackProc;
@@ -232,13 +228,6 @@
 
 protected:
 
-    virtual int64_t incrementFramesWritten(int32_t frames) {
-        return mFramesWritten.increment(frames);
-    }
-
-    virtual int64_t incrementFramesRead(int32_t frames) {
-        return mFramesRead.increment(frames);
-    }
 
     /**
      * This should not be called after the open() call.
@@ -281,8 +270,6 @@
     std::atomic<bool>    mCallbackEnabled;
 
 protected:
-    MonotonicCounter     mFramesWritten;
-    MonotonicCounter     mFramesRead;
 
     void setPeriodNanoseconds(int64_t periodNanoseconds) {
         mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index 0ded8e1..d2ef3c7 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -74,6 +74,15 @@
 
     virtual int64_t incrementClientFrameCounter(int32_t frames)  = 0;
 
+
+    virtual int64_t getFramesWritten() override {
+        return mFramesWritten.get();
+    }
+
+    virtual int64_t getFramesRead() override {
+        return mFramesRead.get();
+    }
+
 protected:
 
     class StreamDeviceCallback : public android::AudioSystem::AudioDeviceCallback
@@ -103,6 +112,17 @@
     void onStart() { mCallbackEnabled.store(true); }
     void onStop() { mCallbackEnabled.store(false); }
 
+    int64_t incrementFramesWritten(int32_t frames) {
+        return mFramesWritten.increment(frames);
+    }
+
+    int64_t incrementFramesRead(int32_t frames) {
+        return mFramesRead.increment(frames);
+    }
+
+    MonotonicCounter           mFramesWritten;
+    MonotonicCounter           mFramesRead;
+
     FixedBlockAdapter         *mBlockAdapter = nullptr;
     aaudio_wrapping_frames_t   mPositionWhenStarting = 0;
     int32_t                    mCallbackBufferSize = 0;
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 7e39908..77f31e2 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -423,7 +423,7 @@
     default:
         break;
     }
-    return AudioStream::getFramesRead();
+    return AudioStreamLegacy::getFramesRead();
 }
 
 aaudio_result_t AudioStreamTrack::getTimestamp(clockid_t clockId,
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 164784d..2d8ac6e 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -208,9 +208,12 @@
     status_t status;
     switch (result) {
     case AAUDIO_ERROR_DISCONNECTED:
-    case AAUDIO_ERROR_INVALID_HANDLE:
+    case AAUDIO_ERROR_NO_SERVICE:
         status = DEAD_OBJECT;
         break;
+    case AAUDIO_ERROR_INVALID_HANDLE:
+        status = BAD_TYPE;
+        break;
     case AAUDIO_ERROR_INVALID_STATE:
         status = INVALID_OPERATION;
         break;
@@ -233,7 +236,6 @@
     case AAUDIO_ERROR_NO_FREE_HANDLES:
     case AAUDIO_ERROR_NO_MEMORY:
     case AAUDIO_ERROR_TIMEOUT:
-    case AAUDIO_ERROR_NO_SERVICE:
     default:
         status = UNKNOWN_ERROR;
         break;
@@ -257,12 +259,12 @@
     case INVALID_OPERATION:
         result = AAUDIO_ERROR_INVALID_STATE;
         break;
-        case UNEXPECTED_NULL:
-            result = AAUDIO_ERROR_NULL;
-            break;
-        case BAD_VALUE:
-            result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
-            break;
+    case UNEXPECTED_NULL:
+        result = AAUDIO_ERROR_NULL;
+        break;
+    case BAD_VALUE:
+        result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+        break;
     case WOULD_BLOCK:
         result = AAUDIO_ERROR_WOULD_BLOCK;
         break;
diff --git a/media/libaaudio/tests/Android.mk b/media/libaaudio/tests/Android.mk
index afcdebf..e6c779b 100644
--- a/media/libaaudio/tests/Android.mk
+++ b/media/libaaudio/tests/Android.mk
@@ -6,9 +6,7 @@
     frameworks/av/media/libaaudio/include \
     frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_handle_tracker.cpp
-LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
-                          libcutils liblog libmedia libutils libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_SHARED_LIBRARIES := libaaudio
 LOCAL_MODULE := test_handle_tracker
 include $(BUILD_NATIVE_TEST)
 
@@ -18,9 +16,7 @@
     frameworks/av/media/libaaudio/include \
     frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_marshalling.cpp
-LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
-                          libcutils liblog libmedia libutils libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_SHARED_LIBRARIES := libaaudio libbinder libcutils libutils
 LOCAL_MODULE := test_aaudio_marshalling
 include $(BUILD_NATIVE_TEST)
 
@@ -30,9 +26,7 @@
     frameworks/av/media/libaaudio/include \
     frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_block_adapter.cpp
-LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
-                          libcutils liblog libmedia libutils libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_SHARED_LIBRARIES := libaaudio
 LOCAL_MODULE := test_block_adapter
 include $(BUILD_NATIVE_TEST)
 
@@ -42,9 +36,7 @@
     frameworks/av/media/libaaudio/include \
     frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_linear_ramp.cpp
-LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
-                          libcutils liblog libmedia libutils libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_SHARED_LIBRARIES := libaaudio
 LOCAL_MODULE := test_linear_ramp
 include $(BUILD_NATIVE_TEST)
 
@@ -54,8 +46,6 @@
     frameworks/av/media/libaaudio/include \
     frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_open_params.cpp
-LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
-                          libcutils liblog libmedia libutils libaudiomanager
-LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_SHARED_LIBRARIES := libaaudio libbinder libcutils libutils
 LOCAL_MODULE := test_open_params
 include $(BUILD_NATIVE_TEST)
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index d29aa74..13198e8 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -507,6 +507,18 @@
 }
 
 // -------------------------------------------------------------------------
+// TODO Move this macro to a common header file for enum to string conversion in audio framework.
+#define MEDIA_CASE_ENUM(name) case name: return #name
+const char * AudioRecord::convertTransferToText(transfer_type transferType) {
+    switch (transferType) {
+        MEDIA_CASE_ENUM(TRANSFER_DEFAULT);
+        MEDIA_CASE_ENUM(TRANSFER_CALLBACK);
+        MEDIA_CASE_ENUM(TRANSFER_OBTAIN);
+        MEDIA_CASE_ENUM(TRANSFER_SYNC);
+        default:
+            return "UNRECOGNIZED";
+    }
+}
 
 // must be called with mLock held
 status_t AudioRecord::openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName)
@@ -590,12 +602,20 @@
             (mTransfer == TRANSFER_SYNC) ||
             // use case 3: obtain/release mode
             (mTransfer == TRANSFER_OBTAIN);
+        if (!useCaseAllowed) {
+            ALOGW("AUDIO_INPUT_FLAG_FAST denied, incompatible transfer = %s",
+                  convertTransferToText(mTransfer));
+        }
+
         // sample rates must also match
-        bool fastAllowed = useCaseAllowed && (mSampleRate == afSampleRate);
+        bool sampleRateAllowed = mSampleRate == afSampleRate;
+        if (!sampleRateAllowed) {
+            ALOGW("AUDIO_INPUT_FLAG_FAST denied, rates do not match %u Hz, require %u Hz",
+                  mSampleRate, afSampleRate);
+        }
+
+        bool fastAllowed = useCaseAllowed && sampleRateAllowed;
         if (!fastAllowed) {
-            ALOGW("AUDIO_INPUT_FLAG_FAST denied by client; transfer %d, "
-                "track %u Hz, input %u Hz",
-                mTransfer, mSampleRate, afSampleRate);
             mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
                     AUDIO_INPUT_FLAG_RAW));
             AudioSystem::releaseInput(input, mSessionId);
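
MEDIA_CASE_ENUM, added above, relies on the preprocessor stringizing operator so each case returns its own enum name; the TODO notes it should eventually move to a shared header. The same pattern applied to a made-up enum, purely for illustration:

    #include <cstdio>

    #define MEDIA_CASE_ENUM(name) case name: return #name

    enum sketch_mode_t { MODE_IDLE, MODE_ACTIVE };

    static const char* convertModeToText(sketch_mode_t mode) {
        switch (mode) {
            MEDIA_CASE_ENUM(MODE_IDLE);
            MEDIA_CASE_ENUM(MODE_ACTIVE);
            default:
                return "UNRECOGNIZED";
        }
    }

    int main() {
        std::printf("%s\n", convertModeToText(MODE_ACTIVE));  // prints MODE_ACTIVE
        return 0;
    }
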
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 98ec7d7..e1805a7 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -1268,6 +1268,20 @@
     }
 }
 
+// TODO Move this macro to a common header file for enum to string conversion in audio framework.
+#define MEDIA_CASE_ENUM(name) case name: return #name
+const char * AudioTrack::convertTransferToText(transfer_type transferType) {
+    switch (transferType) {
+        MEDIA_CASE_ENUM(TRANSFER_DEFAULT);
+        MEDIA_CASE_ENUM(TRANSFER_CALLBACK);
+        MEDIA_CASE_ENUM(TRANSFER_OBTAIN);
+        MEDIA_CASE_ENUM(TRANSFER_SYNC);
+        MEDIA_CASE_ENUM(TRANSFER_SHARED);
+        default:
+            return "UNRECOGNIZED";
+    }
+}
+
 status_t AudioTrack::createTrack_l()
 {
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
@@ -1343,22 +1357,32 @@
 
     // Client can only express a preference for FAST.  Server will perform additional tests.
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
-        bool useCaseAllowed =
-            // either of these use cases:
-            // use case 1: shared buffer
-            (mSharedBuffer != 0) ||
+        // either of these use cases:
+        // use case 1: shared buffer
+        bool sharedBuffer = mSharedBuffer != 0;
+        bool transferAllowed =
             // use case 2: callback transfer mode
             (mTransfer == TRANSFER_CALLBACK) ||
             // use case 3: obtain/release mode
             (mTransfer == TRANSFER_OBTAIN) ||
             // use case 4: synchronous write
             ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
+
+        bool useCaseAllowed = sharedBuffer || transferAllowed;
+        if (!useCaseAllowed) {
+            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied, not shared buffer and transfer = %s",
+                  convertTransferToText(mTransfer));
+        }
+
         // sample rates must also match
-        bool fastAllowed = useCaseAllowed && (mSampleRate == mAfSampleRate);
+        bool sampleRateAllowed = mSampleRate == mAfSampleRate;
+        if (!sampleRateAllowed) {
+            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied, rates do not match %u Hz, require %u Hz",
+                  mSampleRate, mAfSampleRate);
+        }
+
+        bool fastAllowed = useCaseAllowed && sampleRateAllowed;
         if (!fastAllowed) {
-            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
-                "track %u Hz, output %u Hz",
-                mTransfer, mSampleRate, mAfSampleRate);
             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
         }
     }
diff --git a/media/libaudioclient/PlayerBase.cpp b/media/libaudioclient/PlayerBase.cpp
index cbef1b3..7868318 100644
--- a/media/libaudioclient/PlayerBase.cpp
+++ b/media/libaudioclient/PlayerBase.cpp
@@ -79,7 +79,7 @@
     }
 }
 
-//FIXME temporary method while some AudioTrack state is outside of this class
+//FIXME temporary method while some player state is outside of this class
 void PlayerBase::reportEvent(player_state_t event) {
     servicePlayerEvent(event);
 }
@@ -87,10 +87,30 @@
 status_t PlayerBase::startWithStatus() {
     status_t status = playerStart();
     if (status == NO_ERROR) {
-        ALOGD("PlayerBase::start() from IPlayer");
         servicePlayerEvent(PLAYER_STATE_STARTED);
     } else {
-        ALOGD("PlayerBase::start() no AudioTrack to start from IPlayer");
+        ALOGW("PlayerBase::start() error %d", status);
+    }
+    return status;
+}
+
+status_t PlayerBase::pauseWithStatus() {
+    status_t status = playerPause();
+    if (status == NO_ERROR) {
+        servicePlayerEvent(PLAYER_STATE_PAUSED);
+    } else {
+        ALOGW("PlayerBase::pause() error %d", status);
+    }
+    return status;
+}
+
+
+status_t PlayerBase::stopWithStatus() {
+    status_t status = playerStop();
+    if (status == NO_ERROR) {
+        servicePlayerEvent(PLAYER_STATE_STOPPED);
+    } else {
+        ALOGW("PlayerBase::stop() error %d", status);
     }
     return status;
 }
@@ -98,42 +118,36 @@
 //------------------------------------------------------------------------------
 // Implementation of IPlayer
 void PlayerBase::start() {
+    ALOGD("PlayerBase::start() from IPlayer");
     (void)startWithStatus();
 }
 
 void PlayerBase::pause() {
-    if (playerPause() == NO_ERROR) {
-        ALOGD("PlayerBase::pause() from IPlayer");
-        servicePlayerEvent(PLAYER_STATE_PAUSED);
-    } else {
-        ALOGD("PlayerBase::pause() no AudioTrack to pause from IPlayer");
-    }
+    ALOGD("PlayerBase::pause() from IPlayer");
+    (void)pauseWithStatus();
 }
 
 
 void PlayerBase::stop() {
-    if (playerStop() == NO_ERROR) {
-        ALOGD("PlayerBase::stop() from IPlayer");
-        servicePlayerEvent(PLAYER_STATE_STOPPED);
-    } else {
-        ALOGD("PlayerBase::stop() no AudioTrack to stop from IPlayer");
-    }
+    ALOGD("PlayerBase::stop() from IPlayer");
+    (void)stopWithStatus();
 }
 
 void PlayerBase::setVolume(float vol) {
+    ALOGD("PlayerBase::setVolume() from IPlayer");
     {
         Mutex::Autolock _l(mSettingsLock);
         mVolumeMultiplierL = vol;
         mVolumeMultiplierR = vol;
     }
-    if (playerSetVolume() == NO_ERROR) {
-        ALOGD("PlayerBase::setVolume() from IPlayer");
-    } else {
-        ALOGD("PlayerBase::setVolume() no AudioTrack for volume control from IPlayer");
+    status_t status = playerSetVolume();
+    if (status != NO_ERROR) {
+        ALOGW("PlayerBase::setVolume() error %d", status);
     }
 }
 
 void PlayerBase::setPan(float pan) {
+    ALOGD("PlayerBase::setPan() from IPlayer");
     {
         Mutex::Autolock _l(mSettingsLock);
         pan = min(max(-1.0f, pan), 1.0f);
@@ -145,10 +159,9 @@
             mPanMultiplierR = 1.0f + pan;
         }
     }
-    if (playerSetVolume() == NO_ERROR) {
-        ALOGD("PlayerBase::setPan() from IPlayer");
-    } else {
-        ALOGD("PlayerBase::setPan() no AudioTrack for volume control from IPlayer");
+    status_t status = playerSetVolume();
+    if (status != NO_ERROR) {
+        ALOGW("PlayerBase::setPan() error %d", status);
     }
 }
 
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 1b034b5..825a0a0 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -334,6 +334,12 @@
      */
             status_t getTimestamp(ExtendedTimestamp *timestamp);
 
+    /**
+     * @param transferType
+     * @return text string that matches the enum name
+     */
+    static const char * convertTransferToText(transfer_type transferType);
+
     /* Returns a handle on the audio input used by this AudioRecord.
      *
      * Parameters:
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 5d73df3..c5dfedc 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -564,6 +564,12 @@
      */
             status_t    reload();
 
+    /**
+     * @param transferType
+     * @return text string that matches the enum name
+     */
+            static const char * convertTransferToText(transfer_type transferType);
+
     /* Returns a handle on the audio output used by this AudioTrack.
      *
      * Parameters:
diff --git a/media/libaudioclient/include/media/PlayerBase.h b/media/libaudioclient/include/media/PlayerBase.h
index fe1db7b..e63090b 100644
--- a/media/libaudioclient/include/media/PlayerBase.h
+++ b/media/libaudioclient/include/media/PlayerBase.h
@@ -48,6 +48,8 @@
 
 
             status_t startWithStatus();
+            status_t pauseWithStatus();
+            status_t stopWithStatus();
 
             //FIXME temporary method while some player state is outside of this class
             void reportEvent(player_state_t event);
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 6a5a229..f2b1f10 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -66,7 +66,8 @@
       mNumFramesReceived(0),
       mNumFramesSkipped(0),
       mNumFramesLost(0),
-      mNumClientOwnedBuffers(0) {
+      mNumClientOwnedBuffers(0),
+      mNoMoreFramesToRead(false) {
     ALOGV("sampleRate: %u, outSampleRate: %u, channelCount: %u",
             sampleRate, outSampleRate, channelCount);
     CHECK(channelCount == 1 || channelCount == 2);
@@ -178,6 +179,7 @@
 
     mStarted = false;
     mStopSystemTimeUs = -1;
+    mNoMoreFramesToRead = false;
     mFrameAvailableCondition.signal();
 
     mRecord->stop();
@@ -246,6 +248,9 @@
 
     while (mStarted && mBuffersReceived.empty()) {
         mFrameAvailableCondition.wait(mLock);
+        if (mNoMoreFramesToRead) {
+            return OK;
+        }
     }
     if (!mStarted) {
         return OK;
@@ -359,6 +364,8 @@
     if (mStopSystemTimeUs != -1 && timeUs >= mStopSystemTimeUs) {
         ALOGV("Drop Audio frame at %lld  stop time: %lld us",
                 (long long)timeUs, (long long)mStopSystemTimeUs);
+        mNoMoreFramesToRead = true;
+        mFrameAvailableCondition.signal();
         return OK;
     }
 
diff --git a/media/libstagefright/include/media/stagefright/AudioSource.h b/media/libstagefright/include/media/stagefright/AudioSource.h
index 07a51bf..1595be4 100644
--- a/media/libstagefright/include/media/stagefright/AudioSource.h
+++ b/media/libstagefright/include/media/stagefright/AudioSource.h
@@ -95,6 +95,7 @@
     int64_t mNumFramesSkipped;
     int64_t mNumFramesLost;
     int64_t mNumClientOwnedBuffers;
+    bool mNoMoreFramesToRead;
 
     List<MediaBuffer * > mBuffersReceived;
 
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 16c7f2d..f1cdea3 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -54,7 +54,8 @@
     gui/RingBufferConsumer.cpp \
     utils/CameraTraces.cpp \
     utils/AutoConditionLock.cpp \
-    utils/TagMonitor.cpp
+    utils/TagMonitor.cpp \
+    utils/LatencyHistogram.cpp
 
 LOCAL_SHARED_LIBRARIES:= \
     libui \
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index d6ed3ff..a11f4e2 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -613,6 +613,11 @@
     }
     write(fd, lines.string(), lines.size());
 
+    if (mRequestThread != NULL) {
+        mRequestThread->dumpCaptureRequestLatency(fd,
+                "    ProcessCaptureRequest latency histogram:");
+    }
+
     {
         lines = String8("    Last request sent:\n");
         write(fd, lines.string(), lines.size());
@@ -3426,7 +3431,8 @@
         mCurrentPreCaptureTriggerId(0),
         mRepeatingLastFrameNumber(
             hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES),
-        mPrepareVideoStream(false) {
+        mPrepareVideoStream(false),
+        mRequestLatency(kRequestLatencyBinSize) {
     mStatusId = statusTracker->addComponent();
 }
 
@@ -3651,6 +3657,9 @@
     // The exit from any possible waits
     mDoPauseSignal.signal();
     mRequestSignal.signal();
+
+    mRequestLatency.log("ProcessCaptureRequest latency histogram");
+    mRequestLatency.reset();
 }
 
 void Camera3Device::RequestThread::checkAndStopRepeatingRequest() {
@@ -3867,11 +3876,14 @@
             mNextRequests.size());
 
     bool submitRequestSuccess = false;
+    nsecs_t tRequestStart = systemTime(SYSTEM_TIME_MONOTONIC);
     if (mInterface->supportBatchRequest()) {
         submitRequestSuccess = sendRequestsBatch();
     } else {
         submitRequestSuccess = sendRequestsOneByOne();
     }
+    nsecs_t tRequestEnd = systemTime(SYSTEM_TIME_MONOTONIC);
+    mRequestLatency.add(tRequestStart, tRequestEnd);
 
     if (useFlushLock) {
         mFlushLock.unlock();
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 1ca6811..bfb58c6 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -40,6 +40,7 @@
 #include "device3/StatusTracker.h"
 #include "device3/Camera3BufferManager.h"
 #include "utils/TagMonitor.h"
+#include "utils/LatencyHistogram.h"
 #include <camera_metadata_hidden.h>
 
 /**
@@ -699,6 +700,11 @@
          */
         bool isStreamPending(sp<camera3::Camera3StreamInterface>& stream);
 
+        // dump processCaptureRequest latency
+        void dumpCaptureRequestLatency(int fd, const char* name) {
+            mRequestLatency.dump(fd, name);
+        }
+
       protected:
 
         virtual bool threadLoop();
@@ -820,6 +826,9 @@
 
         // Flag indicating if we should prepare video stream for video requests.
         bool               mPrepareVideoStream;
+
+        static const int32_t kRequestLatencyBinSize = 40; // in ms
+        CameraLatencyHistogram mRequestLatency;
     };
     sp<RequestThread> mRequestThread;
 
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index cb39244..7ad2300 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -85,6 +85,8 @@
     lines.appendFormat("      Total buffers: %zu, currently dequeued: %zu\n",
             mTotalBufferCount, mHandoutTotalBufferCount);
     write(fd, lines.string(), lines.size());
+
+    Camera3Stream::dump(fd, args);
 }
 
 status_t Camera3IOStreamBase::configureQueueLocked() {
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index ec0f508..e15aa43 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -43,7 +43,8 @@
         mTraceFirstBuffer(true),
         mUseBufferManager(false),
         mTimestampOffset(timestampOffset),
-        mConsumerUsage(0) {
+        mConsumerUsage(0),
+        mDequeueBufferLatency(kDequeueLatencyBinSize) {
 
     if (mConsumer == NULL) {
         ALOGE("%s: Consumer is NULL!", __FUNCTION__);
@@ -68,7 +69,8 @@
         mUseMonoTimestamp(false),
         mUseBufferManager(false),
         mTimestampOffset(timestampOffset),
-        mConsumerUsage(0) {
+        mConsumerUsage(0),
+        mDequeueBufferLatency(kDequeueLatencyBinSize) {
 
     if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
         ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__,
@@ -97,7 +99,8 @@
         mTraceFirstBuffer(true),
         mUseBufferManager(false),
         mTimestampOffset(timestampOffset),
-        mConsumerUsage(consumerUsage) {
+        mConsumerUsage(consumerUsage),
+        mDequeueBufferLatency(kDequeueLatencyBinSize) {
     // Deferred consumer only support preview surface format now.
     if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
         ALOGE("%s: Deferred consumer only supports IMPLEMENTATION_DEFINED format now!",
@@ -134,7 +137,8 @@
         mUseMonoTimestamp(false),
         mUseBufferManager(false),
         mTimestampOffset(timestampOffset),
-        mConsumerUsage(consumerUsage) {
+        mConsumerUsage(consumerUsage),
+        mDequeueBufferLatency(kDequeueLatencyBinSize) {
 
     if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
         mBufferReleasedListener = new BufferReleasedListener(this);
@@ -290,6 +294,9 @@
     write(fd, lines.string(), lines.size());
 
     Camera3IOStreamBase::dump(fd, args);
+
+    mDequeueBufferLatency.dump(fd,
+        "      DequeueBuffer latency histogram:");
 }
 
 status_t Camera3OutputStream::setTransform(int transform) {
@@ -529,7 +536,11 @@
         sp<ANativeWindow> currentConsumer = mConsumer;
         mLock.unlock();
 
+        nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);
         res = currentConsumer->dequeueBuffer(currentConsumer.get(), anb, fenceFd);
+        nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
+        mDequeueBufferLatency.add(dequeueStart, dequeueEnd);
+
         mLock.lock();
         if (res != OK) {
             ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
@@ -611,6 +622,9 @@
 
     mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
                                            : STATE_CONSTRUCTED;
+
+    mDequeueBufferLatency.log("Stream %d dequeueBuffer latency histogram", mId);
+    mDequeueBufferLatency.reset();
     return OK;
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 98ffb73..97aa7d4 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -21,6 +21,7 @@
 #include <gui/IProducerListener.h>
 #include <gui/Surface.h>
 
+#include "utils/LatencyHistogram.h"
 #include "Camera3Stream.h"
 #include "Camera3IOStreamBase.h"
 #include "Camera3OutputStreamInterface.h"
@@ -269,6 +270,9 @@
     void onBuffersRemovedLocked(const std::vector<sp<GraphicBuffer>>&);
     status_t detachBufferLocked(sp<GraphicBuffer>* buffer, int* fenceFd);
 
+    static const int32_t kDequeueLatencyBinSize = 5; // in ms
+    CameraLatencyHistogram mDequeueBufferLatency;
+
 }; // class Camera3OutputStream
 
 } // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index b45ef77..ba352c4 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -60,7 +60,8 @@
     mOldMaxBuffers(0),
     mPrepared(false),
     mPreparedBufferIdx(0),
-    mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX) {
+    mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX),
+    mBufferLimitLatency(kBufferLimitLatencyBinSize) {
 
     camera3_stream::stream_type = type;
     camera3_stream::width = width;
@@ -459,8 +460,11 @@
     // Wait for new buffer returned back if we are running into the limit.
     if (getHandoutOutputBufferCountLocked() == camera3_stream::max_buffers) {
         ALOGV("%s: Already dequeued max output buffers (%d), wait for next returned one.",
-                __FUNCTION__, camera3_stream::max_buffers);
+                        __FUNCTION__, camera3_stream::max_buffers);
+        nsecs_t waitStart = systemTime(SYSTEM_TIME_MONOTONIC);
         res = mOutputBufferReturnedSignal.waitRelative(mLock, kWaitForBufferDuration);
+        nsecs_t waitEnd = systemTime(SYSTEM_TIME_MONOTONIC);
+        mBufferLimitLatency.add(waitStart, waitEnd);
         if (res != OK) {
             if (res == TIMED_OUT) {
                 ALOGE("%s: wait for output buffer return timed out after %lldms (max_buffers %d)",
@@ -655,6 +659,9 @@
     ALOGV("%s: Stream %d: Disconnecting...", __FUNCTION__, mId);
     status_t res = disconnectLocked();
 
+    mBufferLimitLatency.log("Stream %d latency histogram for wait on max_buffers", mId);
+    mBufferLimitLatency.reset();
+
     if (res == -ENOTCONN) {
         // "Already disconnected" -- not an error
         return OK;
@@ -663,6 +670,13 @@
     }
 }
 
+void Camera3Stream::dump(int fd, const Vector<String16> &args) const
+{
+    (void)args;
+    mBufferLimitLatency.dump(fd,
+            "      Latency histogram for wait on max_buffers");
+}
+
 status_t Camera3Stream::getBufferLocked(camera3_stream_buffer *,
         const std::vector<size_t>&) {
     ALOGE("%s: This type of stream does not support output", __FUNCTION__);
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 9cdc1b3..b5a9c5d 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -25,6 +25,7 @@
 
 #include "hardware/camera3.h"
 
+#include "utils/LatencyHistogram.h"
 #include "Camera3StreamBufferListener.h"
 #include "Camera3StreamInterface.h"
 
@@ -349,7 +350,7 @@
     /**
      * Debug dump of the stream's state.
      */
-    virtual void     dump(int fd, const Vector<String16> &args) const = 0;
+    virtual void     dump(int fd, const Vector<String16> &args) const;
 
     /**
      * Add a camera3 buffer listener. Adding the same listener twice has
@@ -502,6 +503,10 @@
     // Outstanding buffers dequeued from the stream's buffer queue.
     List<buffer_handle_t> mOutstandingBuffers;
 
+    // Latency histogram of the wait time for handout buffer count to drop below
+    // max_buffers.
+    static const int32_t kBufferLimitLatencyBinSize = 33; //in ms
+    CameraLatencyHistogram mBufferLimitLatency;
 }; // class Camera3Stream
 
 }; // namespace camera3
diff --git a/services/camera/libcameraservice/utils/LatencyHistogram.cpp b/services/camera/libcameraservice/utils/LatencyHistogram.cpp
new file mode 100644
index 0000000..538bb6e
--- /dev/null
+++ b/services/camera/libcameraservice/utils/LatencyHistogram.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraLatencyHistogram"
+#include <inttypes.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+#include "LatencyHistogram.h"
+
+namespace android {
+
+CameraLatencyHistogram::CameraLatencyHistogram(int32_t binSizeMs, int32_t binCount) :
+        mBinSizeMs(binSizeMs),
+        mBinCount(binCount),
+        mBins(binCount),
+        mTotalCount(0) {
+}
+
+void CameraLatencyHistogram::add(nsecs_t start, nsecs_t end) {
+    nsecs_t duration = end - start;
+    int32_t durationMs = static_cast<int32_t>(duration / 1000000LL);
+    int32_t binIndex = durationMs / mBinSizeMs;
+
+    if (binIndex < 0) {
+        binIndex = 0;
+    } else if (binIndex >= mBinCount) {
+        binIndex = mBinCount-1;
+    }
+
+    mBins[binIndex]++;
+    mTotalCount++;
+}
+
+void CameraLatencyHistogram::reset() {
+    mBins.assign(mBinCount, 0); // keep mBinCount zeroed bins; clear() alone would leave add() indexing an empty vector
+    mTotalCount = 0;
+}
+
+void CameraLatencyHistogram::dump(int fd, const char* name) const {
+    if (mTotalCount == 0) {
+        return;
+    }
+
+    String8 lines;
+    lines.appendFormat("%s (%" PRIu64 ") samples\n", name, mTotalCount);
+
+    String8 lineBins, lineBinCounts;
+    formatHistogramText(lineBins, lineBinCounts);
+
+    lineBins.append("\n");
+    lineBinCounts.append("\n");
+    lines.append(lineBins);
+    lines.append(lineBinCounts);
+
+    write(fd, lines.string(), lines.size());
+}
+
+void CameraLatencyHistogram::log(const char* fmt, ...) {
+    if (mTotalCount == 0) {
+        return;
+    }
+
+    va_list args;
+    va_start(args, fmt);
+    String8 histogramName = String8::formatV(fmt, args);
+    ALOGI("%s (%" PRId64 ") samples:", histogramName.string(), mTotalCount);
+    va_end(args);
+
+    String8 lineBins, lineBinCounts;
+    formatHistogramText(lineBins, lineBinCounts);
+
+    ALOGI("%s", lineBins.c_str());
+    ALOGI("%s", lineBinCounts.c_str());
+}
+
+void CameraLatencyHistogram::formatHistogramText(
+        String8& lineBins, String8& lineBinCounts) const {
+    lineBins = "  ";
+    lineBinCounts = "  ";
+
+    for (int32_t i = 0; i < mBinCount; i++) {
+        if (i == mBinCount - 1) {
+            lineBins.append("    inf (max ms)");
+        } else {
+            lineBins.appendFormat("%7d", mBinSizeMs*(i+1));
+        }
+        lineBinCounts.appendFormat("   %02.2f", 100.0*mBins[i]/mTotalCount);
+    }
+    lineBinCounts.append(" (%)");
+}
+
+}; //namespace android
diff --git a/services/camera/libcameraservice/utils/LatencyHistogram.h b/services/camera/libcameraservice/utils/LatencyHistogram.h
new file mode 100644
index 0000000..bfd9b1b
--- /dev/null
+++ b/services/camera/libcameraservice/utils/LatencyHistogram.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_LATENCY_HISTOGRAM_H_
+#define ANDROID_SERVERS_CAMERA_LATENCY_HISTOGRAM_H_
+
+#include <vector>
+
+#include <utils/Timers.h>
+#include <utils/String8.h>
+
+namespace android {
+
+// Histogram for camera latency characteristic
+class CameraLatencyHistogram {
+public:
+    CameraLatencyHistogram() = delete;
+    CameraLatencyHistogram(int32_t binSizeMs, int32_t binCount=10);
+    void add(nsecs_t start, nsecs_t end);
+    void reset();
+
+    void dump(int fd, const char* name) const;
+    void log(const char* format, ...);
+private:
+    int32_t mBinSizeMs;
+    int32_t mBinCount;
+    std::vector<int64_t> mBins;
+    uint64_t mTotalCount;
+
+    void formatHistogramText(String8& lineBins, String8& lineBinCounts) const;
+}; // class CameraLatencyHistogram
+
+}; // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA_LATENCY_HISTOGRAM_H_
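For illustration only (not part of the patch), a minimal usage sketch of the new CameraLatencyHistogram, following the pattern the Camera3OutputStream and Camera3Stream hunks use: bracket the measured operation with systemTime(SYSTEM_TIME_MONOTONIC), feed the pair to add(), and periodically dump() or log() and reset(). The 5 ms bin size matches kDequeueLatencyBinSize in this patch; the wrapper function and variable names are hypothetical.

#include <utils/Timers.h>
#include "LatencyHistogram.h"

using android::CameraLatencyHistogram;

static CameraLatencyHistogram gDequeueLatency(5 /*binSizeMs*/); // 10 bins by default

void timeDequeue(int fd) {
    nsecs_t start = systemTime(SYSTEM_TIME_MONOTONIC);
    // ... the operation being measured, e.g. a buffer dequeue ...
    nsecs_t end = systemTime(SYSTEM_TIME_MONOTONIC);
    gDequeueLatency.add(start, end);                                 // bumps the bin for (end - start) ms

    gDequeueLatency.dump(fd, "  DequeueBuffer latency histogram:");  // human-readable dump to a fd
    gDequeueLatency.log("dequeueBuffer latency histogram");          // same data via ALOGI
    gDequeueLatency.reset();                                         // start a new measurement window
}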
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index 3dc1feb..5c6825d 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -52,15 +52,17 @@
             assert(false); // There are only two possible directions.
             break;
     }
-    ALOGD("AAudioEndpointManager::openEndpoint(), found %p for device = %d, dir = %d",
-          endpoint, deviceId, (int)direction);
 
     // If we can't find an existing one then open a new one.
-    if (endpoint == nullptr) {
+    if (endpoint != nullptr) {
+        ALOGD("AAudioEndpointManager::openEndpoint(), found %p for device = %d, dir = %d",
+              endpoint, deviceId, (int)direction);
+
+    } else {
         if (direction == AAUDIO_DIRECTION_INPUT) {
             AAudioServiceEndpointCapture *capture = new AAudioServiceEndpointCapture(audioService);
             if (capture->open(deviceId) != AAUDIO_OK) {
-                ALOGE("AAudioEndpointManager::openEndpoint(), open failed");
+                ALOGE("AAudioEndpointManager::openEndpoint(), open input failed");
                 delete capture;
             } else {
                 mInputs[deviceId] = capture;
@@ -69,17 +71,20 @@
         } else if (direction == AAUDIO_DIRECTION_OUTPUT) {
             AAudioServiceEndpointPlay *player = new AAudioServiceEndpointPlay(audioService);
             if (player->open(deviceId) != AAUDIO_OK) {
-                ALOGE("AAudioEndpointManager::openEndpoint(), open failed");
+                ALOGE("AAudioEndpointManager::openEndpoint(), open output failed");
                 delete player;
             } else {
                 mOutputs[deviceId] = player;
                 endpoint = player;
             }
         }
-
+        ALOGD("AAudioEndpointManager::openEndpoint(), created %p for device = %d, dir = %d",
+              endpoint, deviceId, (int)direction);
     }
 
     if (endpoint != nullptr) {
+        ALOGD("AAudioEndpointManager::openEndpoint(), sampleRate = %d, framesPerBurst = %d",
+              endpoint->getSampleRate(), endpoint->getFramesPerBurst());
         // Increment the reference count under this lock.
         endpoint->setReferenceCount(endpoint->getReferenceCount() + 1);
     }
@@ -95,9 +100,15 @@
     // Decrement the reference count under this lock.
     int32_t newRefCount = serviceEndpoint->getReferenceCount() - 1;
     serviceEndpoint->setReferenceCount(newRefCount);
+    ALOGD("AAudioEndpointManager::closeEndpoint(%p) newRefCount = %d",
+          serviceEndpoint, newRefCount);
+
+    // If no longer in use then close and delete it.
     if (newRefCount <= 0) {
         aaudio_direction_t direction = serviceEndpoint->getDirection();
-        int32_t deviceId = serviceEndpoint->getDeviceId();
+        // Track endpoints based on requested deviceId because UNSPECIFIED
+        // can change to a specific device after opening.
+        int32_t deviceId = serviceEndpoint->getRequestedDeviceId();
 
         switch (direction) {
             case AAUDIO_DIRECTION_INPUT:
@@ -109,6 +120,8 @@
         }
 
         serviceEndpoint->close();
+        ALOGD("AAudioEndpointManager::closeEndpoint() delete %p for device %d, dir = %d",
+              serviceEndpoint, deviceId, (int)direction);
         delete serviceEndpoint;
     }
 }
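For illustration only (not part of the patch): a standalone sketch of the reference-counting scheme the AAudioEndpointManager changes above follow. Endpoints are shared per requested device id (which can be AAUDIO_UNSPECIFIED and may resolve to a different id once opened), handed out with an incremented reference count, and closed and deleted when the count drops to zero. EndpointCache and its members are hypothetical.

#include <cstdint>
#include <map>

struct Endpoint {
    explicit Endpoint(int32_t id) : requestedDeviceId(id) {}
    const int32_t requestedDeviceId;  // key used for both lookup and removal
    int refCount = 0;
};

class EndpointCache {
public:
    Endpoint *open(int32_t requestedDeviceId) {
        Endpoint *&slot = mEndpoints[requestedDeviceId];  // find or create the slot
        if (slot == nullptr) {
            slot = new Endpoint(requestedDeviceId);
        }
        slot->refCount++;                                  // one more client sharing it
        return slot;
    }
    void close(Endpoint *endpoint) {
        if (--endpoint->refCount <= 0) {
            // Erase by the id it was requested with, not the id it resolved to.
            mEndpoints.erase(endpoint->requestedDeviceId);
            delete endpoint;
        }
    }
private:
    std::map<int32_t, Endpoint*> mEndpoints;
};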
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
index db1103d..899ea35 100644
--- a/services/oboeservice/AAudioEndpointManager.h
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -35,7 +35,7 @@
 
     /**
      * Find a service endpoint for the given deviceId and direction.
-     * If an endpoint does not already exist then it will try to create one.
+     * If an endpoint does not already exist then try to create one.
      *
      * @param deviceId
      * @param direction
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index c9b9065..b0e0a74 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -89,7 +89,7 @@
         return result;
     } else {
         aaudio_handle_t handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
-        ALOGV("AAudioService::openStream(): handle = 0x%08X", handle);
+        ALOGD("AAudioService::openStream(): handle = 0x%08X", handle);
         if (handle < 0) {
             ALOGE("AAudioService::openStream(): handle table full");
             delete serviceStream;
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index d8ae284..cc2cb44 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -46,6 +46,7 @@
 
 // Set up an EXCLUSIVE MMAP stream that will be shared.
 aaudio_result_t AAudioServiceEndpoint::open(int32_t deviceId) {
+    mRequestedDeviceId = deviceId;
     mStreamInternal = getStreamInternal();
 
     AudioStreamBuilder builder;
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 50bf049..c271dbd 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -48,6 +48,7 @@
     aaudio_result_t stopStream(AAudioServiceStreamShared *sharedStream);
     aaudio_result_t close();
 
+    int32_t getRequestedDeviceId() const { return mRequestedDeviceId; }
     int32_t getDeviceId() const { return mStreamInternal->getDeviceId(); }
 
     aaudio_direction_t getDirection() const { return mStreamInternal->getDirection(); }
@@ -81,6 +82,7 @@
 
     AudioStreamInternal     *mStreamInternal = nullptr;
     int32_t                  mReferenceCount = 0;
+    int32_t                  mRequestedDeviceId = 0;
 };
 
 } /* namespace aaudio */
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.h b/services/oboeservice/AAudioServiceEndpointPlay.h
index b977960..89935ae 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.h
+++ b/services/oboeservice/AAudioServiceEndpointPlay.h
@@ -32,6 +32,9 @@
 
 namespace aaudio {
 
+/**
+ * Contains a mixer and a stream for writing the result of the mix.
+ */
 class AAudioServiceEndpointPlay : public AAudioServiceEndpoint {
 public:
     explicit AAudioServiceEndpointPlay(android::AAudioService &audioService);
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 8f0abc2..ee0e7ed 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -71,12 +71,11 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::pause() {
-
     sendCurrentTimestamp();
     mThreadEnabled.store(false);
     aaudio_result_t result = mAAudioThread.stop();
     if (result != AAUDIO_OK) {
-        processError();
+        processFatalError();
         return result;
     }
     sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
@@ -90,7 +89,7 @@
     mThreadEnabled.store(false);
     aaudio_result_t result = mAAudioThread.stop();
     if (result != AAUDIO_OK) {
-        processError();
+        processFatalError();
         return result;
     }
     sendServiceEvent(AAUDIO_SERVICE_EVENT_STOPPED);
@@ -126,7 +125,7 @@
     ALOGD("AAudioServiceStreamBase::run() exiting ----------------");
 }
 
-void AAudioServiceStreamBase::processError() {
+void AAudioServiceStreamBase::processFatalError() {
     sendServiceEvent(AAUDIO_SERVICE_EVENT_DISCONNECTED);
 }
 
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index ee52c39..46ceeae 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -111,7 +111,7 @@
 
     void run() override; // to implement Runnable
 
-    void processError();
+    void processFatalError();
 
 protected:
     aaudio_result_t writeUpMessageQueue(AAudioServiceMessage *command);
@@ -122,16 +122,16 @@
 
     virtual aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) = 0;
 
-    aaudio_stream_state_t               mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+    aaudio_stream_state_t   mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
 
     pid_t              mRegisteredClientThread = ILLEGAL_THREAD_ID;
 
     SharedRingBuffer*  mUpMessageQueue;
     std::mutex         mLockUpMessageQueue;
 
-    AAudioThread        mAAudioThread;
+    AAudioThread       mAAudioThread;
     // This is used by one thread to tell another thread to exit. So it must be atomic.
-    std::atomic<bool>   mThreadEnabled;
+    std::atomic<bool>  mThreadEnabled;
 
     aaudio_format_t    mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
     int32_t            mFramesPerBurst = 0;
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 97b9937..2f3ec27 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -138,15 +138,19 @@
         return AAUDIO_ERROR_UNAVAILABLE;
     }
 
+    if (deviceId == AAUDIO_UNSPECIFIED) {
+        ALOGW("AAudioServiceStreamMMAP::open() - openMmapStream() failed to set deviceId");
+    }
+
     // Create MMAP/NOIRQ buffer.
     int32_t minSizeFrames = configurationInput.getBufferCapacity();
-    if (minSizeFrames == 0) { // zero will get rejected
+    if (minSizeFrames <= 0) { // zero will get rejected
         minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
     }
     status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
     if (status != OK) {
-        ALOGE("%s: createMmapBuffer() returned status %d, return AAUDIO_ERROR_UNAVAILABLE",
-              __FILE__, status);
+        ALOGE("AAudioServiceStreamMMAP::open() - createMmapBuffer() returned status %d",
+              status);
         return AAUDIO_ERROR_UNAVAILABLE;
     } else {
         ALOGD("createMmapBuffer status %d shared_address = %p buffer_size %d burst_size %d",
@@ -181,6 +185,9 @@
     ALOGD("AAudioServiceStreamMMAP::open() original burst = %d, minMicros = %d, final burst = %d\n",
           mMmapBufferinfo.burst_size_frames, burstMinMicros, mFramesPerBurst);
 
+    ALOGD("AAudioServiceStreamMMAP::open() actual rate = %d, channels = %d, deviceId = %d\n",
+          mSampleRate, mSamplesPerFrame, deviceId);
+
     // Fill in AAudioStreamConfiguration
     configurationOutput.setSampleRate(mSampleRate);
     configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
@@ -199,7 +206,7 @@
     status_t status = mMmapStream->start(mMmapClient, &mPortHandle);
     if (status != OK) {
         ALOGE("AAudioServiceStreamMMAP::start() mMmapStream->start() returned %d", status);
-        processError();
+        processFatalError();
         result = AAudioConvert_androidToAAudioResult(status);
     } else {
         result = AAudioServiceStreamBase::start();
@@ -234,8 +241,6 @@
 aaudio_result_t AAudioServiceStreamMMAP::flush() {
     if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
     // TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
-    sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
-    mState = AAUDIO_STREAM_STATE_FLUSHED;
     return AAudioServiceStreamBase::flush();;
 }
 
@@ -244,13 +249,13 @@
                                                                 int64_t *timeNanos) {
     struct audio_mmap_position position;
     if (mMmapStream == nullptr) {
-        processError();
+        processFatalError();
         return AAUDIO_ERROR_NULL;
     }
     status_t status = mMmapStream->getMmapPosition(&position);
     if (status != OK) {
         ALOGE("sendCurrentTimestamp(): getMmapPosition() returned %d", status);
-        processError();
+        processFatalError();
         return AAudioConvert_androidToAAudioResult(status);
     } else {
         mFramesRead.update32(position.position_frames);
@@ -295,4 +300,4 @@
     parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
     parcelable.mDownDataQueueParcelable.setCapacityInFrames(mCapacityInFrames);
     return AAUDIO_OK;
-}
\ No newline at end of file
+}
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index 494b18e..f246fc02 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -34,8 +34,10 @@
 using namespace android;
 using namespace aaudio;
 
-#define MIN_BURSTS_PER_BUFFER   2
-#define MAX_BURSTS_PER_BUFFER   32
+#define MIN_BURSTS_PER_BUFFER       2
+#define DEFAULT_BURSTS_PER_BUFFER   16
+// This is an arbitrary range. TODO review.
+#define MAX_FRAMES_PER_BUFFER       (32 * 1024)
 
 AAudioServiceStreamShared::AAudioServiceStreamShared(AAudioService &audioService)
     : mAudioService(audioService)
@@ -46,12 +48,58 @@
     close();
 }
 
+int32_t AAudioServiceStreamShared::calculateBufferCapacity(int32_t requestedCapacityFrames,
+                                                           int32_t framesPerBurst) {
+
+    if (requestedCapacityFrames > MAX_FRAMES_PER_BUFFER) {
+        ALOGE("AAudioServiceStreamShared::open(), requested capacity %d > max %d",
+              requestedCapacityFrames, MAX_FRAMES_PER_BUFFER);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+
+    // Determine how many bursts will fit in the buffer.
+    int32_t numBursts;
+    if (requestedCapacityFrames == AAUDIO_UNSPECIFIED) {
+        // Use fewer bursts if default is too many.
+        if ((DEFAULT_BURSTS_PER_BUFFER * framesPerBurst) > MAX_FRAMES_PER_BUFFER) {
+            numBursts = MAX_FRAMES_PER_BUFFER / framesPerBurst;
+        } else {
+            numBursts = DEFAULT_BURSTS_PER_BUFFER;
+        }
+    } else {
+        // round up to nearest burst boundary
+        numBursts = (requestedCapacityFrames + framesPerBurst - 1) / framesPerBurst;
+    }
+
+    // Clip to bare minimum.
+    if (numBursts < MIN_BURSTS_PER_BUFFER) {
+        numBursts = MIN_BURSTS_PER_BUFFER;
+    }
+    // Check for numeric overflow.
+    if (numBursts > 0x8000 || framesPerBurst > 0x8000) {
+        ALOGE("AAudioServiceStreamShared::open(), numeric overflow, capacity = %d * %d",
+              numBursts, framesPerBurst);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    int32_t capacityInFrames = numBursts * framesPerBurst;
+
+    // Final sanity check.
+    if (capacityInFrames > MAX_FRAMES_PER_BUFFER) {
+        ALOGE("AAudioServiceStreamShared::open(), calculated capacity %d > max %d",
+              capacityInFrames, MAX_FRAMES_PER_BUFFER);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    ALOGD("AAudioServiceStreamShared::open(), requested capacity = %d frames, actual = %d",
+          requestedCapacityFrames, capacityInFrames);
+    return capacityInFrames;
+}
+
 aaudio_result_t AAudioServiceStreamShared::open(const aaudio::AAudioStreamRequest &request,
                      aaudio::AAudioStreamConfiguration &configurationOutput)  {
 
     aaudio_result_t result = AAudioServiceStreamBase::open(request, configurationOutput);
     if (result != AAUDIO_OK) {
-        ALOGE("AAudioServiceStreamBase open returned %d", result);
+        ALOGE("AAudioServiceStreamBase open() returned %d", result);
         return result;
     }
 
@@ -72,16 +120,18 @@
         mAudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
     } else if (mAudioFormat != AAUDIO_FORMAT_PCM_FLOAT) {
         ALOGE("AAudioServiceStreamShared::open(), mAudioFormat = %d, need FLOAT", mAudioFormat);
-        return AAUDIO_ERROR_INVALID_FORMAT;
+        result = AAUDIO_ERROR_INVALID_FORMAT;
+        goto error;
     }
 
     mSampleRate = configurationInput.getSampleRate();
     if (mSampleRate == AAUDIO_UNSPECIFIED) {
         mSampleRate = mServiceEndpoint->getSampleRate();
     } else if (mSampleRate != mServiceEndpoint->getSampleRate()) {
-        ALOGE("AAudioServiceStreamShared::open(), mAudioFormat = %d, need %d",
+        ALOGE("AAudioServiceStreamShared::open(), mSampleRate = %d, need %d",
               mSampleRate, mServiceEndpoint->getSampleRate());
-        return AAUDIO_ERROR_INVALID_RATE;
+        result = AAUDIO_ERROR_INVALID_RATE;
+        goto error;
     }
 
     mSamplesPerFrame = configurationInput.getSamplesPerFrame();
@@ -90,37 +140,51 @@
     } else if (mSamplesPerFrame != mServiceEndpoint->getSamplesPerFrame()) {
         ALOGE("AAudioServiceStreamShared::open(), mSamplesPerFrame = %d, need %d",
               mSamplesPerFrame, mServiceEndpoint->getSamplesPerFrame());
-        return AAUDIO_ERROR_OUT_OF_RANGE;
+        result = AAUDIO_ERROR_OUT_OF_RANGE;
+        goto error;
     }
 
-    // Determine this stream's shared memory buffer capacity.
     mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
-    int32_t minCapacityFrames = configurationInput.getBufferCapacity();
-    int32_t numBursts = MAX_BURSTS_PER_BUFFER;
-    if (minCapacityFrames != AAUDIO_UNSPECIFIED) {
-        numBursts = (minCapacityFrames + mFramesPerBurst - 1) / mFramesPerBurst;
-        if (numBursts < MIN_BURSTS_PER_BUFFER) {
-            numBursts = MIN_BURSTS_PER_BUFFER;
-        } else if (numBursts > MAX_BURSTS_PER_BUFFER) {
-            numBursts = MAX_BURSTS_PER_BUFFER;
-        }
+    ALOGD("AAudioServiceStreamShared::open(), mSampleRate = %d, mFramesPerBurst = %d",
+          mSampleRate, mFramesPerBurst);
+
+    mCapacityInFrames = calculateBufferCapacity(configurationInput.getBufferCapacity(),
+                                     mFramesPerBurst);
+    if (mCapacityInFrames < 0) {
+        result = mCapacityInFrames; // negative error code
+        mCapacityInFrames = 0;
+        goto error;
     }
-    mCapacityInFrames = numBursts * mFramesPerBurst;
-    ALOGD("AAudioServiceStreamShared::open(), mCapacityInFrames = %d", mCapacityInFrames);
 
     // Create audio data shared memory buffer for client.
     mAudioDataQueue = new SharedRingBuffer();
-    mAudioDataQueue->allocate(calculateBytesPerFrame(), mCapacityInFrames);
+    result = mAudioDataQueue->allocate(calculateBytesPerFrame(), mCapacityInFrames);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamShared::open(), could not allocate FIFO with %d frames",
+              mCapacityInFrames);
+        result = AAUDIO_ERROR_NO_MEMORY;
+        goto error;
+    }
+
+    ALOGD("AAudioServiceStreamShared::open() actual rate = %d, channels = %d, deviceId = %d",
+          mSampleRate, mSamplesPerFrame, mServiceEndpoint->getDeviceId());
 
     // Fill in configuration for client.
     configurationOutput.setSampleRate(mSampleRate);
     configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
     configurationOutput.setAudioFormat(mAudioFormat);
-    configurationOutput.setDeviceId(deviceId);
+    configurationOutput.setDeviceId(mServiceEndpoint->getDeviceId());
 
-    mServiceEndpoint->registerStream(this);
+    result = mServiceEndpoint->registerStream(this);
+    if (result != AAUDIO_OK) {
+        goto error;
+    }
 
     return AAUDIO_OK;
+
+error:
+    close();
+    return result;
 }
 
 /**
@@ -137,11 +201,11 @@
     aaudio_result_t result = endpoint->startStream(this);
     if (result != AAUDIO_OK) {
         ALOGE("AAudioServiceStreamShared::start() mServiceEndpoint returned %d", result);
-        processError();
+        processFatalError();
     } else {
         result = AAudioServiceStreamBase::start();
     }
-    return AAUDIO_OK;
+    return result;
 }
 
 /**
@@ -154,11 +218,10 @@
     if (endpoint == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    // Add this stream to the mixer.
     aaudio_result_t result = endpoint->stopStream(this);
     if (result != AAUDIO_OK) {
         ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
-        processError();
+        processFatalError();
     }
     return AAudioServiceStreamBase::pause();
 }
@@ -168,11 +231,10 @@
     if (endpoint == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    // Add this stream to the mixer.
     aaudio_result_t result = endpoint->stopStream(this);
     if (result != AAUDIO_OK) {
         ALOGE("AAudioServiceStreamShared::stop() mServiceEndpoint returned %d", result);
-        processError();
+        processFatalError();
     }
     return AAudioServiceStreamBase::stop();
 }
@@ -183,9 +245,17 @@
  * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
  */
 aaudio_result_t AAudioServiceStreamShared::flush()  {
-    // TODO make sure we are paused
-    // TODO actually flush the data
-    return AAudioServiceStreamBase::flush() ;
+    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+    if (endpoint == nullptr) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    if (mState != AAUDIO_STREAM_STATE_PAUSED) {
+        ALOGE("AAudioServiceStreamShared::flush() stream not paused, state = %s",
+            AAudio_convertStreamStateToText(mState));
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    // Data will get flushed when the client receives the FLUSHED event.
+    return AAudioServiceStreamBase::flush();
 }
 
 aaudio_result_t AAudioServiceStreamShared::close()  {
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index dfdbbb3..35af434 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -97,6 +97,14 @@
 
     aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
 
+    /**
+     * @param requestedCapacityFrames
+     * @param framesPerBurst
+     * @return capacity or negative error
+     */
+    static int32_t calculateBufferCapacity(int32_t requestedCapacityFrames,
+                                            int32_t framesPerBurst);
+
 private:
     android::AAudioService  &mAudioService;
     AAudioServiceEndpoint   *mServiceEndpoint = nullptr;
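For illustration only (not part of the patch), a few values worked through from the calculateBufferCapacity() implementation added in AAudioServiceStreamShared.cpp above, assuming framesPerBurst = 192.

// Worked examples (MIN_BURSTS_PER_BUFFER = 2, DEFAULT_BURSTS_PER_BUFFER = 16,
// MAX_FRAMES_PER_BUFFER = 32 * 1024 = 32768):
//   calculateBufferCapacity(AAUDIO_UNSPECIFIED, 192)  -> default 16 bursts     -> 3072 frames
//   calculateBufferCapacity(1000, 192)                -> rounded up to 6 bursts -> 1152 frames
//   calculateBufferCapacity(100, 192)                 -> clipped up to 2 bursts -> 384 frames
//   calculateBufferCapacity(40000, 192)               -> AAUDIO_ERROR_OUT_OF_RANGE (negative)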
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
index b447725..7f7d465 100644
--- a/services/oboeservice/Android.mk
+++ b/services/oboeservice/Android.mk
@@ -46,6 +46,7 @@
 LOCAL_SHARED_LIBRARIES :=  \
     libaaudio \
     libaudioflinger \
+    libaudioclient \
     libbinder \
     libcutils \
     libmediautils \