Merge "audio policy: allows dumping Audio Policy Mix"
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 12d0da8..9cd3a47 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -21,8 +21,9 @@
 #include <utils/Log.h>
 
 #include <camera/camera2/OutputConfiguration.h>
-#include <gui/Surface.h>
 #include <binder/Parcel.h>
+#include <gui/Surface.h>
+#include <utils/String8.h>
 
 namespace android {
 
@@ -30,8 +31,9 @@
 const int OutputConfiguration::INVALID_ROTATION = -1;
 const int OutputConfiguration::INVALID_SET_ID = -1;
 
-sp<IGraphicBufferProducer> OutputConfiguration::getGraphicBufferProducer() const {
-    return mGbp;
+const std::vector<sp<IGraphicBufferProducer>>&
+        OutputConfiguration::getGraphicBufferProducers() const {
+    return mGbps;
 }
 
 int OutputConfiguration::getRotation() const {
@@ -103,37 +105,60 @@
         return err;
     }
 
-    view::Surface surfaceShim;
-    if ((err = surfaceShim.readFromParcel(parcel)) != OK) {
-        // Read surface failure for deferred surface configuration is expected.
-        if (surfaceType == SURFACE_TYPE_SURFACE_VIEW ||
-                surfaceType == SURFACE_TYPE_SURFACE_TEXTURE) {
-            ALOGV("%s: Get null surface from a deferred surface configuration (%dx%d)",
-                    __FUNCTION__, width, height);
-            err = OK;
-        } else {
-            ALOGE("%s: Failed to read surface from parcel", __FUNCTION__);
-            return err;
-        }
+    // numSurfaces is the total number of surfaces for this OutputConfiguration,
+    // regardless of whether each surface is deferred or not.
+    int numSurfaces = 0;
+    if ((err = parcel->readInt32(&numSurfaces)) != OK) {
+        ALOGE("%s: Failed to read maxSurfaces from parcel", __FUNCTION__);
+        return err;
+    }
+    if (numSurfaces < 1) {
+        ALOGE("%s: there has to be at least 1 surface per"
+              " outputConfiguration", __FUNCTION__);
+        return BAD_VALUE;
     }
 
-    mGbp = surfaceShim.graphicBufferProducer;
+    // Read all surfaces from the parcel. If a surface is deferred, readFromParcel
+    // returns an error and a null surface is put into mGbps. We assume all
+    // deferred surfaces come after the non-deferred surfaces in the parcel.
+    // TODO: find a better way to detect deferred surfaces than relying on the
+    // error return from readFromParcel.
+    std::vector<sp<IGraphicBufferProducer>> gbps;
+    for (int i = 0; i < numSurfaces; i++) {
+        view::Surface surfaceShim;
+        if ((err = surfaceShim.readFromParcel(parcel)) != OK) {
+            // Read surface failure for deferred surface configuration is expected.
+            if ((surfaceType == SURFACE_TYPE_SURFACE_VIEW ||
+                    surfaceType == SURFACE_TYPE_SURFACE_TEXTURE)) {
+                ALOGV("%s: Get null surface from a deferred surface configuration (%dx%d)",
+                        __FUNCTION__, width, height);
+                err = OK;
+            } else {
+                ALOGE("%s: Failed to read surface from parcel", __FUNCTION__);
+                return err;
+            }
+        }
+        gbps.push_back(surfaceShim.graphicBufferProducer);
+        ALOGV("%s: OutputConfiguration: gbps[%d] : %p, name %s", __FUNCTION__,
+                i, gbps[i].get(), String8(surfaceShim.name).string());
+    }
+
     mRotation = rotation;
     mSurfaceSetID = setID;
     mSurfaceType = surfaceType;
     mWidth = width;
     mHeight = height;
+    mGbps = std::move(gbps);
 
-    ALOGV("%s: OutputConfiguration: bp = %p, name = %s, rotation = %d, setId = %d,"
-            "surfaceType = %d", __FUNCTION__, mGbp.get(), String8(surfaceShim.name).string(),
-            mRotation, mSurfaceSetID, mSurfaceType);
+    ALOGV("%s: OutputConfiguration: rotation = %d, setId = %d, surfaceType = %d",
+            __FUNCTION__, mRotation, mSurfaceSetID, mSurfaceType);
 
     return err;
 }
 
 OutputConfiguration::OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
         int surfaceSetID) {
-    mGbp = gbp;
+    mGbps.push_back(gbp);
     mRotation = rotation;
     mSurfaceSetID = surfaceSetID;
 }
@@ -158,14 +183,53 @@
     err = parcel->writeInt32(mHeight);
     if (err != OK) return err;
 
-    view::Surface surfaceShim;
-    surfaceShim.name = String16("unknown_name"); // name of surface
-    surfaceShim.graphicBufferProducer = mGbp;
-
-    err = surfaceShim.writeToParcel(parcel);
+    int numSurfaces = mGbps.size();
+    err = parcel->writeInt32(numSurfaces);
     if (err != OK) return err;
 
+    for (int i = 0; i < numSurfaces; i++) {
+        view::Surface surfaceShim;
+        surfaceShim.name = String16("unknown_name"); // name of surface
+        surfaceShim.graphicBufferProducer = mGbps[i];
+
+        err = surfaceShim.writeToParcel(parcel);
+        if (err != OK) return err;
+    }
+
     return OK;
 }
 
+bool OutputConfiguration::gbpsEqual(const OutputConfiguration& other) const {
+    const std::vector<sp<IGraphicBufferProducer> >& otherGbps =
+            other.getGraphicBufferProducers();
+
+    if (mGbps.size() != otherGbps.size()) {
+        return false;
+    }
+
+    for (size_t i = 0; i < mGbps.size(); i++) {
+        if (mGbps[i] != otherGbps[i]) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+bool OutputConfiguration::gbpsLessThan(const OutputConfiguration& other) const {
+    const std::vector<sp<IGraphicBufferProducer> >& otherGbps =
+            other.getGraphicBufferProducers();
+
+    if (mGbps.size() != otherGbps.size()) {
+        return mGbps.size() < otherGbps.size();
+    }
+
+    for (size_t i = 0; i < mGbps.size(); i++) {
+        if (mGbps[i] != otherGbps[i]) {
+            return mGbps[i] < otherGbps[i];
+        }
+    }
+
+    return false;
+}
 }; // namespace android
diff --git a/include/camera/camera2/OutputConfiguration.h b/include/camera/camera2/OutputConfiguration.h
index cb04c0e..2961e2a 100644
--- a/include/camera/camera2/OutputConfiguration.h
+++ b/include/camera/camera2/OutputConfiguration.h
@@ -38,7 +38,7 @@
         SURFACE_TYPE_SURFACE_VIEW = 0,
         SURFACE_TYPE_SURFACE_TEXTURE = 1
     };
-    sp<IGraphicBufferProducer> getGraphicBufferProducer() const;
+    const std::vector<sp<IGraphicBufferProducer>>& getGraphicBufferProducers() const;
     int                        getRotation() const;
     int                        getSurfaceSetID() const;
     int                        getSurfaceType() const;
@@ -65,19 +65,18 @@
             int surfaceSetID = INVALID_SET_ID);
 
     bool operator == (const OutputConfiguration& other) const {
-        return (mGbp == other.mGbp &&
-                mRotation == other.mRotation &&
+        return (mRotation == other.mRotation &&
                 mSurfaceSetID == other.mSurfaceSetID &&
                 mSurfaceType == other.mSurfaceType &&
                 mWidth == other.mWidth &&
-                mHeight == other.mHeight);
+                mHeight == other.mHeight &&
+                gbpsEqual(other));
     }
     bool operator != (const OutputConfiguration& other) const {
         return !(*this == other);
     }
     bool operator < (const OutputConfiguration& other) const {
         if (*this == other) return false;
-        if (mGbp != other.mGbp) return mGbp < other.mGbp;
         if (mSurfaceSetID != other.mSurfaceSetID) {
             return mSurfaceSetID < other.mSurfaceSetID;
         }
@@ -90,15 +89,20 @@
         if (mHeight != other.mHeight) {
             return mHeight < other.mHeight;
         }
+        if (mRotation != other.mRotation) {
+            return mRotation < other.mRotation;
+        }
 
-        return mRotation < other.mRotation;
+        return gbpsLessThan(other);
     }
     bool operator > (const OutputConfiguration& other) const {
         return (*this != other && !(*this < other));
     }
 
+    bool gbpsEqual(const OutputConfiguration& other) const;
+    bool gbpsLessThan(const OutputConfiguration& other) const;
 private:
-    sp<IGraphicBufferProducer> mGbp;
+    std::vector<sp<IGraphicBufferProducer>> mGbps;
     int                        mRotation;
     int                        mSurfaceSetID;
     int                        mSurfaceType;
diff --git a/include/media/BufferProviders.h b/include/media/BufferProviders.h
index 68b3f23..d5899ea 100644
--- a/include/media/BufferProviders.h
+++ b/include/media/BufferProviders.h
@@ -21,6 +21,7 @@
 #include <sys/types.h>
 
 #include <media/AudioBufferProvider.h>
+#include <media/AudioResamplerPublic.h>
 #include <system/audio.h>
 #include <system/audio_effect.h>
 #include <utils/StrongPointer.h>
diff --git a/include/media/BufferingSettings.h b/include/media/BufferingSettings.h
index 7dd9d40..e812d2a 100644
--- a/include/media/BufferingSettings.h
+++ b/include/media/BufferingSettings.h
@@ -40,6 +40,8 @@
     static const int kNoWatermark = -1;
 
     static bool IsValidBufferingMode(int mode);
+    static bool IsTimeBasedBufferingMode(int mode);
+    static bool IsSizeBasedBufferingMode(int mode);
 
     BufferingMode mInitialBufferingMode;  // for prepare
     BufferingMode mRebufferingMode;  // for playback
@@ -64,6 +66,7 @@
     status_t writeToParcel(Parcel* parcel) const override;
     status_t readFromParcel(const Parcel* parcel) override;
 
+    String8 toString() const;
 };
 
 } // namespace android
diff --git a/include/media/OMXBuffer.h b/include/media/OMXBuffer.h
index aeb1765..697823f 100644
--- a/include/media/OMXBuffer.h
+++ b/include/media/OMXBuffer.h
@@ -58,10 +58,6 @@
     // |codecBuffer|'s size (or 0 if |codecBuffer| is NULL).
     OMXBuffer(const sp<MediaCodecBuffer> &codecBuffer);
 
-    // Constructs a buffer of type kBufferTypePreset with a specified
-    // mRangeLength.
-    explicit OMXBuffer(OMX_U32 rangeLength);
-
     // Constructs a buffer of type kBufferTypeSharedMem.
     OMXBuffer(const sp<IMemory> &mem);
 
@@ -101,6 +97,7 @@
     // kBufferTypePreset
     // If the port is operating in byte buffer mode, mRangeLength is the valid
     // range length. Otherwise the range info should also be ignored.
+    OMX_U32 mRangeOffset;
     OMX_U32 mRangeLength;
 
     // kBufferTypeSharedMem
diff --git a/include/media/RecordBufferConverter.h b/include/media/RecordBufferConverter.h
new file mode 100644
index 0000000..2abc45e
--- /dev/null
+++ b/include/media/RecordBufferConverter.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_RECORD_BUFFER_CONVERTER_H
+#define ANDROID_RECORD_BUFFER_CONVERTER_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <media/AudioBufferProvider.h>
+#include <system/audio.h>
+
+class AudioResampler;
+class PassthruBufferProvider;
+
+namespace android {
+
+/* The RecordBufferConverter is used for format, channel, and sample rate
+ * conversion for a RecordTrack.
+ *
+ * RecordBufferConverter uses the convert() method rather than exposing a
+ * buffer provider interface; this is to save a memory copy.
+ *
+ * There are legacy conversion requirements for this converter, specifically
+ * due to mono handling, so be careful about modifying.
+ *
+ * Original source audioflinger/Threads.{h,cpp}
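+ *
+ * Illustrative usage sketch (the buffer names and parameter values below are
+ * assumptions for the example, not part of this change):
+ *
+ *   RecordBufferConverter converter(
+ *           AUDIO_CHANNEL_IN_STEREO, AUDIO_FORMAT_PCM_16_BIT, 48000,
+ *           AUDIO_CHANNEL_IN_MONO, AUDIO_FORMAT_PCM_FLOAT, 16000);
+ *   if (converter.initCheck() == NO_ERROR) {
+ *       // dstBuffer must hold frameCount frames in the destination format/channels.
+ *       size_t converted = converter.convert(dstBuffer, sourceProvider, frameCount);
+ *   }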
+ */
+class RecordBufferConverter
+{
+public:
+    RecordBufferConverter(
+            audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+            uint32_t srcSampleRate,
+            audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+            uint32_t dstSampleRate);
+
+    ~RecordBufferConverter();
+
+    /* Converts input data from an AudioBufferProvider by format, channelMask,
+     * and sampleRate to a destination buffer.
+     *
+     * Parameters
+     *      dst:  buffer to place the converted data.
+     * provider:  buffer provider to obtain source data.
+     *   frames:  number of frames to convert
+     *
+     * Returns the number of frames converted.
+     */
+    size_t convert(void *dst, AudioBufferProvider *provider, size_t frames);
+
+    // returns NO_ERROR if constructor was successful
+    status_t initCheck() const {
+        // mSrcChannelMask set on successful updateParameters
+        return mSrcChannelMask != AUDIO_CHANNEL_INVALID ? NO_ERROR : NO_INIT;
+    }
+
+    // allows dynamic reconfigure of all parameters
+    status_t updateParameters(
+            audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+            uint32_t srcSampleRate,
+            audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+            uint32_t dstSampleRate);
+
+    // called to reset resampler buffers on record track discontinuity
+    void reset();
+
+private:
+    // format conversion when not using resampler
+    void convertNoResampler(void *dst, const void *src, size_t frames);
+
+    // format conversion when using resampler; modifies src in-place
+    void convertResampler(void *dst, /*not-a-const*/ void *src, size_t frames);
+
+    // user provided information
+    audio_channel_mask_t mSrcChannelMask;
+    audio_format_t       mSrcFormat;
+    uint32_t             mSrcSampleRate;
+    audio_channel_mask_t mDstChannelMask;
+    audio_format_t       mDstFormat;
+    uint32_t             mDstSampleRate;
+
+    // derived information
+    uint32_t             mSrcChannelCount;
+    uint32_t             mDstChannelCount;
+    size_t               mDstFrameSize;
+
+    // format conversion buffer
+    void                *mBuf;
+    size_t               mBufFrames;
+    size_t               mBufFrameSize;
+
+    // resampler info
+    AudioResampler      *mResampler;
+
+    bool                 mIsLegacyDownmix;  // legacy stereo to mono conversion needed
+    bool                 mIsLegacyUpmix;    // legacy mono to stereo conversion needed
+    bool                 mRequiresFloat;    // data processing requires float (e.g. resampler)
+    PassthruBufferProvider *mInputConverterProvider;    // converts input to float
+    int8_t               mIdxAry[sizeof(uint32_t) * 8]; // used for channel mask conversion
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_RECORD_BUFFER_CONVERTER_H
diff --git a/include/media/audiohal/hidl/HalDeathHandler.h b/include/media/audiohal/hidl/HalDeathHandler.h
new file mode 100644
index 0000000..c9b7084
--- /dev/null
+++ b/include/media/audiohal/hidl/HalDeathHandler.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_HIDL_HAL_DEATH_HANDLER_H
+#define ANDROID_HARDWARE_HIDL_HAL_DEATH_HANDLER_H
+
+#include <functional>
+#include <mutex>
+#include <unordered_map>
+
+#include <hidl/HidlSupport.h>
+#include <utils/Singleton.h>
+
+using android::hardware::hidl_death_recipient;
+using android::hidl::base::V1_0::IBase;
+
+namespace android {
+
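+// HalDeathHandler keeps a process-wide registry of handlers that run when an
+// audio HAL server dies. Illustrative usage sketch (the 'halFactory' object,
+// 'cookiePtr', and the handler body are assumptions for the example, not part
+// of this change):
+//
+//   sp<HalDeathHandler> deathHandler = HalDeathHandler::getInstance();
+//   deathHandler->registerAtExitHandler(cookiePtr, []() { ALOGE("audio HAL died"); });
+//   halFactory->linkToDeath(deathHandler, 0 /*cookie*/);
+//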
+class HalDeathHandler : public hidl_death_recipient, private Singleton<HalDeathHandler> {
+  public:
+    typedef std::function<void()> AtExitHandler;
+
+    // Note that the exit handler gets called on a thread from the
+    // RPC threadpool, so it needs to be thread-safe.
+    void registerAtExitHandler(void* cookie, AtExitHandler handler);
+    void unregisterAtExitHandler(void* cookie);
+
+    // hidl_death_recipient
+    virtual void serviceDied(uint64_t cookie, const wp<IBase>& who);
+
+    // Used both for (un)registering handlers, and for passing to
+    // '(un)linkToDeath'.
+    static sp<HalDeathHandler> getInstance();
+
+  private:
+    friend class Singleton<HalDeathHandler>;
+    typedef std::unordered_map<void*, AtExitHandler> Handlers;
+
+    HalDeathHandler();
+    virtual ~HalDeathHandler();
+
+    sp<HalDeathHandler> mSelf;  // Allows the singleton instance to live forever.
+    std::mutex mHandlersLock;
+    Handlers mHandlers;
+};
+
+}  // namespace android
+
+#endif // ANDROID_HARDWARE_HIDL_HAL_DEATH_HANDLER_H
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index be34d02..9130159 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -219,6 +219,8 @@
             status_t        setVideoSurfaceTexture(
                                     const sp<IGraphicBufferProducer>& bufferProducer);
             status_t        setListener(const sp<MediaPlayerListener>& listener);
+            status_t        getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */);
+            status_t        setBufferingSettings(const BufferingSettings& buffering);
             status_t        prepare();
             status_t        prepareAsync();
             status_t        start();
diff --git a/include/media/stagefright/MediaExtractor.h b/include/media/stagefright/MediaExtractor.h
index fbb4a67..b460ef7 100644
--- a/include/media/stagefright/MediaExtractor.h
+++ b/include/media/stagefright/MediaExtractor.h
@@ -72,7 +72,6 @@
     virtual ~MediaExtractor() {}
 
 private:
-    bool mIsDrm;
 
     typedef bool (*SnifferFunc)(
             const sp<DataSource> &source, String8 *mimeType,
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index 8eff914..88a416a 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -23,6 +23,7 @@
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 #include <system/audio.h>
+#include <media/BufferingSettings.h>
 #include <media/MediaPlayerInterface.h>
 
 namespace android {
@@ -90,6 +91,9 @@
 void readFromAMessage(
         const sp<AMessage> &msg, AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
 
+void writeToAMessage(const sp<AMessage> &msg, const BufferingSettings &buffering);
+void readFromAMessage(const sp<AMessage> &msg, BufferingSettings *buffering /* nonnull */);
+
 AString nameForFd(int fd);
 
 }  // namespace android
diff --git a/media/libaudiohal/Android.mk b/media/libaudiohal/Android.mk
index 58b38a6..5e00b77 100644
--- a/media/libaudiohal/Android.mk
+++ b/media/libaudiohal/Android.mk
@@ -26,6 +26,7 @@
 
 LOCAL_SRC_FILES := \
     ConversionHelperHidl.cpp   \
+    HalDeathHandlerHidl.cpp   \
     DeviceHalHidl.cpp          \
     DevicesFactoryHalHidl.cpp  \
     EffectBufferHalHidl.cpp    \
diff --git a/media/libaudiohal/ConversionHelperHidl.h b/media/libaudiohal/ConversionHelperHidl.h
index 00d5b2c..23fb360 100644
--- a/media/libaudiohal/ConversionHelperHidl.h
+++ b/media/libaudiohal/ConversionHelperHidl.h
@@ -52,7 +52,7 @@
         if (!ret.isOk()) {
             emitError(funcName, ret.description().c_str());
         }
-        return ret.isOk() ? OK : UNKNOWN_ERROR;
+        return ret.isOk() ? OK : FAILED_TRANSACTION;
     }
 
     status_t processReturn(const char* funcName, const Return<hardware::audio::V2_0::Result>& ret) {
@@ -62,7 +62,7 @@
     template<typename T>
     status_t processReturn(
             const char* funcName, const Return<T>& ret, hardware::audio::V2_0::Result retval) {
-        const status_t st = ret.isOk() ? analyzeResult(retval) : UNKNOWN_ERROR;
+        const status_t st = ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
         if (!ret.isOk()) {
             emitError(funcName, ret.description().c_str());
         }
diff --git a/media/libaudiohal/DevicesFactoryHalHidl.cpp b/media/libaudiohal/DevicesFactoryHalHidl.cpp
index 6444079..a91f145 100644
--- a/media/libaudiohal/DevicesFactoryHalHidl.cpp
+++ b/media/libaudiohal/DevicesFactoryHalHidl.cpp
@@ -20,6 +20,7 @@
 //#define LOG_NDEBUG 0
 
 #include <android/hardware/audio/2.0/IDevice.h>
+#include <media/audiohal/hidl/HalDeathHandler.h>
 #include <utils/Log.h>
 
 #include "ConversionHelperHidl.h"
@@ -40,6 +41,11 @@
 
 DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
     mDevicesFactory = IDevicesFactory::getService("audio_devices_factory");
+    if (mDevicesFactory != 0) {
+        // It is assumed that DevicesFactory is owned by AudioFlinger
+        // and thus has the same lifespan.
+        mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+    }
 }
 
 DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
@@ -83,7 +89,7 @@
         else if (retval == Result::INVALID_ARGUMENTS) return BAD_VALUE;
         else return NO_INIT;
     }
-    return UNKNOWN_ERROR;
+    return FAILED_TRANSACTION;
 }
 
 } // namespace android
diff --git a/media/libaudiohal/EffectHalHidl.cpp b/media/libaudiohal/EffectHalHidl.cpp
index 3fb2f43..6cf6412 100644
--- a/media/libaudiohal/EffectHalHidl.cpp
+++ b/media/libaudiohal/EffectHalHidl.cpp
@@ -160,7 +160,7 @@
         mBuffersChanged = false;
         return OK;
     }
-    return ret.isOk() ? analyzeResult(ret) : UNKNOWN_ERROR;
+    return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
 }
 
 status_t EffectHalHidl::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
@@ -171,6 +171,8 @@
         hidlData.setToExternal(reinterpret_cast<uint8_t*>(pCmdData), cmdSize);
     }
     status_t status;
+    uint32_t replySizeStub = 0;
+    if (replySize == nullptr) replySize = &replySizeStub;
     Return<void> ret = mEffect->command(cmdCode, hidlData, *replySize,
             [&](int32_t s, const hidl_vec<uint8_t>& result) {
                 status = s;
@@ -181,7 +183,7 @@
                     }
                 }
             });
-    return status;
+    return ret.isOk() ? status : FAILED_TRANSACTION;
 }
 
 status_t EffectHalHidl::getDescriptor(effect_descriptor_t *pDescriptor) {
@@ -194,13 +196,13 @@
                     effectDescriptorToHal(result, pDescriptor);
                 }
             });
-    return ret.isOk() ? analyzeResult(retval) : UNKNOWN_ERROR;
+    return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
 }
 
 status_t EffectHalHidl::close() {
     if (mEffect == 0) return NO_INIT;
     Return<Result> ret = mEffect->close();
-    return ret.isOk() ? analyzeResult(ret) : UNKNOWN_ERROR;
+    return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
 }
 
 } // namespace android
diff --git a/media/libaudiohal/HalDeathHandlerHidl.cpp b/media/libaudiohal/HalDeathHandlerHidl.cpp
new file mode 100644
index 0000000..a742671
--- /dev/null
+++ b/media/libaudiohal/HalDeathHandlerHidl.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "HalDeathHandler"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include <media/audiohal/hidl/HalDeathHandler.h>
+
+namespace android {
+
+ANDROID_SINGLETON_STATIC_INSTANCE(HalDeathHandler);
+
+// static
+sp<HalDeathHandler> HalDeathHandler::getInstance() {
+    return &Singleton<HalDeathHandler>::getInstance();
+}
+
+HalDeathHandler::HalDeathHandler() : mSelf(this) {
+}
+
+HalDeathHandler::~HalDeathHandler() {
+}
+
+void HalDeathHandler::registerAtExitHandler(void* cookie, AtExitHandler handler) {
+    std::lock_guard<std::mutex> guard(mHandlersLock);
+    mHandlers.insert({cookie, handler});
+}
+
+void HalDeathHandler::unregisterAtExitHandler(void* cookie) {
+    std::lock_guard<std::mutex> guard(mHandlersLock);
+    mHandlers.erase(cookie);
+}
+
+void HalDeathHandler::serviceDied(uint64_t /*cookie*/, const wp<IBase>& /*who*/) {
+    // No matter which of the service objects has died,
+    // we need to run all the registered handlers and crash our process.
+    std::lock_guard<std::mutex> guard(mHandlersLock);
+    for (const auto& handler : mHandlers) {
+        handler.second();
+    }
+    LOG_ALWAYS_FATAL("HAL server crashed, need to restart");
+}
+
+} // namespace android
diff --git a/media/libaudioprocessing/Android.mk b/media/libaudioprocessing/Android.mk
index d47d158..b7ea99e 100644
--- a/media/libaudioprocessing/Android.mk
+++ b/media/libaudioprocessing/Android.mk
@@ -9,6 +9,7 @@
     AudioResamplerSinc.cpp.arm \
     AudioResamplerDyn.cpp.arm \
     BufferProviders.cpp \
+    RecordBufferConverter.cpp \
 
 LOCAL_C_INCLUDES := \
     $(TOP) \
diff --git a/media/libaudioprocessing/RecordBufferConverter.cpp b/media/libaudioprocessing/RecordBufferConverter.cpp
new file mode 100644
index 0000000..54151f5
--- /dev/null
+++ b/media/libaudioprocessing/RecordBufferConverter.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "RecordBufferConverter"
+//#define LOG_NDEBUG 0
+
+#include <audio_utils/primitives.h>
+#include <audio_utils/format.h>
+#include <media/AudioMixer.h>  // for UNITY_GAIN_FLOAT
+#include <media/AudioResampler.h>
+#include <media/BufferProviders.h>
+#include <media/RecordBufferConverter.h>
+#include <utils/Log.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
+template <typename T>
+static inline T max(const T& a, const T& b)
+{
+    return a > b ? a : b;
+}
+
+namespace android {
+
+RecordBufferConverter::RecordBufferConverter(
+        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+        uint32_t srcSampleRate,
+        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+        uint32_t dstSampleRate) :
+            mSrcChannelMask(AUDIO_CHANNEL_INVALID), // updateParameters will set following vars
+            // mSrcFormat
+            // mSrcSampleRate
+            // mDstChannelMask
+            // mDstFormat
+            // mDstSampleRate
+            // mSrcChannelCount
+            // mDstChannelCount
+            // mDstFrameSize
+            mBuf(NULL), mBufFrames(0), mBufFrameSize(0),
+            mResampler(NULL),
+            mIsLegacyDownmix(false),
+            mIsLegacyUpmix(false),
+            mRequiresFloat(false),
+            mInputConverterProvider(NULL)
+{
+    (void)updateParameters(srcChannelMask, srcFormat, srcSampleRate,
+            dstChannelMask, dstFormat, dstSampleRate);
+}
+
+RecordBufferConverter::~RecordBufferConverter() {
+    free(mBuf);
+    delete mResampler;
+    delete mInputConverterProvider;
+}
+
+void RecordBufferConverter::reset() {
+    if (mResampler != NULL) {
+        mResampler->reset();
+    }
+}
+
+size_t RecordBufferConverter::convert(void *dst,
+        AudioBufferProvider *provider, size_t frames)
+{
+    if (mInputConverterProvider != NULL) {
+        mInputConverterProvider->setBufferProvider(provider);
+        provider = mInputConverterProvider;
+    }
+
+    if (mResampler == NULL) {
+        ALOGV("NO RESAMPLING sampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
+                mSrcSampleRate, mSrcFormat, mDstFormat);
+
+        AudioBufferProvider::Buffer buffer;
+        for (size_t i = frames; i > 0; ) {
+            buffer.frameCount = i;
+            status_t status = provider->getNextBuffer(&buffer);
+            if (status != OK || buffer.frameCount == 0) {
+                frames -= i; // cannot fill request.
+                break;
+            }
+            // format convert to destination buffer
+            convertNoResampler(dst, buffer.raw, buffer.frameCount);
+
+            dst = (int8_t*)dst + buffer.frameCount * mDstFrameSize;
+            i -= buffer.frameCount;
+            provider->releaseBuffer(&buffer);
+        }
+    } else {
+         ALOGV("RESAMPLING mSrcSampleRate:%u mDstSampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
+                 mSrcSampleRate, mDstSampleRate, mSrcFormat, mDstFormat);
+
+         // reallocate buffer if needed
+         if (mBufFrameSize != 0 && mBufFrames < frames) {
+             free(mBuf);
+             mBufFrames = frames;
+             (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
+         }
+        // resampler accumulates, but we only have one source track
+        memset(mBuf, 0, frames * mBufFrameSize);
+        frames = mResampler->resample((int32_t*)mBuf, frames, provider);
+        // format convert to destination buffer
+        convertResampler(dst, mBuf, frames);
+    }
+    return frames;
+}
+
+status_t RecordBufferConverter::updateParameters(
+        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+        uint32_t srcSampleRate,
+        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+        uint32_t dstSampleRate)
+{
+    // quick evaluation if there is any change.
+    if (mSrcFormat == srcFormat
+            && mSrcChannelMask == srcChannelMask
+            && mSrcSampleRate == srcSampleRate
+            && mDstFormat == dstFormat
+            && mDstChannelMask == dstChannelMask
+            && mDstSampleRate == dstSampleRate) {
+        return NO_ERROR;
+    }
+
+    ALOGV("RecordBufferConverter updateParameters srcMask:%#x dstMask:%#x"
+            "  srcFormat:%#x dstFormat:%#x  srcRate:%u dstRate:%u",
+            srcChannelMask, dstChannelMask, srcFormat, dstFormat, srcSampleRate, dstSampleRate);
+    const bool valid =
+            audio_is_input_channel(srcChannelMask)
+            && audio_is_input_channel(dstChannelMask)
+            && audio_is_valid_format(srcFormat) && audio_is_linear_pcm(srcFormat)
+            && audio_is_valid_format(dstFormat) && audio_is_linear_pcm(dstFormat)
+            && (srcSampleRate <= dstSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX)
+            ; // no upsampling checks for now
+    if (!valid) {
+        return BAD_VALUE;
+    }
+
+    mSrcFormat = srcFormat;
+    mSrcChannelMask = srcChannelMask;
+    mSrcSampleRate = srcSampleRate;
+    mDstFormat = dstFormat;
+    mDstChannelMask = dstChannelMask;
+    mDstSampleRate = dstSampleRate;
+
+    // compute derived parameters
+    mSrcChannelCount = audio_channel_count_from_in_mask(srcChannelMask);
+    mDstChannelCount = audio_channel_count_from_in_mask(dstChannelMask);
+    mDstFrameSize = mDstChannelCount * audio_bytes_per_sample(mDstFormat);
+
+    // do we need to resample?
+    delete mResampler;
+    mResampler = NULL;
+    if (mSrcSampleRate != mDstSampleRate) {
+        mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_FLOAT,
+                mSrcChannelCount, mDstSampleRate);
+        mResampler->setSampleRate(mSrcSampleRate);
+        mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT);
+    }
+
+    // are we running legacy channel conversion modes?
+    mIsLegacyDownmix = (mSrcChannelMask == AUDIO_CHANNEL_IN_STEREO
+                            || mSrcChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK)
+                   && mDstChannelMask == AUDIO_CHANNEL_IN_MONO;
+    mIsLegacyUpmix = mSrcChannelMask == AUDIO_CHANNEL_IN_MONO
+                   && (mDstChannelMask == AUDIO_CHANNEL_IN_STEREO
+                            || mDstChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK);
+
+    // do we need to process in float?
+    mRequiresFloat = mResampler != NULL || mIsLegacyDownmix || mIsLegacyUpmix;
+
+    // do we need a staging buffer to convert for destination (we can still optimize this)?
+    // we use mBufFrameSize > 0 to indicate both frame size as well as buffer necessity
+    if (mResampler != NULL) {
+        mBufFrameSize = max(mSrcChannelCount, (uint32_t)FCC_2)
+                * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
+    } else if (mIsLegacyUpmix || mIsLegacyDownmix) { // legacy modes always float
+        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
+    } else if (mSrcChannelMask != mDstChannelMask && mDstFormat != mSrcFormat) {
+        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(mSrcFormat);
+    } else {
+        mBufFrameSize = 0;
+    }
+    mBufFrames = 0; // force the buffer to be resized.
+
+    // do we need an input converter buffer provider to give us float?
+    delete mInputConverterProvider;
+    mInputConverterProvider = NULL;
+    if (mRequiresFloat && mSrcFormat != AUDIO_FORMAT_PCM_FLOAT) {
+        mInputConverterProvider = new ReformatBufferProvider(
+                audio_channel_count_from_in_mask(mSrcChannelMask),
+                mSrcFormat,
+                AUDIO_FORMAT_PCM_FLOAT,
+                256 /* provider buffer frame count */);
+    }
+
+    // do we need a remixer to do channel mask conversion
+    if (!mIsLegacyDownmix && !mIsLegacyUpmix && mSrcChannelMask != mDstChannelMask) {
+        (void) memcpy_by_index_array_initialization_from_channel_mask(
+                mIdxAry, ARRAY_SIZE(mIdxAry), mDstChannelMask, mSrcChannelMask);
+    }
+    return NO_ERROR;
+}
+
+void RecordBufferConverter::convertNoResampler(
+        void *dst, const void *src, size_t frames)
+{
+    // src is native type unless there is legacy upmix or downmix, whereupon it is float.
+    if (mBufFrameSize != 0 && mBufFrames < frames) {
+        free(mBuf);
+        mBufFrames = frames;
+        (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
+    }
+    // do we need to do legacy upmix and downmix?
+    if (mIsLegacyUpmix || mIsLegacyDownmix) {
+        void *dstBuf = mBuf != NULL ? mBuf : dst;
+        if (mIsLegacyUpmix) {
+            upmix_to_stereo_float_from_mono_float((float *)dstBuf,
+                    (const float *)src, frames);
+        } else /*mIsLegacyDownmix */ {
+            downmix_to_mono_float_from_stereo_float((float *)dstBuf,
+                    (const float *)src, frames);
+        }
+        if (mBuf != NULL) {
+            memcpy_by_audio_format(dst, mDstFormat, mBuf, AUDIO_FORMAT_PCM_FLOAT,
+                    frames * mDstChannelCount);
+        }
+        return;
+    }
+    // do we need to do channel mask conversion?
+    if (mSrcChannelMask != mDstChannelMask) {
+        void *dstBuf = mBuf != NULL ? mBuf : dst;
+        memcpy_by_index_array(dstBuf, mDstChannelCount,
+                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mSrcFormat), frames);
+        if (dstBuf == dst) {
+            return; // format is the same
+        }
+    }
+    // convert to destination buffer
+    const void *convertBuf = mBuf != NULL ? mBuf : src;
+    memcpy_by_audio_format(dst, mDstFormat, convertBuf, mSrcFormat,
+            frames * mDstChannelCount);
+}
+
+void RecordBufferConverter::convertResampler(
+        void *dst, /*not-a-const*/ void *src, size_t frames)
+{
+    // src buffer format is ALWAYS float when entering this routine
+    if (mIsLegacyUpmix) {
+        ; // mono to stereo already handled by resampler
+    } else if (mIsLegacyDownmix
+            || (mSrcChannelMask == mDstChannelMask && mSrcChannelCount == 1)) {
+        // the resampler outputs stereo for mono input channel (a feature?)
+        // must convert to mono
+        downmix_to_mono_float_from_stereo_float((float *)src,
+                (const float *)src, frames);
+    } else if (mSrcChannelMask != mDstChannelMask) {
+        // convert to mono channel again for channel mask conversion (could be skipped
+        // with further optimization).
+        if (mSrcChannelCount == 1) {
+            downmix_to_mono_float_from_stereo_float((float *)src,
+                (const float *)src, frames);
+        }
+        // convert to destination format (in place, OK as float is larger than other types)
+        if (mDstFormat != AUDIO_FORMAT_PCM_FLOAT) {
+            memcpy_by_audio_format(src, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
+                    frames * mSrcChannelCount);
+        }
+        // channel convert and save to dst
+        memcpy_by_index_array(dst, mDstChannelCount,
+                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mDstFormat), frames);
+        return;
+    }
+    // convert to destination format and save to dst
+    memcpy_by_audio_format(dst, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
+            frames * mDstChannelCount);
+}
+
+// ----------------------------------------------------------------------------
+} // namespace android
diff --git a/media/libaudioprocessing/tests/resampler_tests.cpp b/media/libaudioprocessing/tests/resampler_tests.cpp
index 8d5e016..a23c000 100644
--- a/media/libaudioprocessing/tests/resampler_tests.cpp
+++ b/media/libaudioprocessing/tests/resampler_tests.cpp
@@ -32,8 +32,8 @@
 #include <utility>
 #include <vector>
 
-#include <android/log.h>
 #include <gtest/gtest.h>
+#include <log/log.h>
 #include <media/AudioBufferProvider.h>
 
 #include <media/AudioResampler.h>
diff --git a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
index 19d408d..9d29cf1 100644
--- a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
+++ b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
@@ -19,12 +19,13 @@
 
 #include <assert.h>
 #include <math.h>
-#include <new>
 #include <stdlib.h>
 #include <string.h>
 #include <time.h>
 
-#include <android/log.h>
+#include <new>
+
+#include <log/log.h>
 
 #include <audio_effects/effect_loudnessenhancer.h>
 #include "dsp/core/dynamic_range_compression.h"
diff --git a/media/libeffects/testlibs/EffectEqualizer.cpp b/media/libeffects/testlibs/EffectEqualizer.cpp
index f5e11a6..db4d009 100644
--- a/media/libeffects/testlibs/EffectEqualizer.cpp
+++ b/media/libeffects/testlibs/EffectEqualizer.cpp
@@ -22,9 +22,10 @@
 #include <assert.h>
 #include <stdlib.h>
 #include <string.h>
+
 #include <new>
 
-#include <android/log.h>
+#include <log/log.h>
 
 #include "AudioEqualizer.h"
 #include "AudioBiquadFilter.h"
diff --git a/media/libeffects/testlibs/EffectReverb.c b/media/libeffects/testlibs/EffectReverb.c
index 08bf9ae..fce9bed 100644
--- a/media/libeffects/testlibs/EffectReverb.c
+++ b/media/libeffects/testlibs/EffectReverb.c
@@ -21,7 +21,7 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include <android/log.h>
+#include <log/log.h>
 
 #include "EffectReverb.h"
 #include "EffectsMath.h"
diff --git a/media/libmedia/BufferingSettings.cpp b/media/libmedia/BufferingSettings.cpp
index 6dc4a53..a69497e 100644
--- a/media/libmedia/BufferingSettings.cpp
+++ b/media/libmedia/BufferingSettings.cpp
@@ -28,6 +28,16 @@
     return (mode >= BUFFERING_MODE_NONE && mode < BUFFERING_MODE_COUNT);
 }
 
+// static
+bool BufferingSettings::IsTimeBasedBufferingMode(int mode) {
+    return (mode == BUFFERING_MODE_TIME_ONLY || mode == BUFFERING_MODE_TIME_THEN_SIZE);
+}
+
+// static
+bool BufferingSettings::IsSizeBasedBufferingMode(int mode) {
+    return (mode == BUFFERING_MODE_SIZE_ONLY || mode == BUFFERING_MODE_TIME_THEN_SIZE);
+}
+
 BufferingSettings::BufferingSettings()
         : mInitialBufferingMode(BUFFERING_MODE_NONE),
           mRebufferingMode(BUFFERING_MODE_NONE),
@@ -70,4 +80,15 @@
     return OK;
 }
 
+String8 BufferingSettings::toString() const {
+    String8 s;
+    s.appendFormat("initialMode(%d), rebufferingMode(%d), "
+            "initialMarks(%d ms, %d KB), rebufferingMarks(%d, %d)ms, (%d, %d)KB",
+            mInitialBufferingMode, mRebufferingMode,
+            mInitialWatermarkMs, mInitialWatermarkKB,
+            mRebufferingWatermarkLowMs, mRebufferingWatermarkHighMs,
+            mRebufferingWatermarkLowKB, mRebufferingWatermarkHighKB);
+    return s;
+}
+
 } // namespace android
diff --git a/media/libmedia/OMXBuffer.cpp b/media/libmedia/OMXBuffer.cpp
index 914cd5b..8ea70e4 100644
--- a/media/libmedia/OMXBuffer.cpp
+++ b/media/libmedia/OMXBuffer.cpp
@@ -35,14 +35,10 @@
 
 OMXBuffer::OMXBuffer(const sp<MediaCodecBuffer>& codecBuffer)
     : mBufferType(kBufferTypePreset),
+      mRangeOffset(codecBuffer != NULL ? codecBuffer->offset() : 0),
       mRangeLength(codecBuffer != NULL ? codecBuffer->size() : 0) {
 }
 
-OMXBuffer::OMXBuffer(OMX_U32 rangeLength)
-    : mBufferType(kBufferTypePreset),
-      mRangeLength(rangeLength) {
-}
-
 OMXBuffer::OMXBuffer(const sp<IMemory> &mem)
     : mBufferType(kBufferTypeSharedMem),
       mMem(mem) {
@@ -67,6 +63,10 @@
     switch(mBufferType) {
         case kBufferTypePreset:
         {
+            status_t err = parcel->writeUint32(mRangeOffset);
+            if (err != OK) {
+                return err;
+            }
             return parcel->writeUint32(mRangeLength);
         }
 
@@ -97,7 +97,14 @@
     switch(bufferType) {
         case kBufferTypePreset:
         {
-            mRangeLength = parcel->readUint32();
+            status_t err = parcel->readUint32(&mRangeOffset);
+            if (err != OK) {
+                return err;
+            }
+            err = parcel->readUint32(&mRangeLength);
+            if (err != OK) {
+                return err;
+            }
             break;
         }
 
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 699172b..6bba1f1 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -244,6 +244,28 @@
     return mPlayer->setVideoSurfaceTexture(bufferProducer);
 }
 
+status_t MediaPlayer::getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */)
+{
+    ALOGV("getDefaultBufferingSettings");
+
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) {
+        return NO_INIT;
+    }
+    return mPlayer->getDefaultBufferingSettings(buffering);
+}
+
+status_t MediaPlayer::setBufferingSettings(const BufferingSettings& buffering)
+{
+    ALOGV("setBufferingSettings");
+
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) {
+        return NO_INIT;
+    }
+    return mPlayer->setBufferingSettings(buffering);
+}
+
 // must call with lock held
 status_t MediaPlayer::prepareAsync_l()
 {
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 065738e..3199495 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -975,13 +975,8 @@
 status_t MediaPlayerService::Client::setBufferingSettings(
         const BufferingSettings& buffering)
 {
-    ALOGV("[%d] setBufferingSettings(%d, %d, %d, %d, %d, %d, %d, %d)",
-            mConnId, buffering.mInitialBufferingMode, buffering.mRebufferingMode,
-            buffering.mInitialWatermarkMs, buffering.mInitialWatermarkKB,
-            buffering.mRebufferingWatermarkLowMs,
-            buffering.mRebufferingWatermarkHighMs,
-            buffering.mRebufferingWatermarkLowKB,
-            buffering.mRebufferingWatermarkHighKB);
+    ALOGV("[%d] setBufferingSettings{%s}",
+            mConnId, buffering.toString().string());
     sp<MediaPlayerBase> p = getPlayer();
     if (p == 0) return UNKNOWN_ERROR;
     return p->setBufferingSettings(buffering);
@@ -995,13 +990,8 @@
     if (p == 0) return UNKNOWN_ERROR;
     status_t ret = p->getDefaultBufferingSettings(buffering);
     if (ret == NO_ERROR) {
-        ALOGV("[%d] getDefaultBufferingSettings(%d, %d, %d, %d, %d, %d, %d, %d)",
-                mConnId, buffering->mInitialBufferingMode, buffering->mRebufferingMode,
-                buffering->mInitialWatermarkMs, buffering->mInitialWatermarkKB,
-                buffering->mRebufferingWatermarkLowMs,
-                buffering->mRebufferingWatermarkHighMs,
-                buffering->mRebufferingWatermarkLowKB,
-                buffering->mRebufferingWatermarkHighKB);
+        ALOGV("[%d] getDefaultBufferingSettings{%s}",
+                mConnId, buffering->toString().string());
     } else {
         ALOGV("[%d] getDefaultBufferingSettings returned %d", mConnId, ret);
     }
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index d1d1077..91a2b7b 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -38,11 +38,12 @@
 
 namespace android {
 
-static int64_t kLowWaterMarkUs = 2000000ll;  // 2secs
-static int64_t kHighWaterMarkUs = 5000000ll;  // 5secs
-static int64_t kHighWaterMarkRebufferUs = 15000000ll;  // 15secs
-static const ssize_t kLowWaterMarkBytes = 40000;
-static const ssize_t kHighWaterMarkBytes = 200000;
+static const int kLowWaterMarkMs          = 2000;  // 2secs
+static const int kHighWaterMarkMs         = 5000;  // 5secs
+static const int kHighWaterMarkRebufferMs = 15000;  // 15secs
+
+static const int kLowWaterMarkKB  = 40;
+static const int kHighWaterMarkKB = 200;
 
 NuPlayer::GenericSource::GenericSource(
         const sp<AMessage> &notify,
@@ -237,6 +238,16 @@
     return OK;
 }
 
+status_t NuPlayer::GenericSource::getDefaultBufferingSettings(
+        BufferingSettings* buffering /* nonnull */) {
+    mBufferingMonitor->getDefaultBufferingSettings(buffering);
+    return OK;
+}
+
+status_t NuPlayer::GenericSource::setBufferingSettings(const BufferingSettings& buffering) {
+    return mBufferingMonitor->setBufferingSettings(buffering);
+}
+
 status_t NuPlayer::GenericSource::startSources() {
     // Start the selected A/V tracks now before we start buffering.
     // Widevine sources might re-initialize crypto when starting, if we delay
@@ -618,6 +629,12 @@
           break;
       }
 
+      case kWhatGetTrackInfo:
+      {
+          onGetTrackInfo(msg);
+          break;
+      }
+
       case kWhatSelectTrack:
       {
           onSelectTrack(msg);
@@ -868,6 +885,34 @@
 }
 
 sp<AMessage> NuPlayer::GenericSource::getTrackInfo(size_t trackIndex) const {
+    sp<AMessage> msg = new AMessage(kWhatGetTrackInfo, this);
+    msg->setSize("trackIndex", trackIndex);
+
+    sp<AMessage> response;
+    sp<RefBase> format;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findObject("format", &format));
+        return static_cast<AMessage*>(format.get());
+    } else {
+        return NULL;
+    }
+}
+
+void NuPlayer::GenericSource::onGetTrackInfo(const sp<AMessage>& msg) const {
+    size_t trackIndex;
+    CHECK(msg->findSize("trackIndex", &trackIndex));
+
+    sp<AMessage> response = new AMessage;
+    sp<AMessage> format = doGetTrackInfo(trackIndex);
+    response->setObject("format", format);
+
+    sp<AReplyToken> replyID;
+    CHECK(msg->senderAwaitsResponse(&replyID));
+    response->postReply(replyID);
+}
+
+sp<AMessage> NuPlayer::GenericSource::doGetTrackInfo(size_t trackIndex) const {
     size_t trackCount = mSources.size();
     if (trackIndex >= trackCount) {
         return NULL;
@@ -1435,11 +1480,54 @@
       mFirstDequeuedBufferRealUs(-1ll),
       mFirstDequeuedBufferMediaUs(-1ll),
       mlastDequeuedBufferMediaUs(-1ll) {
+      getDefaultBufferingSettings(&mSettings);
 }
 
 NuPlayer::GenericSource::BufferingMonitor::~BufferingMonitor() {
 }
 
+void NuPlayer::GenericSource::BufferingMonitor::getDefaultBufferingSettings(
+        BufferingSettings *buffering /* nonnull */) {
+    buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mRebufferingMode = BUFFERING_MODE_TIME_THEN_SIZE;
+    buffering->mInitialWatermarkMs = kHighWaterMarkMs;
+    buffering->mRebufferingWatermarkLowMs = kLowWaterMarkMs;
+    buffering->mRebufferingWatermarkHighMs = kHighWaterMarkRebufferMs;
+    buffering->mRebufferingWatermarkLowKB = kLowWaterMarkKB;
+    buffering->mRebufferingWatermarkHighKB = kHighWaterMarkKB;
+
+    ALOGV("BufferingMonitor::getDefaultBufferingSettings{%s}",
+            buffering->toString().string());
+}
+
+status_t NuPlayer::GenericSource::BufferingMonitor::setBufferingSettings(
+        const BufferingSettings &buffering) {
+    ALOGV("BufferingMonitor::setBufferingSettings{%s}",
+            buffering.toString().string());
+
+    Mutex::Autolock _l(mLock);
+    if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
+            || (buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode)
+                && buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs)
+            || (buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
+                && buffering.mRebufferingWatermarkLowKB > buffering.mRebufferingWatermarkHighKB)) {
+        return BAD_VALUE;
+    }
+    mSettings = buffering;
+    if (mSettings.mInitialBufferingMode == BUFFERING_MODE_NONE) {
+        mSettings.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
+    }
+    if (!mSettings.IsTimeBasedBufferingMode(mSettings.mRebufferingMode)) {
+        mSettings.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
+        mSettings.mRebufferingWatermarkHighMs = INT32_MAX;
+    }
+    if (!mSettings.IsSizeBasedBufferingMode(mSettings.mRebufferingMode)) {
+        mSettings.mRebufferingWatermarkLowKB = BufferingSettings::kNoWatermark;
+        mSettings.mRebufferingWatermarkHighKB = INT32_MAX;
+    }
+    return OK;
+}
+
 void NuPlayer::GenericSource::BufferingMonitor::prepare(
         const sp<NuCachedSource2> &cachedSource,
         int64_t durationUs,
@@ -1668,7 +1756,9 @@
 
         stopBufferingIfNecessary_l();
         return;
-    } else if (cachedDurationUs >= 0ll) {
+    }
+
+    if (cachedDurationUs >= 0ll) {
         if (mDurationUs > 0ll) {
             int64_t cachedPosUs = getLastReadPosition_l() + cachedDurationUs;
             int percentage = 100.0 * cachedPosUs / mDurationUs;
@@ -1679,36 +1769,40 @@
             notifyBufferingUpdate_l(percentage);
         }
 
-        ALOGV("onPollBuffering_l: cachedDurationUs %.1f sec",
-                cachedDurationUs / 1000000.0f);
+        ALOGV("onPollBuffering_l: cachedDurationUs %.1f sec", cachedDurationUs / 1000000.0f);
 
-        if (cachedDurationUs < kLowWaterMarkUs) {
-            // Take into account the data cached in downstream components to try to avoid
-            // unnecessary pause.
-            if (mOffloadAudio && mFirstDequeuedBufferRealUs >= 0) {
-                int64_t downStreamCacheUs = mlastDequeuedBufferMediaUs - mFirstDequeuedBufferMediaUs
-                        - (ALooper::GetNowUs() - mFirstDequeuedBufferRealUs);
-                if (downStreamCacheUs > 0) {
-                    cachedDurationUs += downStreamCacheUs;
+        if (mPrepareBuffering) {
+            if (cachedDurationUs > mSettings.mInitialWatermarkMs * 1000) {
+                stopBufferingIfNecessary_l();
+            }
+        } else if (mSettings.IsTimeBasedBufferingMode(mSettings.mRebufferingMode)) {
+            if (cachedDurationUs < mSettings.mRebufferingWatermarkLowMs * 1000) {
+                // Take into account the data cached in downstream components to try to avoid
+                // unnecessary pause.
+                if (mOffloadAudio && mFirstDequeuedBufferRealUs >= 0) {
+                    int64_t downStreamCacheUs =
+                        mlastDequeuedBufferMediaUs - mFirstDequeuedBufferMediaUs
+                            - (ALooper::GetNowUs() - mFirstDequeuedBufferRealUs);
+                    if (downStreamCacheUs > 0) {
+                        cachedDurationUs += downStreamCacheUs;
+                    }
                 }
-            }
 
-            if (cachedDurationUs < kLowWaterMarkUs) {
-                startBufferingIfNecessary_l();
-            }
-        } else {
-            int64_t highWaterMark = mPrepareBuffering ? kHighWaterMarkUs : kHighWaterMarkRebufferUs;
-            if (cachedDurationUs > highWaterMark) {
+                if (cachedDurationUs < mSettings.mRebufferingWatermarkLowMs * 1000) {
+                    startBufferingIfNecessary_l();
+                }
+            } else if (cachedDurationUs > mSettings.mRebufferingWatermarkHighMs * 1000) {
                 stopBufferingIfNecessary_l();
             }
         }
-    } else if (cachedDataRemaining >= 0) {
+    } else if (cachedDataRemaining >= 0
+            && mSettings.IsSizeBasedBufferingMode(mSettings.mRebufferingMode)) {
         ALOGV("onPollBuffering_l: cachedDataRemaining %zd bytes",
                 cachedDataRemaining);
 
-        if (cachedDataRemaining < kLowWaterMarkBytes) {
+        if (cachedDataRemaining < (mSettings.mRebufferingWatermarkLowKB << 10)) {
             startBufferingIfNecessary_l();
-        } else if (cachedDataRemaining > kHighWaterMarkBytes) {
+        } else if (cachedDataRemaining > (mSettings.mRebufferingWatermarkHighKB << 10)) {
             stopBufferingIfNecessary_l();
         }
     }
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index a14056f..e1949f3 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -50,6 +50,10 @@
 
     status_t setDataSource(const sp<DataSource>& dataSource);
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual void prepareAsync();
 
     virtual void start();
@@ -119,6 +123,9 @@
     public:
         explicit BufferingMonitor(const sp<AMessage> &notify);
 
+        void getDefaultBufferingSettings(BufferingSettings *buffering /* nonnull */);
+        status_t setBufferingSettings(const BufferingSettings &buffering);
+
         // Set up state.
         void prepare(const sp<NuCachedSource2> &cachedSource,
                 int64_t durationUs,
@@ -167,6 +174,7 @@
 
         mutable Mutex mLock;
 
+        BufferingSettings mSettings;
         bool mOffloadAudio;
         int64_t mFirstDequeuedBufferRealUs;
         int64_t mFirstDequeuedBufferMediaUs;
@@ -245,6 +253,9 @@
     void onGetFormatMeta(const sp<AMessage>& msg) const;
     sp<MetaData> doGetFormatMeta(bool audio) const;
 
+    void onGetTrackInfo(const sp<AMessage>& msg) const;
+    sp<AMessage> doGetTrackInfo(size_t trackIndex) const;
+
     void onGetSelectedTrack(const sp<AMessage>& msg) const;
     ssize_t doGetSelectedTrack(media_track_type type) const;
 
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 51bfad4..05e6201 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -32,6 +32,11 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/Utils.h>
 
+// default buffer prepare/ready/underflow marks
+static const int kReadyMarkMs     = 5000;  // 5 seconds
+static const int kPrepareMarkMs   = 1500;  // 1.5 seconds
+static const int kUnderflowMarkMs = 1000;  // 1 second
+
 namespace android {
 
 NuPlayer::HTTPLiveSource::HTTPLiveSource(
@@ -49,6 +54,7 @@
       mFetchMetaDataGeneration(0),
       mHasMetadata(false),
       mMetadataSelected(false) {
+    getDefaultBufferingSettings(&mBufferingSettings);
     if (headers) {
         mExtraHeaders = *headers;
 
@@ -76,6 +82,42 @@
     }
 }
 
+status_t NuPlayer::HTTPLiveSource::getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) {
+    buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mRebufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mInitialWatermarkMs = kPrepareMarkMs;
+    buffering->mRebufferingWatermarkLowMs = kUnderflowMarkMs;
+    buffering->mRebufferingWatermarkHighMs = kReadyMarkMs;
+
+    return OK;
+}
+
+status_t NuPlayer::HTTPLiveSource::setBufferingSettings(const BufferingSettings& buffering) {
+    if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
+            || buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
+            || (buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode)
+                && buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs)) {
+        return BAD_VALUE;
+    }
+
+    mBufferingSettings = buffering;
+
+    if (mBufferingSettings.mInitialBufferingMode == BUFFERING_MODE_NONE) {
+        mBufferingSettings.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
+    }
+    if (mBufferingSettings.mRebufferingMode == BUFFERING_MODE_NONE) {
+        mBufferingSettings.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
+        mBufferingSettings.mRebufferingWatermarkHighMs = INT32_MAX;
+    }
+
+    if (mLiveSession != NULL) {
+        mLiveSession->setBufferingSettings(mBufferingSettings);
+    }
+
+    return OK;
+}
+
 void NuPlayer::HTTPLiveSource::prepareAsync() {
     if (mLiveLooper == NULL) {
         mLiveLooper = new ALooper;
@@ -94,6 +136,7 @@
 
     mLiveLooper->registerHandler(mLiveSession);
 
+    mLiveSession->setBufferingSettings(mBufferingSettings);
     mLiveSession->connectAsync(
             mURL.c_str(), mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
 }
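
setBufferingSettings() above accepts only time-based or disabled modes and requires the low rebuffering watermark not to exceed the high one; with BUFFERING_MODE_NONE the watermarks are reset to kNoWatermark and INT32_MAX. A minimal stand-alone model of that acceptance test (simplified enum in place of BufferingSettings, which has more modes):

    // Simplified model of the validation in HTTPLiveSource::setBufferingSettings().
    #include <cstdint>

    enum class Mode { None, TimeOnly, SizeOnly };   // simplified; the real API has more modes

    bool isAcceptable(Mode initialMode, Mode rebufferingMode, int32_t lowMs, int32_t highMs) {
        if (initialMode == Mode::SizeOnly || rebufferingMode == Mode::SizeOnly) {
            return false;                           // size-based buffering is rejected here
        }
        if (rebufferingMode == Mode::TimeOnly && lowMs > highMs) {
            return false;                           // low watermark must not exceed high
        }
        return true;
    }

With the defaults above (prepare 1500 ms, underflow 1000 ms, ready 5000 ms) the settings pass this test and are forwarded to the LiveSession once it exists.
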
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
index 45fc8c1..2866a6a 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
@@ -34,6 +34,10 @@
             const char *url,
             const KeyedVector<String8, String8> *headers);
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual void prepareAsync();
     virtual void start();
 
@@ -80,6 +84,7 @@
     int32_t mFetchMetaDataGeneration;
     bool mHasMetadata;
     bool mMetadataSelected;
+    BufferingSettings mBufferingSettings;
 
     void onSessionNotify(const sp<AMessage> &msg);
     void pollForRawData(
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index d1ade1d..4c576a5 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -314,6 +314,31 @@
     msg->post();
 }
 
+status_t NuPlayer::getDefaultBufferingSettings(
+        BufferingSettings *buffering /* nonnull */) {
+    sp<AMessage> msg = new AMessage(kWhatGetDefaultBufferingSettings, this);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+        if (err == OK) {
+            readFromAMessage(response, buffering);
+        }
+    }
+    return err;
+}
+
+status_t NuPlayer::setBufferingSettings(const BufferingSettings& buffering) {
+    sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
+    writeToAMessage(msg, buffering);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+    return err;
+}
+
 void NuPlayer::prepareAsync() {
     (new AMessage(kWhatPrepare, this))->post();
 }
@@ -508,6 +533,48 @@
             break;
         }
 
+        case kWhatGetDefaultBufferingSettings:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            ALOGV("kWhatGetDefaultBufferingSettings");
+            BufferingSettings buffering;
+            status_t err = OK;
+            if (mSource != NULL) {
+                err = mSource->getDefaultBufferingSettings(&buffering);
+            } else {
+                err = INVALID_OPERATION;
+            }
+            sp<AMessage> response = new AMessage;
+            if (err == OK) {
+                writeToAMessage(response, buffering);
+            }
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatSetBufferingSettings:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            ALOGV("kWhatSetBufferingSettings");
+            BufferingSettings buffering;
+            readFromAMessage(msg, &buffering);
+            status_t err = OK;
+            if (mSource != NULL) {
+                err = mSource->setBufferingSettings(buffering);
+            } else {
+                err = INVALID_OPERATION;
+            }
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
         case kWhatPrepare:
         {
             mSource->prepareAsync();
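
Both new NuPlayer entry points use the same blocking handshake as the surrounding code: post an AMessage, wait in postAndAwaitResponse(), and have the handler reply through the captured AReplyToken with an "err" field. A minimal stand-alone model of that handshake, using std::promise/std::future in place of AMessage and AReplyToken:

    // Toy model of the post-and-await-response pattern used above.
    #include <cstdint>
    #include <future>
    #include <thread>

    struct Reply { int32_t err; };                 // stands in for the "err" int32 in the response

    int32_t postAndAwait() {
        std::promise<Reply> replyToken;            // like the AReplyToken captured by the handler
        std::future<Reply> response = replyToken.get_future();

        std::thread handler([&replyToken] {        // the handler thread services the message...
            replyToken.set_value(Reply{0 /* OK */});   // ...and posts the reply
        });

        int32_t err = response.get().err;          // caller unblocks once the reply arrives
        handler.join();
        return err;
    }

    int main() { return postAndAwait(); }
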
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 6f737bb..cc8c97a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -50,6 +50,9 @@
 
     void setDataSourceAsync(const sp<DataSource> &source);
 
+    status_t getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */);
+    status_t setBufferingSettings(const BufferingSettings& buffering);
+
     void prepareAsync();
 
     void setVideoSurfaceTextureAsync(
@@ -137,6 +140,8 @@
         kWhatGetTrackInfo               = 'gTrI',
         kWhatGetSelectedTrack           = 'gSel',
         kWhatSelectTrack                = 'selT',
+        kWhatGetDefaultBufferingSettings = 'gDBS',
+        kWhatSetBufferingSettings       = 'sBuS',
     };
 
     wp<NuPlayerDriver> mDriver;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index f7e56e4..b8bb8fe 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -204,6 +204,26 @@
     return OK;
 }
 
+status_t NuPlayerDriver::getDefaultBufferingSettings(BufferingSettings* buffering) {
+    ALOGV("getDefaultBufferingSettings(%p)", this);
+    Mutex::Autolock autoLock(mLock);
+    if (mState == STATE_IDLE) {
+        return INVALID_OPERATION;
+    }
+
+    return mPlayer->getDefaultBufferingSettings(buffering);
+}
+
+status_t NuPlayerDriver::setBufferingSettings(const BufferingSettings& buffering) {
+    ALOGV("setBufferingSettings(%p)", this);
+    Mutex::Autolock autoLock(mLock);
+    if (mState == STATE_IDLE) {
+        return INVALID_OPERATION;
+    }
+
+    return mPlayer->setBufferingSettings(buffering);
+}
+
 status_t NuPlayerDriver::prepare() {
     ALOGV("prepare(%p)", this);
     Mutex::Autolock autoLock(mLock);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index 9b784ae..5bfc539 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -44,6 +44,11 @@
 
     virtual status_t setVideoSurfaceTexture(
             const sp<IGraphicBufferProducer> &bufferProducer);
+
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual status_t prepare();
     virtual status_t prepareAsync();
     virtual status_t start();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 6006730..0429ef1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -63,6 +63,10 @@
         : mNotify(notify) {
     }
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) = 0;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) = 0;
+
     virtual void prepareAsync() = 0;
 
     virtual void start() = 0;
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index fb1f31a..9264e49 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -32,11 +32,11 @@
 
 const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs
 
-// Buffer Underflow/Prepare/StartServer/Overflow Marks
-const int64_t NuPlayer::RTSPSource::kUnderflowMarkUs   =  1000000ll;
-const int64_t NuPlayer::RTSPSource::kPrepareMarkUs     =  3000000ll;
-const int64_t NuPlayer::RTSPSource::kStartServerMarkUs =  5000000ll;
-const int64_t NuPlayer::RTSPSource::kOverflowMarkUs    = 10000000ll;
+// Default Buffer Underflow/Prepare/StartServer/Overflow Marks
+static const int kUnderflowMarkMs   =  1000;  // 1 second
+static const int kPrepareMarkMs     =  3000;  // 3 seconds
+//static const int kStartServerMarkMs =  5000;
+static const int kOverflowMarkMs    = 10000;  // 10 seconds
 
 NuPlayer::RTSPSource::RTSPSource(
         const sp<AMessage> &notify,
@@ -62,6 +62,7 @@
       mSeekGeneration(0),
       mEOSTimeoutAudio(0),
       mEOSTimeoutVideo(0) {
+    getDefaultBufferingSettings(&mBufferingSettings);
     if (headers) {
         mExtraHeaders = *headers;
 
@@ -83,6 +84,34 @@
     }
 }
 
+status_t NuPlayer::RTSPSource::getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) {
+    buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mRebufferingMode = BUFFERING_MODE_TIME_ONLY;
+    buffering->mInitialWatermarkMs = kPrepareMarkMs;
+    buffering->mRebufferingWatermarkLowMs = kUnderflowMarkMs;
+    buffering->mRebufferingWatermarkHighMs = kOverflowMarkMs;
+
+    return OK;
+}
+
+status_t NuPlayer::RTSPSource::setBufferingSettings(const BufferingSettings& buffering) {
+    if (mLooper == NULL) {
+        mBufferingSettings = buffering;
+        return OK;
+    }
+
+    sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
+    writeToAMessage(msg, buffering);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+
+    return err;
+}
+
 void NuPlayer::RTSPSource::prepareAsync() {
     if (mIsSDP && mHTTPService == NULL) {
         notifyPrepared(BAD_VALUE);
@@ -328,7 +357,8 @@
         int64_t bufferedDurationUs = src->getBufferedDurationUs(&finalResult);
 
         // isFinished when duration is 0 checks for EOS result only
-        if (bufferedDurationUs > kPrepareMarkUs || src->isFinished(/* duration */ 0)) {
+        if (bufferedDurationUs > mBufferingSettings.mInitialWatermarkMs * 1000
+                || src->isFinished(/* duration */ 0)) {
             ++preparedCount;
         }
 
@@ -336,13 +366,16 @@
             ++overflowCount;
             ++finishedCount;
         } else {
-            if (bufferedDurationUs < kUnderflowMarkUs) {
+            if (bufferedDurationUs < mBufferingSettings.mRebufferingWatermarkLowMs * 1000) {
                 ++underflowCount;
             }
-            if (bufferedDurationUs > kOverflowMarkUs) {
+            if (bufferedDurationUs > mBufferingSettings.mRebufferingWatermarkHighMs * 1000) {
                 ++overflowCount;
             }
-            if (bufferedDurationUs < kStartServerMarkUs) {
+            int64_t startServerMarkUs =
+                    (mBufferingSettings.mRebufferingWatermarkLowMs
+                        + mBufferingSettings.mRebufferingWatermarkHighMs) / 2 * 1000ll;
+            if (bufferedDurationUs < startServerMarkUs) {
                 ++startCount;
             }
         }
@@ -479,6 +512,36 @@
     } else if (msg->what() == kWhatSignalEOS) {
         onSignalEOS(msg);
         return;
+    } else if (msg->what() == kWhatSetBufferingSettings) {
+        sp<AReplyToken> replyID;
+        CHECK(msg->senderAwaitsResponse(&replyID));
+
+        BufferingSettings buffering;
+        readFromAMessage(msg, &buffering);
+
+        status_t err = OK;
+        if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
+                || buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
+                || (buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs
+                    && buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode))) {
+            err = BAD_VALUE;
+        } else {
+            if (buffering.mInitialBufferingMode == BUFFERING_MODE_NONE) {
+                buffering.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
+            }
+            if (buffering.mRebufferingMode == BUFFERING_MODE_NONE) {
+                buffering.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
+                buffering.mRebufferingWatermarkHighMs = INT32_MAX;
+            }
+
+            mBufferingSettings = buffering;
+        }
+
+        sp<AMessage> response = new AMessage;
+        response->setInt32("err", err);
+        response->postReply(replyID);
+
+        return;
     }
 
     CHECK_EQ(msg->what(), (int)kWhatNotify);
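
With the fixed start-server constant gone, the mark is now derived from the watermarks as startServerMarkUs = (low + high) / 2 * 1000. Using the defaults above (low = 1000 ms, high = 10000 ms) that is (1000 + 10000) / 2 * 1000 = 5,500,000 us, close to the old kStartServerMarkUs of 5,000,000 us, and it scales automatically if an application raises or lowers the watermarks.
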
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index 363f8bb..0812991 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -40,6 +40,10 @@
             uid_t uid = 0,
             bool isSDP = false);
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual void prepareAsync();
     virtual void start();
     virtual void stop();
@@ -67,6 +71,7 @@
         kWhatPerformSeek     = 'seek',
         kWhatPollBuffering   = 'poll',
         kWhatSignalEOS       = 'eos ',
+        kWhatSetBufferingSettings = 'sBuS',
     };
 
     enum State {
@@ -81,12 +86,6 @@
         kFlagIncognito = 1,
     };
 
-    // Buffer Prepare/Underflow/Overflow/Resume Marks
-    static const int64_t kPrepareMarkUs;
-    static const int64_t kUnderflowMarkUs;
-    static const int64_t kOverflowMarkUs;
-    static const int64_t kStartServerMarkUs;
-
     struct TrackInfo {
         sp<AnotherPacketSource> mSource;
 
@@ -110,6 +109,7 @@
     bool mBuffering;
     bool mInPreparationPhase;
     bool mEOSPending;
+    BufferingSettings mBufferingSettings;
 
     sp<ALooper> mLooper;
     sp<MyHandler> mHandler;
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index d6b1e8c..fc0803b 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -51,6 +51,22 @@
     }
 }
 
+status_t NuPlayer::StreamingSource::getDefaultBufferingSettings(
+        BufferingSettings *buffering /* nonnull */) {
+    *buffering = BufferingSettings();
+    return OK;
+}
+
+status_t NuPlayer::StreamingSource::setBufferingSettings(
+        const BufferingSettings &buffering) {
+    if (buffering.mInitialBufferingMode != BUFFERING_MODE_NONE
+            || buffering.mRebufferingMode != BUFFERING_MODE_NONE) {
+        return BAD_VALUE;
+    }
+
+    return OK;
+}
+
 void NuPlayer::StreamingSource::prepareAsync() {
     if (mLooper == NULL) {
         mLooper = new ALooper;
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.h b/media/libmediaplayerservice/nuplayer/StreamingSource.h
index db88c7f..2e1d2b3 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.h
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.h
@@ -32,6 +32,10 @@
             const sp<AMessage> &notify,
             const sp<IStreamSource> &source);
 
+    virtual status_t getDefaultBufferingSettings(
+            BufferingSettings* buffering /* nonnull */) override;
+    virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
     virtual void prepareAsync();
     virtual void start();
 
diff --git a/media/liboboe/include/oboe/OboeAudio.h b/media/liboboe/include/oboe/OboeAudio.h
index 2181b8c..52e3f69 100644
--- a/media/liboboe/include/oboe/OboeAudio.h
+++ b/media/liboboe/include/oboe/OboeAudio.h
@@ -26,7 +26,6 @@
 extern "C" {
 #endif
 
-typedef int32_t OboeDeviceId;
 typedef oboe_handle_t OboeStream;
 typedef oboe_handle_t OboeStreamBuilder;
 
@@ -92,10 +91,18 @@
  *
  * By default, the primary device will be used.
  *
+ * @param builder handle provided by Oboe_createStreamBuilder()
+ * @param deviceId platform specific identifier or OBOE_DEVICE_UNSPECIFIED
  * @return OBOE_OK or a negative error.
  */
 OBOE_API oboe_result_t OboeStreamBuilder_setDeviceId(OboeStreamBuilder builder,
-                                                     OboeDeviceId deviceId);
+                                                     oboe_device_id_t deviceId);
+/**
+ * Passes back requested device ID.
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStreamBuilder_getDeviceId(OboeStreamBuilder builder,
+                                                     oboe_device_id_t *deviceId);
 
 /**
  * Request a sample rate in Hz.
@@ -111,14 +118,14 @@
  * @return OBOE_OK or a negative error.
  */
 OBOE_API oboe_result_t OboeStreamBuilder_setSampleRate(OboeStreamBuilder builder,
-                                              oboe_sample_rate_t sampleRate);
+                                                       oboe_sample_rate_t sampleRate);
 
 /**
  * Returns sample rate in Hertz (samples per second).
  * @return OBOE_OK or a negative error.
  */
 OBOE_API oboe_result_t OboeStreamBuilder_getSampleRate(OboeStreamBuilder builder,
-                                              oboe_sample_rate_t *sampleRate);
+                                                       oboe_sample_rate_t *sampleRate);
 
 
 /**
@@ -362,6 +369,8 @@
 // High priority audio threads
 // ============================================================
 
+typedef void *(oboe_audio_thread_proc_t)(void *);
+
 /**
  * Create a thread associated with a stream. The thread has special properties for
  * low latency audio performance. This thread can be used to implement a callback API.
@@ -378,7 +387,8 @@
  */
 OBOE_API oboe_result_t OboeStream_createThread(OboeStream stream,
                                      oboe_nanoseconds_t periodNanoseconds,
-                                     void *(*startRoutine)(void *), void *arg);
+                                     oboe_audio_thread_proc_t *threadProc,
+                                     void *arg);
 
 /**
  * Wait until the thread exits or an error occurs.
@@ -475,6 +485,13 @@
 
 /**
  * @param stream handle provided by OboeStreamBuilder_openStream()
+ * @param deviceId pointer to variable to receive the actual device ID
+ * @return OBOE_OK or a negative error.
+ */
+OBOE_API oboe_result_t OboeStream_getDeviceId(OboeStream stream, oboe_device_id_t *deviceId);
+
+/**
+ * @param stream handle provided by OboeStreamBuilder_openStream()
  * @param format pointer to variable to receive the actual data format
  * @return OBOE_OK or a negative error.
  */
@@ -554,4 +571,4 @@
 }
 #endif
 
-#endif //NATIVEOBOE_OBOEAUDIO_H
+#endif //OBOE_OBOEAUDIO_H
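
A sketch of how a client might use the renamed device-ID type and the new thread-proc typedef. It assumes builder and stream handles obtained elsewhere (e.g. via Oboe_createStreamBuilder() and OboeStreamBuilder_openStream(), whose signatures are outside this hunk), and the 2 ms callback period is illustrative:

    // Usage sketch for the updated OboeAudio.h API (assumptions noted above).
    #include <oboe/OboeAudio.h>
    #include <oboe/OboeDefinitions.h>

    // Callback matching the new oboe_audio_thread_proc_t typedef.
    static void *myAudioProc(void *arg) {
        (void) arg;                       // render or capture audio here
        return nullptr;
    }

    void configureAndStartThread(OboeStreamBuilder builder, OboeStream stream) {
        // Let the system choose the device, or pass a platform-specific id.
        OboeStreamBuilder_setDeviceId(builder, OBOE_DEVICE_UNSPECIFIED);
        OboeStreamBuilder_setSampleRate(builder, 48000);

        oboe_device_id_t actualDevice = OBOE_DEVICE_UNSPECIFIED;
        OboeStream_getDeviceId(stream, &actualDevice);   // query the device actually granted

        // Roughly 2 ms between callbacks; tune per stream.
        oboe_nanoseconds_t period = 2 * OBOE_NANOS_PER_MILLISECOND;
        OboeStream_createThread(stream, period, myAudioProc, nullptr);
    }
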
diff --git a/media/liboboe/include/oboe/OboeDefinitions.h b/media/liboboe/include/oboe/OboeDefinitions.h
index d80c958..9d56a24 100644
--- a/media/liboboe/include/oboe/OboeDefinitions.h
+++ b/media/liboboe/include/oboe/OboeDefinitions.h
@@ -25,6 +25,10 @@
 
 typedef int32_t  oboe_handle_t; // negative handles are error codes
 typedef int32_t  oboe_result_t;
+/**
+ * A platform specific identifier for a device.
+ */
+typedef int32_t  oboe_device_id_t;
 typedef int32_t  oboe_sample_rate_t;
 /** This is used for small quantities such as the number of frames in a buffer. */
 typedef int32_t  oboe_size_frames_t;
@@ -38,7 +42,6 @@
 typedef int64_t  oboe_position_frames_t;
 
 typedef int64_t  oboe_nanoseconds_t;
-typedef uint32_t oboe_audio_format_t;
 
 /**
  * This is used to represent a value that has not been specified.
@@ -47,6 +50,7 @@
  * and would accept whatever it was given.
  */
 #define OBOE_UNSPECIFIED           0
+#define OBOE_DEVICE_UNSPECIFIED    ((oboe_device_id_t) -1)
 #define OBOE_NANOS_PER_MICROSECOND ((int64_t)1000)
 #define OBOE_NANOS_PER_MILLISECOND (OBOE_NANOS_PER_MICROSECOND * 1000)
 #define OBOE_MILLIS_PER_SECOND     1000
@@ -60,60 +64,15 @@
     OBOE_DIRECTION_COUNT // This should always be last.
 };
 
-enum oboe_datatype_t {
-    OBOE_AUDIO_DATATYPE_INT16,
-    OBOE_AUDIO_DATATYPE_INT32,
-    OBOE_AUDIO_DATATYPE_INT824,
-    OBOE_AUDIO_DATATYPE_UINT8,
-    OBOE_AUDIO_DATATYPE_FLOAT32, // Add new values below.
-    OBOE_AUDIO_DATATYPE_COUNT // This should always be last.
+enum oboe_audio_format_t {
+    OBOE_AUDIO_FORMAT_INVALID = -1,
+    OBOE_AUDIO_FORMAT_UNSPECIFIED = 0,
+    OBOE_AUDIO_FORMAT_PCM16, // TODO rename to _PCM_I16
+    OBOE_AUDIO_FORMAT_PCM_FLOAT,
+    OBOE_AUDIO_FORMAT_PCM824, // TODO rename to _PCM_I8_24
+    OBOE_AUDIO_FORMAT_PCM32  // TODO rename to _PCM_I32
 };
 
-enum oboe_content_t {
-    OBOE_AUDIO_CONTENT_PCM,
-    OBOE_AUDIO_CONTENT_MP3,
-    OBOE_AUDIO_CONTENT_AAC,
-    OBOE_AUDIO_CONTENT_AC3,
-    OBOE_AUDIO_CONTENT_EAC3,
-    OBOE_AUDIO_CONTENT_DTS,
-    OBOE_AUDIO_CONTENT_DTSHD, // Add new values below.
-    OBOE_AUDIO_CONTENT_COUNT // This should always be last.
-};
-
-enum oboe_wrapper_t {
-    OBOE_AUDIO_WRAPPER_NONE,
-    OBOE_AUDIO_WRAPPER_IEC61937, // Add new values below.
-    OBOE_AUDIO_WRAPPER_COUNT // This should always be last.
-};
-
-/**
- * Fields packed into oboe_audio_format_t, from most to least significant bits.
- *   Invalid:1
- *   Reserved:7
- *   Wrapper:8
- *   Content:8
- *   Data Type:8
- */
-#define OBOE_AUDIO_FORMAT(dataType, content, wrapper) \
-    ((oboe_audio_format_t)((wrapper << 16) | (content << 8) | dataType))
-
-#define OBOE_AUDIO_FORMAT_RAW(dataType, content) \
-                OBOE_AUDIO_FORMAT(dataType, content, OBOE_AUDIO_WRAPPER_NONE)
-
-#define OBOE_AUDIO_FORMAT_DATA_TYPE(format) \
-    ((oboe_datatype_t)(format & 0x0FF))
-
-// Define some common formats.
-#define OBOE_AUDIO_FORMAT_PCM16  \
-                OBOE_AUDIO_FORMAT_RAW(OBOE_AUDIO_DATATYPE_INT16, OBOE_AUDIO_CONTENT_PCM)
-#define OBOE_AUDIO_FORMAT_PCM_FLOAT \
-                OBOE_AUDIO_FORMAT_RAW(OBOE_AUDIO_DATATYPE_FLOAT32, OBOE_AUDIO_CONTENT_PCM)
-#define OBOE_AUDIO_FORMAT_PCM824 \
-                OBOE_AUDIO_FORMAT_RAW(OBOE_AUDIO_DATATYPE_INT824, OBOE_AUDIO_CONTENT_PCM)
-#define OBOE_AUDIO_FORMAT_PCM32 \
-                OBOE_AUDIO_FORMAT_RAW(OBOE_AUDIO_DATATYPE_INT32, OBOE_AUDIO_CONTENT_PCM)
-#define OBOE_AUDIO_FORMAT_INVALID ((oboe_audio_format_t)-1)
-
 enum {
     OBOE_OK,
     OBOE_ERROR_BASE = -900, // TODO review
diff --git a/media/liboboe/src/Android.mk b/media/liboboe/src/Android.mk
index 7b9a906..59edcb2 100644
--- a/media/liboboe/src/Android.mk
+++ b/media/liboboe/src/Android.mk
@@ -8,28 +8,49 @@
 LOCAL_MODULE := liboboe
 LOCAL_MODULE_TAGS := optional
 
+LIBOBOE_DIR := $(TOP)/frameworks/av/media/liboboe
+LIBOBOE_SRC_DIR := $(LIBOBOE_DIR)/src
+
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-utils) \
     frameworks/native/include \
     system/core/base/include \
     frameworks/native/media/liboboe/include/include \
     frameworks/av/media/liboboe/include \
+    frameworks/native/include \
+    $(LOCAL_PATH) \
+    $(LOCAL_PATH)/binding \
+    $(LOCAL_PATH)/client \
     $(LOCAL_PATH)/core \
-    $(LOCAL_PATH)/utility \
-    $(LOCAL_PATH)/legacy
+    $(LOCAL_PATH)/fifo \
+    $(LOCAL_PATH)/legacy \
+    $(LOCAL_PATH)/utility
 
-LOCAL_SRC_FILES += core/AudioStream.cpp
-LOCAL_SRC_FILES += core/AudioStreamBuilder.cpp
-LOCAL_SRC_FILES += core/OboeAudio.cpp
-LOCAL_SRC_FILES += legacy/AudioStreamRecord.cpp
-LOCAL_SRC_FILES += legacy/AudioStreamTrack.cpp
-LOCAL_SRC_FILES += utility/HandleTracker.cpp
-LOCAL_SRC_FILES += utility/OboeUtilities.cpp
+LOCAL_SRC_FILES = \
+    core/AudioStream.cpp \
+    core/AudioStreamBuilder.cpp \
+    core/OboeAudio.cpp \
+    legacy/AudioStreamRecord.cpp \
+    legacy/AudioStreamTrack.cpp \
+    utility/HandleTracker.cpp \
+    utility/OboeUtilities.cpp \
+    fifo/FifoBuffer.cpp \
+    fifo/FifoControllerBase.cpp \
+    client/AudioEndpoint.cpp \
+    client/AudioStreamInternal.cpp \
+    client/IsochronousClockModel.cpp \
+    binding/SharedMemoryParcelable.cpp \
+    binding/SharedRegionParcelable.cpp \
+    binding/RingBufferParcelable.cpp \
+    binding/AudioEndpointParcelable.cpp \
+    binding/OboeStreamRequest.cpp \
+    binding/OboeStreamConfiguration.cpp \
+    binding/IOboeAudioService.cpp
 
-LOCAL_CFLAGS += -Wno-unused-parameter
-LOCAL_CFLAGS += -Wall -Werror
+LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
+
 # By default, all symbols are hidden.
-LOCAL_CFLAGS += -fvisibility=hidden
+# LOCAL_CFLAGS += -fvisibility=hidden
 # OBOE_API is used to explicitly export a function or a variable as a visible symbol.
 LOCAL_CFLAGS += -DOBOE_API='__attribute__((visibility("default")))'
 
@@ -47,24 +68,41 @@
     system/core/base/include \
     frameworks/native/media/liboboe/include/include \
     frameworks/av/media/liboboe/include \
+    $(LOCAL_PATH) \
+    $(LOCAL_PATH)/binding \
+    $(LOCAL_PATH)/client \
     $(LOCAL_PATH)/core \
-    $(LOCAL_PATH)/utility \
-    $(LOCAL_PATH)/legacy
+    $(LOCAL_PATH)/fifo \
+    $(LOCAL_PATH)/legacy \
+    $(LOCAL_PATH)/utility
 
-LOCAL_SRC_FILES += core/AudioStream.cpp
-LOCAL_SRC_FILES += core/AudioStreamBuilder.cpp
-LOCAL_SRC_FILES += core/OboeAudio.cpp
-LOCAL_SRC_FILES += legacy/AudioStreamRecord.cpp
-LOCAL_SRC_FILES += legacy/AudioStreamTrack.cpp
-LOCAL_SRC_FILES += utility/HandleTracker.cpp
-LOCAL_SRC_FILES += utility/OboeUtilities.cpp
+LOCAL_SRC_FILES = core/AudioStream.cpp \
+    core/AudioStreamBuilder.cpp \
+    core/OboeAudio.cpp \
+    legacy/AudioStreamRecord.cpp \
+    legacy/AudioStreamTrack.cpp \
+    utility/HandleTracker.cpp \
+    utility/OboeUtilities.cpp \
+    fifo/FifoBuffer.cpp \
+    fifo/FifoControllerBase.cpp \
+    client/AudioEndpoint.cpp \
+    client/AudioStreamInternal.cpp \
+    client/IsochronousClockModel.cpp \
+    binding/SharedMemoryParcelable.cpp \
+    binding/SharedRegionParcelable.cpp \
+    binding/RingBufferParcelable.cpp \
+    binding/AudioEndpointParcelable.cpp \
+    binding/OboeStreamRequest.cpp \
+    binding/OboeStreamConfiguration.cpp \
+    binding/IOboeAudioService.cpp
 
-LOCAL_CFLAGS += -Wno-unused-parameter
-LOCAL_CFLAGS += -Wall -Werror
+LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
+
 # By default, all symbols are hidden.
-LOCAL_CFLAGS += -fvisibility=hidden
+# LOCAL_CFLAGS += -fvisibility=hidden
 # OBOE_API is used to explicitly export a function or a variable as a visible symbol.
 LOCAL_CFLAGS += -DOBOE_API='__attribute__((visibility("default")))'
 
-LOCAL_SHARED_LIBRARIES := libaudioclient liblog libutils
+LOCAL_SHARED_LIBRARIES := libaudioclient liblog libcutils libutils libbinder
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/liboboe/src/binding/AudioEndpointParcelable.cpp b/media/liboboe/src/binding/AudioEndpointParcelable.cpp
new file mode 100644
index 0000000..096a819
--- /dev/null
+++ b/media/liboboe/src/binding/AudioEndpointParcelable.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include "binding/OboeServiceDefinitions.h"
+#include "binding/RingBufferParcelable.h"
+#include "binding/AudioEndpointParcelable.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace oboe;
+
+/**
+ * Container for information about the message queues plus
+ * general stream information needed by Oboe clients.
+ * It contains no addresses, just sizes, offsets and file descriptors for
+ * shared memory that can be passed through Binder.
+ */
+AudioEndpointParcelable::AudioEndpointParcelable() {}
+
+AudioEndpointParcelable::~AudioEndpointParcelable() {}
+
+/**
+ * Add the file descriptor to the table.
+ * @return index in table or negative error
+ */
+int32_t AudioEndpointParcelable::addFileDescriptor(int fd, int32_t sizeInBytes) {
+    if (mNumSharedMemories >= MAX_SHARED_MEMORIES) {
+        return OBOE_ERROR_OUT_OF_RANGE;
+    }
+    int32_t index = mNumSharedMemories++;
+    mSharedMemories[index].setup(fd, sizeInBytes);
+    return index;
+}
+
+/**
+ * The read and write must be symmetric.
+ */
+status_t AudioEndpointParcelable::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mNumSharedMemories);
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        mSharedMemories[i].writeToParcel(parcel);
+    }
+    mUpMessageQueueParcelable.writeToParcel(parcel);
+    mDownMessageQueueParcelable.writeToParcel(parcel);
+    mUpDataQueueParcelable.writeToParcel(parcel);
+    mDownDataQueueParcelable.writeToParcel(parcel);
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t AudioEndpointParcelable::readFromParcel(const Parcel* parcel) {
+    parcel->readInt32(&mNumSharedMemories);
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        mSharedMemories[i].readFromParcel(parcel);
+    }
+    mUpMessageQueueParcelable.readFromParcel(parcel);
+    mDownMessageQueueParcelable.readFromParcel(parcel);
+    mUpDataQueueParcelable.readFromParcel(parcel);
+    mDownDataQueueParcelable.readFromParcel(parcel);
+    return NO_ERROR; // TODO check for errors above
+}
+
+oboe_result_t AudioEndpointParcelable::resolve(EndpointDescriptor *descriptor) {
+    // TODO error check
+    mUpMessageQueueParcelable.resolve(mSharedMemories, &descriptor->upMessageQueueDescriptor);
+    mDownMessageQueueParcelable.resolve(mSharedMemories,
+                                        &descriptor->downMessageQueueDescriptor);
+    mUpDataQueueParcelable.resolve(mSharedMemories, &descriptor->upDataQueueDescriptor);
+    mDownDataQueueParcelable.resolve(mSharedMemories, &descriptor->downDataQueueDescriptor);
+    return OBOE_OK;
+}
+
+oboe_result_t AudioEndpointParcelable::validate() {
+    oboe_result_t result;
+    if (mNumSharedMemories < 0 || mNumSharedMemories >= MAX_SHARED_MEMORIES) {
+        ALOGE("AudioEndpointParcelable invalid mNumSharedMemories = %d", mNumSharedMemories);
+        return OBOE_ERROR_INTERNAL;
+    }
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        result = mSharedMemories[i].validate();
+        if (result != OBOE_OK) {
+            return result;
+        }
+    }
+    if ((result = mUpMessageQueueParcelable.validate()) != OBOE_OK) {
+        ALOGE("AudioEndpointParcelable invalid mUpMessageQueueParcelable = %d", result);
+        return result;
+    }
+    if ((result = mDownMessageQueueParcelable.validate()) != OBOE_OK) {
+        ALOGE("AudioEndpointParcelable invalid mDownMessageQueueParcelable = %d", result);
+        return result;
+    }
+    if ((result = mUpDataQueueParcelable.validate()) != OBOE_OK) {
+        ALOGE("AudioEndpointParcelable invalid mUpDataQueueParcelable = %d", result);
+        return result;
+    }
+    if ((result = mDownDataQueueParcelable.validate()) != OBOE_OK) {
+        ALOGE("AudioEndpointParcelable invalid mDownDataQueueParcelable = %d", result);
+        return result;
+    }
+    return OBOE_OK;
+}
+
+void AudioEndpointParcelable::dump() {
+    ALOGD("AudioEndpointParcelable ======================================= BEGIN");
+    ALOGD("AudioEndpointParcelable mNumSharedMemories = %d", mNumSharedMemories);
+    for (int i = 0; i < mNumSharedMemories; i++) {
+        mSharedMemories[i].dump();
+    }
+    ALOGD("AudioEndpointParcelable mUpMessageQueueParcelable =========");
+    mUpMessageQueueParcelable.dump();
+    ALOGD("AudioEndpointParcelable mDownMessageQueueParcelable =======");
+    mDownMessageQueueParcelable.dump();
+    ALOGD("AudioEndpointParcelable mUpDataQueueParcelable ============");
+    mUpDataQueueParcelable.dump();
+    ALOGD("AudioEndpointParcelable mDownDataQueueParcelable ==========");
+    mDownDataQueueParcelable.dump();
+    ALOGD("AudioEndpointParcelable ======================================= END");
+}
+
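
The "read and write must be symmetric" comment is the central contract for every Parcelable in this patch: fields must be read back in exactly the order they were written, or everything after the first mismatch is misinterpreted. A toy, self-contained illustration of the contract (a std::vector stands in for the Parcel):

    // Toy illustration of the write/read symmetry contract.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct ToyParcel {
        std::vector<int32_t> data;
        size_t pos = 0;
        void writeInt32(int32_t v) { data.push_back(v); }
        void readInt32(int32_t *v) { *v = data[pos++]; }
    };

    int main() {
        ToyParcel parcel;
        parcel.writeInt32(2);        // count first...
        parcel.writeInt32(10);       // ...then each entry, mirroring writeToParcel()
        parcel.writeInt32(20);

        int32_t count = 0;
        parcel.readInt32(&count);    // readFromParcel() must consume in the same order
        for (int32_t i = 0; i < count; i++) {
            int32_t value = 0;
            parcel.readInt32(&value);
            assert(value == (i + 1) * 10);
        }
        return 0;
    }
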
diff --git a/media/liboboe/src/binding/AudioEndpointParcelable.h b/media/liboboe/src/binding/AudioEndpointParcelable.h
new file mode 100644
index 0000000..6bdd8a4
--- /dev/null
+++ b/media/liboboe/src/binding/AudioEndpointParcelable.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_AUDIOENDPOINTPARCELABLE_H
+#define BINDING_AUDIOENDPOINTPARCELABLE_H
+
+#include <stdint.h>
+
+//#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include "binding/OboeServiceDefinitions.h"
+#include "binding/RingBufferParcelable.h"
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace oboe {
+
+/**
+ * Container for information about the message queues plus
+ * general stream information needed by Oboe clients.
+ * It contains no addresses, just sizes, offsets and file descriptors for
+ * shared memory that can be passed through Binder.
+ */
+class AudioEndpointParcelable : public Parcelable {
+public:
+    AudioEndpointParcelable();
+    virtual ~AudioEndpointParcelable();
+
+    /**
+     * Add the file descriptor to the table.
+     * @return index in table or negative error
+     */
+    int32_t addFileDescriptor(int fd, int32_t sizeInBytes);
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    oboe_result_t resolve(EndpointDescriptor *descriptor);
+
+    oboe_result_t validate();
+
+    void dump();
+
+public: // TODO add getters
+    // Set capacityInFrames to zero if Queue is unused.
+    RingBufferParcelable    mUpMessageQueueParcelable;   // server to client
+    RingBufferParcelable    mDownMessageQueueParcelable; // client to server
+    RingBufferParcelable    mUpDataQueueParcelable;      // eg. record, could share same queue
+    RingBufferParcelable    mDownDataQueueParcelable;    // eg. playback
+
+private:
+    int32_t                 mNumSharedMemories = 0;
+    SharedMemoryParcelable  mSharedMemories[MAX_SHARED_MEMORIES];
+};
+
+} /* namespace oboe */
+
+#endif //BINDING_AUDIOENDPOINTPARCELABLE_H
diff --git a/media/liboboe/src/binding/IOboeAudioService.cpp b/media/liboboe/src/binding/IOboeAudioService.cpp
new file mode 100644
index 0000000..a3437b2
--- /dev/null
+++ b/media/liboboe/src/binding/IOboeAudioService.cpp
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <oboe/OboeDefinitions.h>
+
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/OboeStreamRequest.h"
+#include "binding/OboeStreamConfiguration.h"
+#include "binding/IOboeAudioService.h"
+
+namespace android {
+
+/**
+ * This is used by the Oboe Client to talk to the Oboe Service.
+ *
+ * The order of parameters in the Parcels must match the code in OboeAudioService.cpp.
+ */
+class BpOboeAudioService : public BpInterface<IOboeAudioService>
+{
+public:
+    explicit BpOboeAudioService(const sp<IBinder>& impl)
+        : BpInterface<IOboeAudioService>(impl)
+    {
+    }
+
+    virtual oboe_handle_t openStream(oboe::OboeStreamRequest &request,
+                                     oboe::OboeStreamConfiguration &configuration) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        request.writeToParcel(&data);
+        status_t err = remote()->transact(OPEN_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_handle_t stream;
+        reply.readInt32(&stream);
+        configuration.readFromParcel(&reply);
+        return stream;
+    }
+
+    virtual oboe_result_t closeStream(int32_t streamHandle) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(CLOSE_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual oboe_result_t getStreamDescription(oboe_handle_t streamHandle,
+                                               AudioEndpointParcelable &parcelable) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(GET_STREAM_DESCRIPTION, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        parcelable.readFromParcel(&reply);
+        parcelable.dump();
+        oboe_result_t result = parcelable.validate();
+        if (result != OBOE_OK) {
+            return result;
+        }
+        reply.readInt32(&result);
+        return result;
+    }
+
+    // TODO should we wait for a reply?
+    virtual oboe_result_t startStream(oboe_handle_t streamHandle) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(START_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual oboe_result_t pauseStream(oboe_handle_t streamHandle) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(PAUSE_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual oboe_result_t flushStream(oboe_handle_t streamHandle) override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        status_t err = remote()->transact(FLUSH_STREAM, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual void tickle() override { // TODO remove after service thread implemented
+        Parcel data;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        remote()->transact(TICKLE, data, nullptr);
+    }
+
+    virtual oboe_result_t registerAudioThread(oboe_handle_t streamHandle, pid_t clientThreadId,
+                                              oboe_nanoseconds_t periodNanoseconds)
+    override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        data.writeInt32((int32_t) clientThreadId);
+        data.writeInt64(periodNanoseconds);
+        status_t err = remote()->transact(REGISTER_AUDIO_THREAD, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+    virtual oboe_result_t unregisterAudioThread(oboe_handle_t streamHandle, pid_t clientThreadId)
+    override {
+        Parcel data, reply;
+        // send command
+        data.writeInterfaceToken(IOboeAudioService::getInterfaceDescriptor());
+        data.writeInt32(streamHandle);
+        data.writeInt32((int32_t) clientThreadId);
+        status_t err = remote()->transact(UNREGISTER_AUDIO_THREAD, data, &reply);
+        if (err != NO_ERROR) {
+            return OBOE_ERROR_INTERNAL; // TODO consider another error
+        }
+        // parse reply
+        oboe_result_t res;
+        reply.readInt32(&res);
+        return res;
+    }
+
+};
+
+// Implement an interface to the service.
+// This is here so that you don't have to link with the liboboe static library.
+IMPLEMENT_META_INTERFACE(OboeAudioService, "IOboeAudioService");
+
+// The order of parameters in the Parcels must match the code in BpOboeAudioService
+
+status_t BnOboeAudioService::onTransact(uint32_t code, const Parcel& data,
+                                        Parcel* reply, uint32_t flags) {
+    OboeStream stream;
+    OboeStreamRequest request;
+    OboeStreamConfiguration configuration;
+    pid_t pid;
+    oboe_nanoseconds_t nanoseconds;
+    oboe_result_t result;
+    ALOGV("BnOboeAudioService::onTransact(%i) %i", code, flags);
+    data.checkInterface(this);
+
+    switch(code) {
+        case OPEN_STREAM: {
+            request.readFromParcel(&data);
+            stream = openStream(request, configuration);
+            ALOGD("BnOboeAudioService::onTransact OPEN_STREAM 0x%08X", stream);
+            reply->writeInt32(stream);
+            configuration.writeToParcel(reply);
+            return NO_ERROR;
+        } break;
+
+        case CLOSE_STREAM: {
+            data.readInt32(&stream);
+            ALOGD("BnOboeAudioService::onTransact CLOSE_STREAM 0x%08X", stream);
+            result = closeStream(stream);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case GET_STREAM_DESCRIPTION: {
+            data.readInt32(&stream);
+            ALOGD("BnOboeAudioService::onTransact GET_STREAM_DESCRIPTION 0x%08X", stream);
+            oboe::AudioEndpointParcelable parcelable;
+            result = getStreamDescription(stream, parcelable);
+            if (result != OBOE_OK) {
+                return -1; // FIXME
+            }
+            parcelable.dump();
+            result = parcelable.validate();
+            if (result != OBOE_OK) {
+                return -1; // FIXME
+            }
+            parcelable.writeToParcel(reply);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case START_STREAM: {
+            data.readInt32(&stream);
+            result = startStream(stream);
+            ALOGD("BnOboeAudioService::onTransact START_STREAM 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case PAUSE_STREAM: {
+            data.readInt32(&stream);
+            result = pauseStream(stream);
+            ALOGD("BnOboeAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case FLUSH_STREAM: {
+            data.readInt32(&stream);
+            result = flushStream(stream);
+            ALOGD("BnOboeAudioService::onTransact FLUSH_STREAM 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case REGISTER_AUDIO_THREAD: {
+            data.readInt32(&stream);
+            data.readInt32(&pid);
+            data.readInt64(&nanoseconds);
+            result = registerAudioThread(stream, pid, nanoseconds);
+            ALOGD("BnOboeAudioService::onTransact REGISTER_AUDIO_THREAD 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case UNREGISTER_AUDIO_THREAD: {
+            data.readInt32(&stream);
+            data.readInt32(&pid);
+            result = unregisterAudioThread(stream, pid);
+            ALOGD("BnOboeAudioService::onTransact UNREGISTER_AUDIO_THREAD 0x%08X, result = %d",
+                    stream, result);
+            reply->writeInt32(result);
+            return NO_ERROR;
+        } break;
+
+        case TICKLE: {
+            ALOGV("BnOboeAudioService::onTransact TICKLE");
+            tickle();
+            return NO_ERROR;
+        } break;
+
+        default:
+            // ALOGW("BnOboeAudioService::onTransact not handled %u", code);
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+} /* namespace android */
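
A hypothetical client-side flow tying the new binding classes together. The service name passed to getService() is an assumption (the registration side is not part of this patch), and OboeStreamRequest is assumed to be default-constructible:

    // Hedged sketch of a client opening a stream through the Binder proxy above.
    #include <binder/IServiceManager.h>
    #include "binding/IOboeAudioService.h"

    using namespace android;

    oboe_result_t openAndDescribe() {
        // "OboeAudioService" is a placeholder name; the real registration is elsewhere.
        sp<IBinder> binder = defaultServiceManager()->getService(String16("OboeAudioService"));
        if (binder == nullptr) return OBOE_ERROR_INTERNAL;
        sp<IOboeAudioService> service = interface_cast<IOboeAudioService>(binder);

        oboe::OboeStreamRequest request;            // assumed default-constructible
        oboe::OboeStreamConfiguration configuration;
        oboe_handle_t stream = service->openStream(request, configuration);
        if (stream < 0) return stream;              // negative handles are error codes

        oboe::AudioEndpointParcelable endpoint;
        oboe_result_t result = service->getStreamDescription(stream, endpoint);
        if (result != OBOE_OK) return result;

        oboe::EndpointDescriptor descriptor;
        return endpoint.resolve(&descriptor);       // fill in the local queue descriptors
    }
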
diff --git a/media/liboboe/src/binding/IOboeAudioService.h b/media/liboboe/src/binding/IOboeAudioService.h
new file mode 100644
index 0000000..4b4c99c
--- /dev/null
+++ b/media/liboboe/src/binding/IOboeAudioService.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_IOBOEAUDIOSERVICE_H
+#define BINDING_IOBOEAUDIOSERVICE_H
+
+#include <stdint.h>
+#include <utils/RefBase.h>
+#include <binder/TextOutput.h>
+#include <binder/IInterface.h>
+
+#include <oboe/OboeAudio.h>
+
+#include "binding/OboeServiceDefinitions.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/OboeStreamRequest.h"
+#include "binding/OboeStreamConfiguration.h"
+
+//using android::status_t;
+//using android::IInterface;
+//using android::BnInterface;
+
+using oboe::AudioEndpointParcelable;
+using oboe::OboeStreamRequest;
+using oboe::OboeStreamConfiguration;
+
+namespace android {
+
+// Interface (our AIDL) - Shared by server and client
+class IOboeAudioService : public IInterface {
+public:
+
+    DECLARE_META_INTERFACE(OboeAudioService);
+
+    virtual oboe_handle_t openStream(OboeStreamRequest &request,
+                                     OboeStreamConfiguration &configuration) = 0;
+
+    virtual oboe_result_t closeStream(int32_t streamHandle) = 0;
+
+    /* Get an immutable description of the in-memory queues
+     * used to communicate with the underlying HAL or Service.
+     */
+    virtual oboe_result_t getStreamDescription(oboe_handle_t streamHandle,
+                                               AudioEndpointParcelable &parcelable) = 0;
+
+    /**
+     * Start the flow of data.
+     */
+    virtual oboe_result_t startStream(oboe_handle_t streamHandle) = 0;
+
+    /**
+     * Stop the flow of data such that start() can resume without loss of data.
+     */
+    virtual oboe_result_t pauseStream(oboe_handle_t streamHandle) = 0;
+
+    /**
+     *  Discard any data held by the underlying HAL or Service.
+     */
+    virtual oboe_result_t flushStream(oboe_handle_t streamHandle) = 0;
+
+    /**
+     * Manage the specified thread as a low latency audio thread.
+     */
+    virtual oboe_result_t registerAudioThread(oboe_handle_t streamHandle, pid_t clientThreadId,
+                                              oboe_nanoseconds_t periodNanoseconds) = 0;
+
+    virtual oboe_result_t unregisterAudioThread(oboe_handle_t streamHandle,
+                                                pid_t clientThreadId) = 0;
+
+    /**
+     * Poke server instead of running a background thread.
+     * Cooperative multi-tasking for early development only.
+     * TODO remove tickle() when service has its own thread.
+     */
+    virtual void tickle() { };
+
+};
+
+class BnOboeAudioService : public BnInterface<IOboeAudioService> {
+public:
+    virtual status_t onTransact(uint32_t code, const Parcel& data,
+                                Parcel* reply, uint32_t flags = 0);
+
+};
+
+} /* namespace android */
+
+#endif //BINDING_IOBOEAUDIOSERVICE_H
diff --git a/media/liboboe/src/binding/OboeServiceDefinitions.h b/media/liboboe/src/binding/OboeServiceDefinitions.h
new file mode 100644
index 0000000..ad00fe2
--- /dev/null
+++ b/media/liboboe/src/binding/OboeServiceDefinitions.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_OBOESERVICEDEFINITIONS_H
+#define BINDING_OBOESERVICEDEFINITIONS_H
+
+#include <stdint.h>
+#include <utils/RefBase.h>
+#include <binder/TextOutput.h>
+#include <binder/IInterface.h>
+
+#include <oboe/OboeAudio.h>
+
+using android::NO_ERROR;
+using android::IBinder;
+
+namespace android {
+
+enum oboe_commands_t {
+    OPEN_STREAM = IBinder::FIRST_CALL_TRANSACTION,
+    CLOSE_STREAM,
+    GET_STREAM_DESCRIPTION,
+    START_STREAM,
+    PAUSE_STREAM,
+    FLUSH_STREAM,
+    REGISTER_AUDIO_THREAD,
+    UNREGISTER_AUDIO_THREAD,
+    TICKLE
+};
+
+} // namespace android
+
+namespace oboe {
+
+enum oboe_commands_t {
+    OPEN_STREAM = IBinder::FIRST_CALL_TRANSACTION,
+    CLOSE_STREAM,
+    GET_STREAM_DESCRIPTION,
+    START_STREAM,
+    PAUSE_STREAM,
+    FLUSH_STREAM,
+    REGISTER_AUDIO_THREAD,
+    UNREGISTER_AUDIO_THREAD,
+    TICKLE
+};
+
+// TODO Expand this to include all the open parameters.
+typedef struct OboeServiceStreamInfo_s {
+    int32_t             deviceId;
+    int32_t             samplesPerFrame;  // number of channels
+    oboe_sample_rate_t  sampleRate;
+    oboe_audio_format_t audioFormat;
+} OboeServiceStreamInfo;
+
+// This must be a fixed width so it can be in shared memory.
+enum RingbufferFlags : uint32_t {
+    NONE = 0,
+    RATE_ISOCHRONOUS = 0x0001,
+    RATE_ASYNCHRONOUS = 0x0002,
+    COHERENCY_DMA = 0x0004,
+    COHERENCY_ACQUIRE_RELEASE = 0x0008,
+    COHERENCY_AUTO = 0x0010,
+};
+
+// This is not passed through Binder.
+// Client side code will convert Binder data and fill this descriptor.
+typedef struct RingBufferDescriptor_s {
+    uint8_t* dataAddress;       // offset from read or write block
+    int64_t* writeCounterAddress;
+    int64_t* readCounterAddress;
+    int32_t  bytesPerFrame;     // index is in frames
+    int32_t  framesPerBurst;    // for ISOCHRONOUS queues
+    int32_t  capacityInFrames;  // zero if unused
+    RingbufferFlags flags;
+} RingBufferDescriptor;
+
+// This is not passed through Binder.
+// Client side code will convert Binder data and fill this descriptor.
+typedef struct EndpointDescriptor_s {
+    // Set capacityInFrames to zero if Queue is unused.
+    RingBufferDescriptor upMessageQueueDescriptor;   // server to client
+    RingBufferDescriptor downMessageQueueDescriptor; // client to server
+    RingBufferDescriptor upDataQueueDescriptor;      // eg. record
+    RingBufferDescriptor downDataQueueDescriptor;    // eg. playback
+} EndpointDescriptor;
+
+} // namespace oboe
+
+#endif //BINDING_OBOESERVICEDEFINITIONS_H
diff --git a/media/liboboe/src/binding/OboeServiceMessage.h b/media/liboboe/src/binding/OboeServiceMessage.h
new file mode 100644
index 0000000..aa13571
--- /dev/null
+++ b/media/liboboe/src/binding/OboeServiceMessage.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_OBOE_SERVICE_MESSAGE_H
+#define OBOE_OBOE_SERVICE_MESSAGE_H
+
+#include <stdint.h>
+
+#include <oboe/OboeDefinitions.h>
+
+namespace oboe {
+
+// TODO move this to an "include" folder for the service.
+
+struct OboeMessageTimestamp {
+    oboe_position_frames_t position;
+    int64_t                deviceOffset; // add to client position to get device position
+    oboe_nanoseconds_t     timestamp;
+};
+
+typedef enum oboe_service_event_e : uint32_t {
+    OBOE_SERVICE_EVENT_STARTED,
+    OBOE_SERVICE_EVENT_PAUSED,
+    OBOE_SERVICE_EVENT_FLUSHED,
+    OBOE_SERVICE_EVENT_CLOSED,
+    OBOE_SERVICE_EVENT_DISCONNECTED
+} oboe_service_event_t;
+
+struct OboeMessageEvent {
+    oboe_service_event_t event;
+    int32_t data1;
+    int64_t data2;
+};
+
+typedef struct OboeServiceMessage_s {
+    enum class code : uint32_t {
+        NOTHING,
+        TIMESTAMP,
+        EVENT,
+    };
+
+    code what;
+    union {
+        OboeMessageTimestamp timestamp;
+        OboeMessageEvent event;
+    };
+} OboeServiceMessage;
+
+
+} /* namespace oboe */
+
+#endif //OBOE_OBOE_SERVICE_MESSAGE_H
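
A brief sketch of how a client might dispatch a message popped from the up message queue, based only on the struct layout above:

    // Minimal dispatch over OboeServiceMessage (sketch, not the service's own code).
    #include "binding/OboeServiceMessage.h"

    using namespace oboe;

    void handleServiceMessage(const OboeServiceMessage &message) {
        switch (message.what) {
        case OboeServiceMessage::code::TIMESTAMP:
            // Client position plus timestamp.deviceOffset gives the device position.
            break;
        case OboeServiceMessage::code::EVENT:
            if (message.event.event == OBOE_SERVICE_EVENT_DISCONNECTED) {
                // The stream is no longer usable; tear it down.
            }
            break;
        case OboeServiceMessage::code::NOTHING:
        default:
            break;
        }
    }
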
diff --git a/media/liboboe/src/binding/OboeStreamConfiguration.cpp b/media/liboboe/src/binding/OboeStreamConfiguration.cpp
new file mode 100644
index 0000000..4b8b5b2
--- /dev/null
+++ b/media/liboboe/src/binding/OboeStreamConfiguration.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include <oboe/OboeDefinitions.h>
+
+#include "binding/OboeStreamConfiguration.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace oboe;
+
+OboeStreamConfiguration::OboeStreamConfiguration() {}
+OboeStreamConfiguration::~OboeStreamConfiguration() {}
+
+status_t OboeStreamConfiguration::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mDeviceId);
+    parcel->writeInt32(mSampleRate);
+    parcel->writeInt32(mSamplesPerFrame);
+    parcel->writeInt32((int32_t) mAudioFormat);
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t OboeStreamConfiguration::readFromParcel(const Parcel* parcel) {
+    int32_t temp;
+    parcel->readInt32(&mDeviceId);
+    parcel->readInt32(&mSampleRate);
+    parcel->readInt32(&mSamplesPerFrame);
+    parcel->readInt32(&temp);
+    mAudioFormat = (oboe_audio_format_t) temp;
+    return NO_ERROR; // TODO check for errors above
+}
+
+oboe_result_t OboeStreamConfiguration::validate() {
+    // Validate results of the open.
+    if (mSampleRate < 0 || mSampleRate >= 8 * 48000) { // TODO review limits
+        ALOGE("OboeStreamConfiguration.validate(): invalid sampleRate = %d", mSampleRate);
+        return OBOE_ERROR_INTERNAL;
+    }
+
+    if (mSamplesPerFrame < 1 || mSamplesPerFrame >= 32) { // TODO review limits
+        ALOGE("OboeStreamConfiguration.validate() invalid samplesPerFrame = %d", mSamplesPerFrame);
+        return OBOE_ERROR_INTERNAL;
+    }
+
+    switch (mAudioFormat) {
+    case OBOE_AUDIO_FORMAT_PCM16:
+    case OBOE_AUDIO_FORMAT_PCM_FLOAT:
+    case OBOE_AUDIO_FORMAT_PCM824:
+    case OBOE_AUDIO_FORMAT_PCM32:
+        break;
+    default:
+        ALOGE("OboeStreamConfiguration.validate() invalid audioFormat = %d", mAudioFormat);
+        return OBOE_ERROR_INTERNAL;
+    }
+    return OBOE_OK;
+}
+
+void OboeStreamConfiguration::dump() {
+    ALOGD("OboeStreamConfiguration mSampleRate = %d -----", mSampleRate);
+    ALOGD("OboeStreamConfiguration mSamplesPerFrame = %d", mSamplesPerFrame);
+    ALOGD("OboeStreamConfiguration mAudioFormat = %d", (int)mAudioFormat);
+}
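
One possible way to address the "TODO check for errors above", sketched here for the write side only and assuming the usual android::Parcel convention that writeInt32() returns a status_t: propagate the first failing status instead of returning NO_ERROR unconditionally.

    #include <binder/Parcel.h>
    #include "binding/OboeStreamConfiguration.h"

    using android::NO_ERROR;
    using android::Parcel;
    using android::status_t;
    using namespace oboe;

    // Alternative body for writeToParcel() that forwards Parcel errors.
    status_t OboeStreamConfiguration::writeToParcel(Parcel* parcel) const {
        status_t status = parcel->writeInt32(mDeviceId);
        if (status != NO_ERROR) return status;
        status = parcel->writeInt32(mSampleRate);
        if (status != NO_ERROR) return status;
        status = parcel->writeInt32(mSamplesPerFrame);
        if (status != NO_ERROR) return status;
        return parcel->writeInt32((int32_t) mAudioFormat);
    }

The same pattern would apply to readFromParcel() and to the other parcelables introduced by this change.
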
diff --git a/media/liboboe/src/binding/OboeStreamConfiguration.h b/media/liboboe/src/binding/OboeStreamConfiguration.h
new file mode 100644
index 0000000..6bc1924
--- /dev/null
+++ b/media/liboboe/src/binding/OboeStreamConfiguration.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_OBOE_STREAM_CONFIGURATION_H
+#define BINDING_OBOE_STREAM_CONFIGURATION_H
+
+#include <stdint.h>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <oboe/OboeDefinitions.h>
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace oboe {
+
+class OboeStreamConfiguration : public Parcelable {
+public:
+    OboeStreamConfiguration();
+    virtual ~OboeStreamConfiguration();
+
+    oboe_device_id_t getDeviceId() const {
+        return mDeviceId;
+    }
+
+    void setDeviceId(oboe_device_id_t deviceId) {
+        mDeviceId = deviceId;
+    }
+
+    oboe_sample_rate_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    void setSampleRate(oboe_sample_rate_t sampleRate) {
+        mSampleRate = sampleRate;
+    }
+
+    int32_t getSamplesPerFrame() const {
+        return mSamplesPerFrame;
+    }
+
+    void setSamplesPerFrame(int32_t samplesPerFrame) {
+        mSamplesPerFrame = samplesPerFrame;
+    }
+
+    oboe_audio_format_t getAudioFormat() const {
+        return mAudioFormat;
+    }
+
+    void setAudioFormat(oboe_audio_format_t audioFormat) {
+        mAudioFormat = audioFormat;
+    }
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    oboe_result_t validate();
+
+    void dump();
+
+protected:
+    oboe_device_id_t    mDeviceId        = OBOE_DEVICE_UNSPECIFIED;
+    oboe_sample_rate_t  mSampleRate      = OBOE_UNSPECIFIED;
+    int32_t             mSamplesPerFrame = OBOE_UNSPECIFIED;
+    oboe_audio_format_t mAudioFormat     = OBOE_AUDIO_FORMAT_UNSPECIFIED;
+};
+
+} /* namespace oboe */
+
+#endif //BINDING_OBOE_STREAM_CONFIGURATION_H
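
A brief usage sketch of this class, assuming the OBOE_* constants behave as their names suggest; the exampleConfigure() caller and its values are illustrative only.

    #include "binding/OboeStreamConfiguration.h"

    void exampleConfigure() {
        oboe::OboeStreamConfiguration configuration;
        configuration.setSampleRate(48000);
        configuration.setSamplesPerFrame(2);                     // stereo
        configuration.setAudioFormat(OBOE_AUDIO_FORMAT_PCM16);
        if (configuration.validate() != OBOE_OK) {
            configuration.dump();                                // log the rejected values
        }
    }
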
diff --git a/media/liboboe/src/binding/OboeStreamRequest.cpp b/media/liboboe/src/binding/OboeStreamRequest.cpp
new file mode 100644
index 0000000..5d521d0
--- /dev/null
+++ b/media/liboboe/src/binding/OboeStreamRequest.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include <oboe/OboeDefinitions.h>
+
+#include "binding/OboeStreamConfiguration.h"
+#include "binding/OboeStreamRequest.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace oboe;
+
+OboeStreamRequest::OboeStreamRequest()
+    : mConfiguration()
+    {}
+
+OboeStreamRequest::~OboeStreamRequest() {}
+
+status_t OboeStreamRequest::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32((int32_t) mUserId);
+    parcel->writeInt32((int32_t) mProcessId);
+    mConfiguration.writeToParcel(parcel);
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t OboeStreamRequest::readFromParcel(const Parcel* parcel) {
+    int32_t temp;
+    parcel->readInt32(&temp);
+    mUserId = (uid_t) temp;
+    parcel->readInt32(&temp);
+    mProcessId = (pid_t) temp;
+    mConfiguration.readFromParcel(parcel);
+    return NO_ERROR; // TODO check for errors above
+}
+
+oboe_result_t OboeStreamRequest::validate() {
+    return mConfiguration.validate();
+}
+
+void OboeStreamRequest::dump() {
+    ALOGD("OboeStreamRequest mUserId = %d -----", mUserId);
+    ALOGD("OboeStreamRequest mProcessId = %d", mProcessId);
+    mConfiguration.dump();
+}
diff --git a/media/liboboe/src/binding/OboeStreamRequest.h b/media/liboboe/src/binding/OboeStreamRequest.h
new file mode 100644
index 0000000..aab3c97
--- /dev/null
+++ b/media/liboboe/src/binding/OboeStreamRequest.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_OBOE_STREAM_REQUEST_H
+#define BINDING_OBOE_STREAM_REQUEST_H
+
+#include <stdint.h>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <oboe/OboeDefinitions.h>
+
+#include "binding/OboeStreamConfiguration.h"
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace oboe {
+
+class OboeStreamRequest : public Parcelable {
+public:
+    OboeStreamRequest();
+    virtual ~OboeStreamRequest();
+
+    uid_t getUserId() const {
+        return mUserId;
+    }
+
+    void setUserId(uid_t userId) {
+        mUserId = userId;
+    }
+
+    pid_t getProcessId() const {
+        return mProcessId;
+    }
+
+    void setProcessId(pid_t processId) {
+        mProcessId = processId;
+    }
+
+    OboeStreamConfiguration &getConfiguration() {
+        return mConfiguration;
+    }
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    oboe_result_t validate();
+
+    void dump();
+
+protected:
+    OboeStreamConfiguration  mConfiguration;
+    uid_t    mUserId;
+    pid_t    mProcessId;
+};
+
+} /* namespace oboe */
+
+#endif //BINDING_OBOE_STREAM_REQUEST_H
diff --git a/media/liboboe/src/binding/RingBufferParcelable.cpp b/media/liboboe/src/binding/RingBufferParcelable.cpp
new file mode 100644
index 0000000..f097655
--- /dev/null
+++ b/media/liboboe/src/binding/RingBufferParcelable.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <binder/Parcelable.h>
+
+#include "binding/OboeServiceDefinitions.h"
+#include "binding/SharedRegionParcelable.h"
+#include "binding/RingBufferParcelable.h"
+
+using namespace oboe;
+
+RingBufferParcelable::RingBufferParcelable() {}
+RingBufferParcelable::~RingBufferParcelable() {}
+
+// TODO This assumes that all three use the same SharedMemoryParcelable
+void RingBufferParcelable::setupMemory(int32_t sharedMemoryIndex,
+                 int32_t dataMemoryOffset,
+                 int32_t dataSizeInBytes,
+                 int32_t readCounterOffset,
+                 int32_t writeCounterOffset,
+                 int32_t counterSizeBytes) {
+    mReadCounterParcelable.setup(sharedMemoryIndex, readCounterOffset, counterSizeBytes);
+    mWriteCounterParcelable.setup(sharedMemoryIndex, writeCounterOffset, counterSizeBytes);
+    mDataParcelable.setup(sharedMemoryIndex, dataMemoryOffset, dataSizeInBytes);
+}
+
+void RingBufferParcelable::setupMemory(int32_t sharedMemoryIndex,
+                 int32_t dataMemoryOffset,
+                 int32_t dataSizeInBytes) {
+    mReadCounterParcelable.setup(sharedMemoryIndex, 0, 0);
+    mWriteCounterParcelable.setup(sharedMemoryIndex, 0, 0);
+    mDataParcelable.setup(sharedMemoryIndex, dataMemoryOffset, dataSizeInBytes);
+}
+
+int32_t RingBufferParcelable::getBytesPerFrame() {
+    return mBytesPerFrame;
+}
+
+void RingBufferParcelable::setBytesPerFrame(int32_t bytesPerFrame) {
+    mBytesPerFrame = bytesPerFrame;
+}
+
+int32_t RingBufferParcelable::getFramesPerBurst() {
+    return mFramesPerBurst;
+}
+
+void RingBufferParcelable::setFramesPerBurst(int32_t framesPerBurst) {
+    mFramesPerBurst = framesPerBurst;
+}
+
+int32_t RingBufferParcelable::getCapacityInFrames() {
+    return mCapacityInFrames;
+}
+
+void RingBufferParcelable::setCapacityInFrames(int32_t capacityInFrames) {
+    mCapacityInFrames = capacityInFrames;
+}
+
+/**
+ * writeToParcel() and readFromParcel() must be symmetric:
+ * fields must be read back in exactly the order they were written.
+ */
+status_t RingBufferParcelable::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mCapacityInFrames);
+    if (mCapacityInFrames > 0) {
+        parcel->writeInt32(mBytesPerFrame);
+        parcel->writeInt32(mFramesPerBurst);
+        parcel->writeInt32(mFlags);
+        mReadCounterParcelable.writeToParcel(parcel);
+        mWriteCounterParcelable.writeToParcel(parcel);
+        mDataParcelable.writeToParcel(parcel);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t RingBufferParcelable::readFromParcel(const Parcel* parcel) {
+    parcel->readInt32(&mCapacityInFrames);
+    if (mCapacityInFrames > 0) {
+        parcel->readInt32(&mBytesPerFrame);
+        parcel->readInt32(&mFramesPerBurst);
+        parcel->readInt32((int32_t *)&mFlags);
+        mReadCounterParcelable.readFromParcel(parcel);
+        mWriteCounterParcelable.readFromParcel(parcel);
+        mDataParcelable.readFromParcel(parcel);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+oboe_result_t RingBufferParcelable::resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor) {
+    oboe_result_t result;
+
+    result = mReadCounterParcelable.resolve(memoryParcels,
+                                            (void **) &descriptor->readCounterAddress);
+    if (result != OBOE_OK) {
+        return result;
+    }
+
+    result = mWriteCounterParcelable.resolve(memoryParcels,
+                                             (void **) &descriptor->writeCounterAddress);
+    if (result != OBOE_OK) {
+        return result;
+    }
+
+    result = mDataParcelable.resolve(memoryParcels, (void **) &descriptor->dataAddress);
+    if (result != OBOE_OK) {
+        return result;
+    }
+
+    descriptor->bytesPerFrame = mBytesPerFrame;
+    descriptor->framesPerBurst = mFramesPerBurst;
+    descriptor->capacityInFrames = mCapacityInFrames;
+    descriptor->flags = mFlags;
+    return OBOE_OK;
+}
+
+oboe_result_t RingBufferParcelable::validate() {
+    oboe_result_t result;
+    if (mCapacityInFrames < 0 || mCapacityInFrames >= 32 * 1024) {
+        ALOGE("RingBufferParcelable invalid mCapacityInFrames = %d", mCapacityInFrames);
+        return OBOE_ERROR_INTERNAL;
+    }
+    if (mBytesPerFrame < 0 || mBytesPerFrame >= 256) {
+        ALOGE("RingBufferParcelable invalid mBytesPerFrame = %d", mBytesPerFrame);
+        return OBOE_ERROR_INTERNAL;
+    }
+    if (mFramesPerBurst < 0 || mFramesPerBurst >= 1024) {
+        ALOGE("RingBufferParcelable invalid mFramesPerBurst = %d", mFramesPerBurst);
+        return OBOE_ERROR_INTERNAL;
+    }
+    if ((result = mReadCounterParcelable.validate()) != OBOE_OK) {
+        ALOGE("RingBufferParcelable invalid mReadCounterParcelable = %d", result);
+        return result;
+    }
+    if ((result = mWriteCounterParcelable.validate()) != OBOE_OK) {
+        ALOGE("RingBufferParcelable invalid mWriteCounterParcelable = %d", result);
+        return result;
+    }
+    if ((result = mDataParcelable.validate()) != OBOE_OK) {
+        ALOGE("RingBufferParcelable invalid mDataParcelable = %d", result);
+        return result;
+    }
+    return OBOE_OK;
+}
+
+
+void RingBufferParcelable::dump() {
+    ALOGD("RingBufferParcelable mCapacityInFrames = %d ---------", mCapacityInFrames);
+    if (mCapacityInFrames > 0) {
+        ALOGD("RingBufferParcelable mBytesPerFrame = %d", mBytesPerFrame);
+        ALOGD("RingBufferParcelable mFramesPerBurst = %d", mFramesPerBurst);
+        ALOGD("RingBufferParcelable mFlags = %u", (unsigned) mFlags);
+        mReadCounterParcelable.dump();
+        mWriteCounterParcelable.dump();
+        mDataParcelable.dump();
+    }
+}
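
A sketch of how a server might lay out a single ring buffer inside one shared memory region; the offsets, sizes, and the exampleLayout() caller are illustrative, and the counters are assumed to be int64_t as in RingBufferDescriptor.

    #include "binding/RingBufferParcelable.h"

    void exampleLayout(oboe::RingBufferParcelable &ringBuffer) {
        const int32_t sharedMemoryIndex  = 0;
        const int32_t counterSizeBytes   = sizeof(int64_t);
        const int32_t readCounterOffset  = 0;
        const int32_t writeCounterOffset = counterSizeBytes;
        const int32_t dataOffset         = 2 * counterSizeBytes;
        const int32_t bytesPerFrame      = 4;                  // e.g. stereo 16-bit PCM
        const int32_t dataSizeInBytes    = 4 * 1024;           // illustrative capacity

        // Both counters and the data live in the same SharedMemoryParcelable (index 0).
        ringBuffer.setupMemory(sharedMemoryIndex, dataOffset, dataSizeInBytes,
                               readCounterOffset, writeCounterOffset, counterSizeBytes);
        ringBuffer.setBytesPerFrame(bytesPerFrame);
        ringBuffer.setFramesPerBurst(64);
        ringBuffer.setCapacityInFrames(dataSizeInBytes / bytesPerFrame);
    }
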
diff --git a/media/liboboe/src/binding/RingBufferParcelable.h b/media/liboboe/src/binding/RingBufferParcelable.h
new file mode 100644
index 0000000..9bb695a
--- /dev/null
+++ b/media/liboboe/src/binding/RingBufferParcelable.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_RINGBUFFER_PARCELABLE_H
+#define BINDING_RINGBUFFER_PARCELABLE_H
+
+#include <stdint.h>
+
+#include <binder/Parcelable.h>
+
+#include "binding/OboeServiceDefinitions.h"
+#include "binding/SharedRegionParcelable.h"
+
+namespace oboe {
+
+class RingBufferParcelable : public Parcelable {
+public:
+    RingBufferParcelable();
+    virtual ~RingBufferParcelable();
+
+    // TODO This assumes that all three use the same SharedMemoryParcelable
+    void setupMemory(int32_t sharedMemoryIndex,
+                     int32_t dataMemoryOffset,
+                     int32_t dataSizeInBytes,
+                     int32_t readCounterOffset,
+                     int32_t writeCounterOffset,
+                     int32_t counterSizeBytes);
+
+    void setupMemory(int32_t sharedMemoryIndex,
+                     int32_t dataMemoryOffset,
+                     int32_t dataSizeInBytes);
+
+    int32_t getBytesPerFrame();
+
+    void setBytesPerFrame(int32_t bytesPerFrame);
+
+    int32_t getFramesPerBurst();
+
+    void setFramesPerBurst(int32_t framesPerBurst);
+
+    int32_t getCapacityInFrames();
+
+    void setCapacityInFrames(int32_t capacityInFrames);
+
+    /**
+     * writeToParcel() and readFromParcel() must be symmetric:
+     * fields must be read back in exactly the order they were written.
+     */
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    oboe_result_t resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor);
+
+    oboe_result_t validate();
+
+    void dump();
+
+private:
+    SharedRegionParcelable  mReadCounterParcelable;
+    SharedRegionParcelable  mWriteCounterParcelable;
+    SharedRegionParcelable  mDataParcelable;
+    int32_t                 mBytesPerFrame = 0;     // index is in frames
+    int32_t                 mFramesPerBurst = 0;    // for ISOCHRONOUS queues
+    int32_t                 mCapacityInFrames = 0;  // zero if unused
+    RingbufferFlags         mFlags = RingbufferFlags::NONE;
+};
+
+} /* namespace oboe */
+
+#endif //BINDING_RINGBUFFER_PARCELABLE_H
diff --git a/media/liboboe/src/binding/SharedMemoryParcelable.cpp b/media/liboboe/src/binding/SharedMemoryParcelable.cpp
new file mode 100644
index 0000000..5b739c0
--- /dev/null
+++ b/media/liboboe/src/binding/SharedMemoryParcelable.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <oboe/OboeDefinitions.h>
+
+#include <binder/Parcelable.h>
+
+#include "binding/SharedMemoryParcelable.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace oboe;
+
+SharedMemoryParcelable::SharedMemoryParcelable() {}
+SharedMemoryParcelable::~SharedMemoryParcelable() {}
+
+void SharedMemoryParcelable::setup(int fd, int32_t sizeInBytes) {
+    mFd = fd;
+    mSizeInBytes = sizeInBytes;
+}
+
+status_t SharedMemoryParcelable::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        parcel->writeDupFileDescriptor(mFd);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t SharedMemoryParcelable::readFromParcel(const Parcel* parcel) {
+    parcel->readInt32(&mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        mFd = dup(parcel->readFileDescriptor());
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+// TODO Add code to munmap() the region when it is no longer needed.
+
+oboe_result_t SharedMemoryParcelable::resolve(int32_t offsetInBytes, int32_t sizeInBytes,
+                                              void **regionAddressPtr) {
+    if (offsetInBytes < 0) {
+        ALOGE("SharedMemoryParcelable illegal offsetInBytes = %d", offsetInBytes);
+        return OBOE_ERROR_OUT_OF_RANGE;
+    } else if ((offsetInBytes + sizeInBytes) > mSizeInBytes) {
+        ALOGE("SharedMemoryParcelable out of range, offsetInBytes = %d, "
+              "sizeInBytes = %d, mSizeInBytes = %d",
+              offsetInBytes, sizeInBytes, mSizeInBytes);
+        return OBOE_ERROR_OUT_OF_RANGE;
+    }
+    if (mResolvedAddress == nullptr) {
+        mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ|PROT_WRITE,
+                                          MAP_SHARED, mFd, 0);
+        // mmap() reports failure by returning MAP_FAILED, not nullptr.
+        if (mResolvedAddress == MAP_FAILED) {
+            ALOGE("SharedMemoryParcelable mmap failed for fd = %d", mFd);
+            mResolvedAddress = nullptr;
+            return OBOE_ERROR_INTERNAL;
+        }
+    }
+    *regionAddressPtr = mResolvedAddress + offsetInBytes;
+    ALOGD("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
+    ALOGD("SharedMemoryParcelable offset by %d, *regionAddressPtr = %p",
+          offsetInBytes, *regionAddressPtr);
+    return OBOE_OK;
+}
+
+int32_t SharedMemoryParcelable::getSizeInBytes() {
+    return mSizeInBytes;
+}
+
+oboe_result_t SharedMemoryParcelable::validate() {
+    if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE) {
+        ALOGE("SharedMemoryParcelable invalid mSizeInBytes = %d", mSizeInBytes);
+        return OBOE_ERROR_INTERNAL;
+    }
+    if (mSizeInBytes > 0) {
+        if (mFd == -1) {
+            ALOGE("SharedMemoryParcelable uninitialized mFd = %d", mFd);
+            return OBOE_ERROR_INTERNAL;
+        }
+    }
+    return OBOE_OK;
+}
+
+void SharedMemoryParcelable::dump() {
+    ALOGD("SharedMemoryParcelable mFd = %d", mFd);
+    ALOGD("SharedMemoryParcelable mSizeInBytes = %d", mSizeInBytes);
+    ALOGD("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
+}
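
A sketch of typical use of this class, assuming the file descriptor comes from elsewhere (for example an ashmem region created by the server); the exampleResolve() caller is illustrative only.

    #include <oboe/OboeDefinitions.h>
    #include "binding/SharedMemoryParcelable.h"

    oboe_result_t exampleResolve(int fd, int32_t sizeInBytes) {
        oboe::SharedMemoryParcelable memory;
        memory.setup(fd, sizeInBytes);
        if (memory.validate() != OBOE_OK) {
            return OBOE_ERROR_INTERNAL;
        }
        // Map the first 8 bytes of the region, e.g. for a single int64_t counter.
        void *counterAddress = nullptr;
        return memory.resolve(0 /* offsetInBytes */, sizeof(int64_t), &counterAddress);
    }
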
diff --git a/media/liboboe/src/binding/SharedMemoryParcelable.h b/media/liboboe/src/binding/SharedMemoryParcelable.h
new file mode 100644
index 0000000..9585779
--- /dev/null
+++ b/media/liboboe/src/binding/SharedMemoryParcelable.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_SHAREDMEMORYPARCELABLE_H
+#define BINDING_SHAREDMEMORYPARCELABLE_H
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace oboe {
+
+// Arbitrary limits for sanity checks. TODO remove after debugging.
+#define MAX_SHARED_MEMORIES (32)
+#define MAX_MMAP_OFFSET (32 * 1024)
+#define MAX_MMAP_SIZE (32 * 1024)
+
+/**
+ * This is a parcelable description of a shared memory referenced by a file descriptor.
+ * It may be divided into several regions.
+ */
+class SharedMemoryParcelable : public Parcelable {
+public:
+    SharedMemoryParcelable();
+    virtual ~SharedMemoryParcelable();
+
+    void setup(int fd, int32_t sizeInBytes);
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    oboe_result_t resolve(int32_t offsetInBytes, int32_t sizeInBytes, void **regionAddressPtr);
+
+    int32_t getSizeInBytes();
+
+    oboe_result_t validate();
+
+    void dump();
+
+protected:
+    int mFd = -1;
+    int32_t mSizeInBytes = 0;
+    uint8_t *mResolvedAddress = nullptr;
+};
+
+} /* namespace oboe */
+
+#endif //BINDING_SHAREDMEMORYPARCELABLE_H
diff --git a/media/liboboe/src/binding/SharedRegionParcelable.cpp b/media/liboboe/src/binding/SharedRegionParcelable.cpp
new file mode 100644
index 0000000..86ce8f3
--- /dev/null
+++ b/media/liboboe/src/binding/SharedRegionParcelable.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcelable.h>
+
+#include <oboe/OboeDefinitions.h>
+
+#include "binding/SharedMemoryParcelable.h"
+#include "binding/SharedRegionParcelable.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace oboe;
+
+SharedRegionParcelable::SharedRegionParcelable() {}
+SharedRegionParcelable::~SharedRegionParcelable() {}
+
+void SharedRegionParcelable::setup(int32_t sharedMemoryIndex,
+                                   int32_t offsetInBytes,
+                                   int32_t sizeInBytes) {
+    mSharedMemoryIndex = sharedMemoryIndex;
+    mOffsetInBytes = offsetInBytes;
+    mSizeInBytes = sizeInBytes;
+}
+
+status_t SharedRegionParcelable::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        parcel->writeInt32(mSharedMemoryIndex);
+        parcel->writeInt32(mOffsetInBytes);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+status_t SharedRegionParcelable::readFromParcel(const Parcel* parcel) {
+    parcel->readInt32(&mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        parcel->readInt32(&mSharedMemoryIndex);
+        parcel->readInt32(&mOffsetInBytes);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+oboe_result_t SharedRegionParcelable::resolve(SharedMemoryParcelable *memoryParcels,
+                                              void **regionAddressPtr) {
+    if (mSizeInBytes == 0) {
+        *regionAddressPtr = nullptr;
+        return OBOE_OK;
+    }
+    if (mSharedMemoryIndex < 0) {
+        ALOGE("SharedRegionParcelable invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
+        return OBOE_ERROR_INTERNAL;
+    }
+    SharedMemoryParcelable *memoryParcel = &memoryParcels[mSharedMemoryIndex];
+    return memoryParcel->resolve(mOffsetInBytes, mSizeInBytes, regionAddressPtr);
+}
+
+oboe_result_t SharedRegionParcelable::validate() {
+    if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE) {
+        ALOGE("SharedRegionParcelable invalid mSizeInBytes = %d", mSizeInBytes);
+        return OBOE_ERROR_INTERNAL;
+    }
+    if (mSizeInBytes > 0) {
+        if (mOffsetInBytes < 0 || mOffsetInBytes >= MAX_MMAP_OFFSET) {
+            ALOGE("SharedRegionParcelable invalid mOffsetInBytes = %d", mOffsetInBytes);
+            return OBOE_ERROR_INTERNAL;
+        }
+        if (mSharedMemoryIndex < 0 || mSharedMemoryIndex >= MAX_SHARED_MEMORIES) {
+            ALOGE("SharedRegionParcelable invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
+            return OBOE_ERROR_INTERNAL;
+        }
+    }
+    return OBOE_OK;
+}
+
+void SharedRegionParcelable::dump() {
+    ALOGD("SharedRegionParcelable mSizeInBytes = %d -----", mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        ALOGD("SharedRegionParcelable mSharedMemoryIndex = %d", mSharedMemoryIndex);
+        ALOGD("SharedRegionParcelable mOffsetInBytes = %d", mOffsetInBytes);
+    }
+}
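
A sketch showing how a region resolves against an array of shared memories; the array is assumed to hold up to MAX_SHARED_MEMORIES entries as defined in SharedMemoryParcelable.h, and the offsets used by exampleRegion() are illustrative.

    #include "binding/SharedMemoryParcelable.h"
    #include "binding/SharedRegionParcelable.h"

    oboe_result_t exampleRegion(oboe::SharedMemoryParcelable *memories) {
        oboe::SharedRegionParcelable region;
        // Bytes [64, 64 + 8) of shared memory index 0.
        region.setup(0 /* sharedMemoryIndex */, 64 /* offsetInBytes */, sizeof(int64_t));
        void *address = nullptr;
        return region.resolve(memories, &address);   // indexes memories[0] internally
    }
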
diff --git a/media/liboboe/src/binding/SharedRegionParcelable.h b/media/liboboe/src/binding/SharedRegionParcelable.h
new file mode 100644
index 0000000..bccdaa8
--- /dev/null
+++ b/media/liboboe/src/binding/SharedRegionParcelable.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_SHAREDREGIONPARCELABLE_H
+#define BINDING_SHAREDREGIONPARCELABLE_H
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcelable.h>
+
+#include <oboe/OboeDefinitions.h>
+
+#include "binding/SharedMemoryParcelable.h"
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace oboe {
+
+class SharedRegionParcelable : public Parcelable {
+public:
+    SharedRegionParcelable();
+    virtual ~SharedRegionParcelable();
+
+    void setup(int32_t sharedMemoryIndex, int32_t offsetInBytes, int32_t sizeInBytes);
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    oboe_result_t resolve(SharedMemoryParcelable *memoryParcels, void **regionAddressPtr);
+
+    oboe_result_t validate();
+
+    void dump();
+
+protected:
+    int32_t mSharedMemoryIndex = -1;
+    int32_t mOffsetInBytes     = 0;
+    int32_t mSizeInBytes       = 0;
+};
+
+} /* namespace oboe */
+
+#endif //BINDING_SHAREDREGIONPARCELABLE_H
diff --git a/media/liboboe/src/client/AudioEndpoint.cpp b/media/liboboe/src/client/AudioEndpoint.cpp
new file mode 100644
index 0000000..160c37e
--- /dev/null
+++ b/media/liboboe/src/client/AudioEndpoint.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <cassert>
+#include <oboe/OboeDefinitions.h>
+
+#include "AudioEndpointParcelable.h"
+#include "AudioEndpoint.h"
+#include "OboeServiceMessage.h"
+
+using namespace android;
+using namespace oboe;
+
+AudioEndpoint::AudioEndpoint()
+    : mOutputFreeRunning(false)
+    , mDataReadCounter(0)
+    , mDataWriteCounter(0)
+{
+}
+
+AudioEndpoint::~AudioEndpoint()
+{
+}
+
+static void AudioEndpoint_validateQueueDescriptor(const char *type,
+                                                  const RingBufferDescriptor *descriptor) {
+    assert(descriptor->capacityInFrames > 0);
+    assert(descriptor->bytesPerFrame > 1);
+    assert(descriptor->dataAddress != nullptr);
+    ALOGD("AudioEndpoint_validateQueueDescriptor %s, dataAddress at %p ====================",
+          type,
+          descriptor->dataAddress);
+    ALOGD("AudioEndpoint_validateQueueDescriptor  readCounter at %p, writeCounter at %p",
+          descriptor->readCounterAddress,
+          descriptor->writeCounterAddress);
+
+    // Try to READ from the data area.
+    uint8_t value = descriptor->dataAddress[0];
+    ALOGD("AudioEndpoint_validateQueueDescriptor() dataAddress[0] = %d, then try to write",
+        (int) value);
+    // Try to WRITE to the data area.
+    descriptor->dataAddress[0] = value;
+    ALOGD("AudioEndpoint_validateQueueDescriptor() wrote successfully");
+
+    if (descriptor->readCounterAddress) {
+        fifo_counter_t counter = *descriptor->readCounterAddress;
+        ALOGD("AudioEndpoint_validateQueueDescriptor() *readCounterAddress = %d, now write",
+              (int) counter);
+        *descriptor->readCounterAddress = counter;
+        ALOGD("AudioEndpoint_validateQueueDescriptor() wrote readCounterAddress successfully");
+    }
+    if (descriptor->writeCounterAddress) {
+        fifo_counter_t counter = *descriptor->writeCounterAddress;
+        ALOGD("AudioEndpoint_validateQueueDescriptor() *writeCounterAddress = %d, now write",
+              (int) counter);
+        *descriptor->writeCounterAddress = counter;
+        ALOGD("AudioEndpoint_validateQueueDescriptor() wrote writeCounterAddress successfully");
+    }
+}
+
+void AudioEndpoint_validateDescriptor(const EndpointDescriptor *pEndpointDescriptor) {
+    AudioEndpoint_validateQueueDescriptor("msg", &pEndpointDescriptor->upMessageQueueDescriptor);
+    AudioEndpoint_validateQueueDescriptor("data", &pEndpointDescriptor->downDataQueueDescriptor);
+}
+
+oboe_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor)
+{
+    oboe_result_t result = OBOE_OK;
+    AudioEndpoint_validateDescriptor(pEndpointDescriptor); // FIXME remove after debugging
+
+    const RingBufferDescriptor *descriptor = &pEndpointDescriptor->upMessageQueueDescriptor;
+    assert(descriptor->bytesPerFrame == sizeof(OboeServiceMessage));
+    assert(descriptor->readCounterAddress != nullptr);
+    assert(descriptor->writeCounterAddress != nullptr);
+    mUpCommandQueue = new FifoBuffer(
+            descriptor->bytesPerFrame,
+            descriptor->capacityInFrames,
+            descriptor->readCounterAddress,
+            descriptor->writeCounterAddress,
+            descriptor->dataAddress
+    );
+    /* TODO mDownCommandQueue
+    if (descriptor->capacityInFrames > 0) {
+        descriptor = &pEndpointDescriptor->downMessageQueueDescriptor;
+        mDownCommandQueue = new FifoBuffer(
+                descriptor->capacityInFrames,
+                descriptor->bytesPerFrame,
+                descriptor->readCounterAddress,
+                descriptor->writeCounterAddress,
+                descriptor->dataAddress
+        );
+    }
+     */
+    descriptor = &pEndpointDescriptor->downDataQueueDescriptor;
+    assert(descriptor->capacityInFrames > 0);
+    assert(descriptor->bytesPerFrame > 1);
+    assert(descriptor->bytesPerFrame < 4 * 16); // FIXME just for initial debugging
+    assert(descriptor->framesPerBurst > 0);
+    assert(descriptor->framesPerBurst < 8 * 1024); // FIXME just for initial debugging
+    assert(descriptor->dataAddress != nullptr);
+    ALOGD("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
+    ALOGD("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
+    mOutputFreeRunning = descriptor->readCounterAddress == nullptr;
+    ALOGD("AudioEndpoint::configure() mOutputFreeRunning = %d", mOutputFreeRunning ? 1 : 0);
+    int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
+                                  ? &mDataReadCounter
+                                  : descriptor->readCounterAddress;
+    int64_t *writeCounterAddress = (descriptor->writeCounterAddress == nullptr)
+                                  ? &mDataWriteCounter
+                                  : descriptor->writeCounterAddress;
+    mDownDataQueue = new FifoBuffer(
+            descriptor->bytesPerFrame,
+            descriptor->capacityInFrames,
+            readCounterAddress,
+            writeCounterAddress,
+            descriptor->dataAddress
+    );
+    uint32_t threshold = descriptor->capacityInFrames / 2;
+    mDownDataQueue->setThreshold(threshold);
+    return result;
+}
+
+oboe_result_t AudioEndpoint::readUpCommand(OboeServiceMessage *commandPtr)
+{
+    return mUpCommandQueue->read(commandPtr, 1);
+}
+
+oboe_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
+{
+    return mDownDataQueue->write(buffer, numFrames);
+}
+
+void AudioEndpoint::setDownDataReadCounter(fifo_counter_t framesRead)
+{
+    mDownDataQueue->setReadCounter(framesRead);
+}
+
+fifo_counter_t AudioEndpoint::getDownDataReadCounter()
+{
+    return mDownDataQueue->getReadCounter();
+}
+
+void AudioEndpoint::setDownDataWriteCounter(fifo_counter_t framesRead)
+{
+    mDownDataQueue->setWriteCounter(framesRead);
+}
+
+fifo_counter_t AudioEndpoint::getDownDataWriteCounter()
+{
+    return mDownDataQueue->getWriteCounter();
+}
+
+oboe_size_frames_t AudioEndpoint::setBufferSizeInFrames(oboe_size_frames_t requestedFrames,
+                                            oboe_size_frames_t *actualFrames)
+{
+    if (requestedFrames < ENDPOINT_DATA_QUEUE_SIZE_MIN) {
+        requestedFrames = ENDPOINT_DATA_QUEUE_SIZE_MIN;
+    }
+    mDownDataQueue->setThreshold(requestedFrames);
+    *actualFrames = mDownDataQueue->getThreshold();
+    return OBOE_OK;
+}
+
+int32_t AudioEndpoint::getBufferSizeInFrames() const
+{
+    return mDownDataQueue->getThreshold();
+}
+
+int32_t AudioEndpoint::getBufferCapacityInFrames() const
+{
+    return (int32_t)mDownDataQueue->getBufferCapacityInFrames();
+}
+
+int32_t AudioEndpoint::getFullFramesAvailable()
+{
+    return mDownDataQueue->getFifoControllerBase()->getFullFramesAvailable();
+}
diff --git a/media/liboboe/src/client/AudioEndpoint.h b/media/liboboe/src/client/AudioEndpoint.h
new file mode 100644
index 0000000..6ae8b72
--- /dev/null
+++ b/media/liboboe/src/client/AudioEndpoint.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_AUDIO_ENDPOINT_H
+#define OBOE_AUDIO_ENDPOINT_H
+
+#include <oboe/OboeAudio.h>
+
+#include "OboeServiceMessage.h"
+#include "AudioEndpointParcelable.h"
+#include "fifo/FifoBuffer.h"
+
+namespace oboe {
+
+#define ENDPOINT_DATA_QUEUE_SIZE_MIN   64
+
+/**
+ * A sink for audio.
+ * Used by the client code.
+ */
+class AudioEndpoint {
+
+public:
+    AudioEndpoint();
+    virtual ~AudioEndpoint();
+
+    /**
+     * Configure based on the EndpointDescriptor.
+     */
+    oboe_result_t configure(const EndpointDescriptor *pEndpointDescriptor);
+
+    /**
+     * Read from a command passed up from the Server.
+     * @return 1 if command received, 0 for no command, or negative error.
+     */
+    oboe_result_t readUpCommand(OboeServiceMessage *commandPtr);
+
+    /**
+     * Non-blocking write.
+     * @return framesWritten or a negative error code.
+     */
+    oboe_result_t writeDataNow(const void *buffer, int32_t numFrames);
+
+    /**
+     * Set the read index in the downData queue.
+     * This is needed if the reader is not updating the index itself.
+     */
+    void setDownDataReadCounter(fifo_counter_t framesRead);
+    fifo_counter_t getDownDataReadCounter();
+
+    void setDownDataWriteCounter(fifo_counter_t framesWritten);
+    fifo_counter_t getDownDataWriteCounter();
+
+    /**
+     * The result is not valid until after configure() is called.
+     *
+     * @return true if the output buffer read position is not updated, e.g. by DMA
+     */
+    bool isOutputFreeRunning() const { return mOutputFreeRunning; }
+
+    int32_t setBufferSizeInFrames(oboe_size_frames_t requestedFrames,
+                                  oboe_size_frames_t *actualFrames);
+    oboe_size_frames_t getBufferSizeInFrames() const;
+
+    oboe_size_frames_t getBufferCapacityInFrames() const;
+
+    oboe_size_frames_t getFullFramesAvailable();
+
+private:
+    FifoBuffer   * mUpCommandQueue;
+    FifoBuffer   * mDownDataQueue;
+    bool           mOutputFreeRunning;
+    fifo_counter_t mDataReadCounter; // only used if free-running
+    fifo_counter_t mDataWriteCounter; // only used if free-running
+};
+
+} // namespace oboe
+
+#endif //OBOE_AUDIO_ENDPOINT_H
diff --git a/media/liboboe/src/client/AudioStreamInternal.cpp b/media/liboboe/src/client/AudioStreamInternal.cpp
new file mode 100644
index 0000000..0d169e1
--- /dev/null
+++ b/media/liboboe/src/client/AudioStreamInternal.cpp
@@ -0,0 +1,528 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <assert.h>
+
+#include <binder/IServiceManager.h>
+
+#include <oboe/OboeAudio.h>
+
+#include "AudioClock.h"
+#include "AudioEndpointParcelable.h"
+#include "binding/OboeStreamRequest.h"
+#include "binding/OboeStreamConfiguration.h"
+#include "binding/IOboeAudioService.h"
+#include "binding/OboeServiceMessage.h"
+
+#include "AudioStreamInternal.h"
+
+#define LOG_TIMESTAMPS   0
+
+using android::String16;
+using android::IServiceManager;
+using android::defaultServiceManager;
+using android::interface_cast;
+
+using namespace oboe;
+
+// Helper function to get access to the "OboeAudioService" service.
+static sp<IOboeAudioService> getOboeAudioService() {
+    sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16("OboeAudioService"));
+    // TODO: If the "OboeHack" service is not running, getService times out and binder == 0.
+    sp<IOboeAudioService> service = interface_cast<IOboeAudioService>(binder);
+    return service;
+}
+
+AudioStreamInternal::AudioStreamInternal()
+        : AudioStream()
+        , mClockModel()
+        , mAudioEndpoint()
+        , mServiceStreamHandle(OBOE_HANDLE_INVALID)
+        , mFramesPerBurst(16)
+{
+    // TODO protect against mService being NULL;
+    // TODO Model access to the service on frameworks/av/media/libaudioclient/AudioSystem.cpp
+    mService = getOboeAudioService();
+}
+
+AudioStreamInternal::~AudioStreamInternal() {
+}
+
+oboe_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
+
+    oboe_result_t result = OBOE_OK;
+    OboeStreamRequest request;
+    OboeStreamConfiguration configuration;
+
+    result = AudioStream::open(builder);
+    if (result < 0) {
+        return result;
+    }
+
+    // Build the request.
+    request.setUserId(getuid());
+    request.setProcessId(getpid());
+    request.getConfiguration().setDeviceId(getDeviceId());
+    request.getConfiguration().setSampleRate(getSampleRate());
+    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
+    request.getConfiguration().setAudioFormat(getFormat());
+    request.dump();
+
+    mServiceStreamHandle = mService->openStream(request, configuration);
+    ALOGD("AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
+         (unsigned int)mServiceStreamHandle);
+    if (mServiceStreamHandle < 0) {
+        result = mServiceStreamHandle;
+        ALOGE("AudioStreamInternal.open(): acquireRealtimeStream oboe_result_t = 0x%08X", result);
+    } else {
+        result = configuration.validate();
+        if (result != OBOE_OK) {
+            close();
+            return result;
+        }
+        // Save results of the open.
+        setSampleRate(configuration.getSampleRate());
+        setSamplesPerFrame(configuration.getSamplesPerFrame());
+        setFormat(configuration.getAudioFormat());
+
+        oboe::AudioEndpointParcelable parcelable;
+        result = mService->getStreamDescription(mServiceStreamHandle, parcelable);
+        if (result != OBOE_OK) {
+            ALOGE("AudioStreamInternal.open(): getStreamDescriptor returns %d", result);
+            mService->closeStream(mServiceStreamHandle);
+            return result;
+        }
+        // resolve parcelable into a descriptor
+        parcelable.resolve(&mEndpointDescriptor);
+
+        // Configure endpoint based on descriptor.
+        mAudioEndpoint.configure(&mEndpointDescriptor);
+
+
+        mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
+        assert(mFramesPerBurst >= 16);
+        assert(mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames < 10 * 1024);
+
+        mClockModel.setSampleRate(getSampleRate());
+        mClockModel.setFramesPerBurst(mFramesPerBurst);
+
+        setState(OBOE_STREAM_STATE_OPEN);
+    }
+    return result;
+}
+
+oboe_result_t AudioStreamInternal::close() {
+    ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
+    if (mServiceStreamHandle != OBOE_HANDLE_INVALID) {
+        mService->closeStream(mServiceStreamHandle);
+        mServiceStreamHandle = OBOE_HANDLE_INVALID;
+        return OBOE_OK;
+    } else {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+}
+
+oboe_result_t AudioStreamInternal::requestStart()
+{
+    oboe_nanoseconds_t startTime;
+    ALOGD("AudioStreamInternal(): start()");
+    if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    startTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
+    mClockModel.start(startTime);
+    processTimestamp(0, startTime);
+    setState(OBOE_STREAM_STATE_STARTING);
+    return mService->startStream(mServiceStreamHandle);
+}
+
+oboe_result_t AudioStreamInternal::requestPause()
+{
+    ALOGD("AudioStreamInternal(): pause()");
+    if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    mClockModel.stop(Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC));
+    setState(OBOE_STREAM_STATE_PAUSING);
+    return mService->pauseStream(mServiceStreamHandle);
+}
+
+oboe_result_t AudioStreamInternal::requestFlush() {
+    ALOGD("AudioStreamInternal(): flush()");
+    if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    setState(OBOE_STREAM_STATE_FLUSHING);
+    return mService->flushStream(mServiceStreamHandle);
+}
+
+void AudioStreamInternal::onFlushFromServer() {
+    ALOGD("AudioStreamInternal(): onFlushFromServer()");
+    oboe_position_frames_t readCounter = mAudioEndpoint.getDownDataReadCounter();
+    oboe_position_frames_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
+    // Bump offset so caller does not see the retrograde motion in getFramesRead().
+    oboe_position_frames_t framesFlushed = writeCounter - readCounter;
+    mFramesOffsetFromService += framesFlushed;
+    // Flush written frames by forcing writeCounter to readCounter.
+    // This is because we cannot move the read counter in the hardware.
+    mAudioEndpoint.setDownDataWriteCounter(readCounter);
+}
+
+oboe_result_t AudioStreamInternal::requestStop()
+{
+    // TODO better implementation of requestStop()
+    oboe_result_t result = requestPause();
+    if (result == OBOE_OK) {
+        oboe_stream_state_t state;
+        result = waitForStateChange(OBOE_STREAM_STATE_PAUSING,
+                                    &state,
+                                    500 * OBOE_NANOS_PER_MILLISECOND);// TODO temporary code
+        if (result == OBOE_OK) {
+            result = requestFlush();
+        }
+    }
+    return result;
+}
+
+oboe_result_t AudioStreamInternal::registerThread() {
+    ALOGD("AudioStreamInternal(): registerThread()");
+    if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    return mService->registerAudioThread(mServiceStreamHandle,
+                                         gettid(),
+                                         getPeriodNanoseconds());
+}
+
+oboe_result_t AudioStreamInternal::unregisterThread() {
+    ALOGD("AudioStreamInternal(): unregisterThread()");
+    if (mServiceStreamHandle == OBOE_HANDLE_INVALID) {
+        return OBOE_ERROR_INVALID_STATE;
+    }
+    return mService->unregisterAudioThread(mServiceStreamHandle, gettid());
+}
+
+// TODO use oboe_clockid_t all the way down to AudioClock
+oboe_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
+                           oboe_position_frames_t *framePosition,
+                           oboe_nanoseconds_t *timeNanoseconds) {
+// TODO implement using real HAL
+    oboe_nanoseconds_t time = AudioClock::getNanoseconds();
+    *framePosition = mClockModel.convertTimeToPosition(time);
+    *timeNanoseconds = time + (10 * OBOE_NANOS_PER_MILLISECOND); // Fake hardware delay
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamInternal::updateState() {
+    return processCommands();
+}
+
+#if LOG_TIMESTAMPS
+static void AudioStreamInternal_LogTimestamp(OboeServiceMessage &command) {
+    static int64_t oldPosition = 0;
+    static oboe_nanoseconds_t oldTime = 0;
+    int64_t framePosition = command.timestamp.position;
+    oboe_nanoseconds_t nanoTime = command.timestamp.timestamp;
+    ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
+         (long long) framePosition,
+         (long long) nanoTime);
+    int64_t nanosDelta = nanoTime - oldTime;
+    if (nanosDelta > 0 && oldTime > 0) {
+        int64_t framesDelta = framePosition - oldPosition;
+        int64_t rate = (framesDelta * OBOE_NANOS_PER_SECOND) / nanosDelta;
+        ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
+        ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
+        ALOGD("AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
+    }
+    oldPosition = framePosition;
+    oldTime = nanoTime;
+}
+#endif
+
+oboe_result_t AudioStreamInternal::onTimestampFromServer(OboeServiceMessage *message) {
+    oboe_position_frames_t framePosition = 0;
+#if LOG_TIMESTAMPS
+    AudioStreamInternal_LogTimestamp(*message);
+#endif
+    framePosition = message->timestamp.position;
+    processTimestamp(framePosition, message->timestamp.timestamp);
+    return OBOE_OK;
+}
+
+oboe_result_t AudioStreamInternal::onEventFromServer(OboeServiceMessage *message) {
+    oboe_result_t result = OBOE_OK;
+    ALOGD("processCommands() got event %d", message->event.event);
+    switch (message->event.event) {
+        case OBOE_SERVICE_EVENT_STARTED:
+            ALOGD("processCommands() got OBOE_SERVICE_EVENT_STARTED");
+            setState(OBOE_STREAM_STATE_STARTED);
+            break;
+        case OBOE_SERVICE_EVENT_PAUSED:
+            ALOGD("processCommands() got OBOE_SERVICE_EVENT_PAUSED");
+            setState(OBOE_STREAM_STATE_PAUSED);
+            break;
+        case OBOE_SERVICE_EVENT_FLUSHED:
+            ALOGD("processCommands() got OBOE_SERVICE_EVENT_FLUSHED");
+            setState(OBOE_STREAM_STATE_FLUSHED);
+            onFlushFromServer();
+            break;
+        case OBOE_SERVICE_EVENT_CLOSED:
+            ALOGD("processCommands() got OBOE_SERVICE_EVENT_CLOSED");
+            setState(OBOE_STREAM_STATE_CLOSED);
+            break;
+        case OBOE_SERVICE_EVENT_DISCONNECTED:
+            result = OBOE_ERROR_DISCONNECTED;
+            ALOGW("WARNING - processCommands() OBOE_SERVICE_EVENT_DISCONNECTED");
+            break;
+        default:
+            ALOGW("WARNING - processCommands() Unrecognized event = %d",
+                 (int) message->event.event);
+            break;
+    }
+    return result;
+}
+
+// Process all the commands coming from the server.
+oboe_result_t AudioStreamInternal::processCommands() {
+    oboe_result_t result = OBOE_OK;
+
+    // Let the service run in case it is a fake service simulator.
+    mService->tickle(); // TODO use real service thread
+
+    while (result == OBOE_OK) {
+        OboeServiceMessage message;
+        if (mAudioEndpoint.readUpCommand(&message) != 1) {
+            break; // no command this time, no problem
+        }
+        switch (message.what) {
+        case OboeServiceMessage::code::TIMESTAMP:
+            result = onTimestampFromServer(&message);
+            break;
+
+        case OboeServiceMessage::code::EVENT:
+            result = onEventFromServer(&message);
+            break;
+
+        default:
+            ALOGW("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
+                 (int) message.what);
+            result = OBOE_ERROR_UNEXPECTED_VALUE;
+            break;
+        }
+    }
+    return result;
+}
+
+// Write the data, block if needed and timeoutMillis > 0
+oboe_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
+                                         oboe_nanoseconds_t timeoutNanoseconds)
+{
+    oboe_result_t result = OBOE_OK;
+    uint8_t* source = (uint8_t*)buffer;
+    oboe_nanoseconds_t currentTimeNanos = AudioClock::getNanoseconds();
+    oboe_nanoseconds_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
+    int32_t framesLeft = numFrames;
+//    ALOGD("AudioStreamInternal::write(%p, %d) at time %08llu , mState = %d ------------------",
+//         buffer, numFrames, (unsigned long long) currentTimeNanos, mState);
+
+    // Write until all the data has been written or until a timeout occurs.
+    while (framesLeft > 0) {
+        // The call to writeNow() will not block. It will just write as much as it can.
+        oboe_nanoseconds_t wakeTimeNanos = 0;
+        oboe_result_t framesWritten = writeNow(source, framesLeft,
+                                               currentTimeNanos, &wakeTimeNanos);
+//        ALOGD("AudioStreamInternal::write() writeNow() framesLeft = %d --> framesWritten = %d", framesLeft, framesWritten);
+        if (framesWritten < 0) {
+            result = framesWritten;
+            break;
+        }
+        framesLeft -= (int32_t) framesWritten;
+        source += framesWritten * getBytesPerFrame();
+
+        // Should we block?
+        if (timeoutNanoseconds == 0) {
+            break; // don't block
+        } else if (framesLeft > 0) {
+            //ALOGD("AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
+            // clip the wake time to something reasonable
+            if (wakeTimeNanos < currentTimeNanos) {
+                wakeTimeNanos = currentTimeNanos;
+            }
+            if (wakeTimeNanos > deadlineNanos) {
+                // If we time out, just return the framesWritten so far.
+                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos", (long long) timeoutNanoseconds);
+                break;
+            }
+
+            //ALOGD("AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
+            //        (long long) (wakeTimeNanos - currentTimeNanos));
+            AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
+            currentTimeNanos = AudioClock::getNanoseconds();
+        }
+    }
+
+    // return error or framesWritten
+    return (result < 0) ? result : numFrames - framesLeft;
+}
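
The loop above follows a familiar shape: call a non-blocking write, sleep until the suggested wake time, and stop once all frames are written or the deadline passes. A self-contained sketch of that shape, using hypothetical names and std::chrono in place of AudioClock (an illustration of the pattern, not the liboboe code):

    #include <chrono>
    #include <cstdint>
    #include <thread>

    // Hypothetical non-blocking sink: accepts what fits and suggests a wake-up time.
    struct NonBlockingSink {
        int32_t writeNow(const uint8_t* /*data*/, int32_t numFrames,
                         std::chrono::steady_clock::time_point now,
                         std::chrono::steady_clock::time_point* wakeTime) {
            int32_t written = (numFrames > 64) ? 64 : numFrames; // pretend only 64 fit
            *wakeTime = now + std::chrono::milliseconds(2);      // try again in ~2 ms
            return written;
        }
    };

    // Same shape as the write() above: loop, sleep, stop at the deadline.
    int32_t timedWrite(NonBlockingSink& sink, const uint8_t* data, int32_t numFrames,
                       std::chrono::nanoseconds timeout) {
        auto now = std::chrono::steady_clock::now();
        const auto deadline = now + timeout;
        int32_t framesLeft = numFrames;
        while (framesLeft > 0) {
            std::chrono::steady_clock::time_point wakeTime;
            int32_t written = sink.writeNow(data, framesLeft, now, &wakeTime);
            framesLeft -= written;
            data += written;                          // one byte per frame in this sketch
            if (timeout.count() == 0) break;          // caller asked for non-blocking
            if (framesLeft > 0) {
                if (wakeTime < now) wakeTime = now;   // clip unreasonable wake times
                if (wakeTime > deadline) break;       // would overshoot the deadline
                std::this_thread::sleep_until(wakeTime);
                now = std::chrono::steady_clock::now();
            }
        }
        return numFrames - framesLeft;                // frames actually written
    }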
+
+// Write as much data as we can without blocking.
+oboe_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
+                                         oboe_nanoseconds_t currentNanoTime, oboe_nanoseconds_t *wakeTimePtr) {
+    {
+        oboe_result_t result = processCommands();
+        if (result != OBOE_OK) {
+            return result;
+        }
+    }
+
+    if (mAudioEndpoint.isOutputFreeRunning()) {
+        // Update data queue based on the timing model.
+        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+        mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
+        // If the read index passed the write index then consider it an underrun.
+        if (mAudioEndpoint.getFullFramesAvailable() < 0) {
+            mXRunCount++;
+        }
+    }
+    // TODO else maybe query the read counter from the endpoint, since the actual reader sets it
+
+    // Write some data to the buffer.
+    int32_t framesWritten = mAudioEndpoint.writeDataNow(buffer, numFrames);
+    if (framesWritten > 0) {
+        incrementFramesWritten(framesWritten);
+    }
+    //ALOGD("AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
+    //    numFrames, framesWritten);
+
+    // Calculate an ideal time to wake up.
+    if (wakeTimePtr != nullptr && framesWritten >= 0) {
+        // By default wake up a few milliseconds from now.  // TODO review
+        oboe_nanoseconds_t wakeTime = currentNanoTime + (2 * OBOE_NANOS_PER_MILLISECOND);
+        switch (getState()) {
+            case OBOE_STREAM_STATE_OPEN:
+            case OBOE_STREAM_STATE_STARTING:
+                if (framesWritten != 0) {
+                    // Don't wait to write more data. Just prime the buffer.
+                    wakeTime = currentNanoTime;
+                }
+                break;
+            case OBOE_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
+                {
+                    uint32_t burstSize = mFramesPerBurst;
+                    if (burstSize < 32) {
+                        burstSize = 32; // TODO review
+                    }
+
+                    uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
+                    wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+                }
+                break;
+            default:
+                break;
+        }
+        *wakeTimePtr = wakeTime;
+
+    }
+//    ALOGD("AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
+//         (unsigned long long)currentNanoTime,
+//         (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
+//         (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+    return framesWritten;
+}
+
+oboe_result_t AudioStreamInternal::waitForStateChange(oboe_stream_state_t currentState,
+                                                      oboe_stream_state_t *nextState,
+                                                      oboe_nanoseconds_t timeoutNanoseconds)
+{
+    oboe_result_t result = processCommands();
+//    ALOGD("AudioStreamInternal::waitForStateChange() - processCommands() returned %d", result);
+    if (result != OBOE_OK) {
+        return result;
+    }
+    // TODO replace this polling with a timed sleep on a futex on the message queue
+    int32_t durationNanos = 5 * OBOE_NANOS_PER_MILLISECOND;
+    oboe_stream_state_t state = getState();
+//    ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
+    while (state == currentState && timeoutNanoseconds > 0) {
+        // TODO use futex from service message queue
+        if (durationNanos > timeoutNanoseconds) {
+            durationNanos = timeoutNanoseconds;
+        }
+        AudioClock::sleepForNanos(durationNanos);
+        timeoutNanoseconds -= durationNanos;
+
+        result = processCommands();
+        if (result != OBOE_OK) {
+            return result;
+        }
+
+        state = getState();
+//        ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
+    }
+    if (nextState != nullptr) {
+        *nextState = state;
+    }
+    return (state == currentState) ? OBOE_ERROR_TIMEOUT : OBOE_OK;
+}
+
+
+void AudioStreamInternal::processTimestamp(uint64_t position, oboe_nanoseconds_t time) {
+    mClockModel.processTimestamp(position, time);
+}
+
+oboe_result_t AudioStreamInternal::setBufferSize(oboe_size_frames_t requestedFrames,
+                                        oboe_size_frames_t *actualFrames) {
+    return mAudioEndpoint.setBufferSizeInFrames(requestedFrames, actualFrames);
+}
+
+oboe_size_frames_t AudioStreamInternal::getBufferSize() const
+{
+    return mAudioEndpoint.getBufferSizeInFrames();
+}
+
+oboe_size_frames_t AudioStreamInternal::getBufferCapacity() const
+{
+    return mAudioEndpoint.getBufferCapacityInFrames();
+}
+
+oboe_size_frames_t AudioStreamInternal::getFramesPerBurst() const
+{
+    return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
+}
+
+oboe_position_frames_t AudioStreamInternal::getFramesRead()
+{
+    oboe_position_frames_t framesRead =
+            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
+            + mFramesOffsetFromService;
+    // Prevent retrograde motion.
+    if (framesRead < mLastFramesRead) {
+        framesRead = mLastFramesRead;
+    } else {
+        mLastFramesRead = framesRead;
+    }
+    ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+    return framesRead;
+}
+
+// TODO implement getTimestamp
diff --git a/media/liboboe/src/client/AudioStreamInternal.h b/media/liboboe/src/client/AudioStreamInternal.h
new file mode 100644
index 0000000..6f37761
--- /dev/null
+++ b/media/liboboe/src/client/AudioStreamInternal.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_AUDIOSTREAMINTERNAL_H
+#define OBOE_AUDIOSTREAMINTERNAL_H
+
+#include <stdint.h>
+#include <oboe/OboeAudio.h>
+
+#include "binding/IOboeAudioService.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "client/IsochronousClockModel.h"
+#include "client/AudioEndpoint.h"
+#include "core/AudioStream.h"
+
+using android::sp;
+using android::IOboeAudioService;
+
+namespace oboe {
+
+// A stream that talks to the OboeService or directly to a HAL.
+class AudioStreamInternal : public AudioStream {
+
+public:
+    AudioStreamInternal();
+    virtual ~AudioStreamInternal();
+
+    // =========== Begin ABSTRACT methods ===========================
+    virtual oboe_result_t requestStart() override;
+
+    virtual oboe_result_t requestPause() override;
+
+    virtual oboe_result_t requestFlush() override;
+
+    virtual oboe_result_t requestStop() override;
+
+    // TODO use oboe_clockid_t all the way down to AudioClock
+    virtual oboe_result_t getTimestamp(clockid_t clockId,
+                                       oboe_position_frames_t *framePosition,
+                                       oboe_nanoseconds_t *timeNanoseconds) override;
+
+
+    virtual oboe_result_t updateState() override;
+    // =========== End ABSTRACT methods ===========================
+
+    virtual oboe_result_t open(const AudioStreamBuilder &builder) override;
+
+    virtual oboe_result_t close() override;
+
+    virtual oboe_result_t write(const void *buffer,
+                             int32_t numFrames,
+                             oboe_nanoseconds_t timeoutNanoseconds) override;
+
+    virtual oboe_result_t waitForStateChange(oboe_stream_state_t currentState,
+                                          oboe_stream_state_t *nextState,
+                                          oboe_nanoseconds_t timeoutNanoseconds) override;
+
+    virtual oboe_result_t setBufferSize(oboe_size_frames_t requestedFrames,
+                                        oboe_size_frames_t *actualFrames) override;
+
+    virtual oboe_size_frames_t getBufferSize() const override;
+
+    virtual oboe_size_frames_t getBufferCapacity() const override;
+
+    virtual oboe_size_frames_t getFramesPerBurst() const override;
+
+    virtual oboe_position_frames_t getFramesRead() override;
+
+    virtual int32_t getXRunCount() const override {
+        return mXRunCount;
+    }
+
+    virtual oboe_result_t registerThread() override;
+
+    virtual oboe_result_t unregisterThread() override;
+
+protected:
+
+    oboe_result_t processCommands();
+
+/**
+ * Low-level write that will not block. It just writes as much as it can.
+ *
+ * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
+ *
+ * @return the number of frames written or a negative error code.
+ */
+    virtual oboe_result_t writeNow(const void *buffer,
+                                int32_t numFrames,
+                                oboe_nanoseconds_t currentTimeNanos,
+                                oboe_nanoseconds_t *wakeTimePtr);
+
+    void onFlushFromServer();
+
+    oboe_result_t onEventFromServer(OboeServiceMessage *message);
+
+    oboe_result_t onTimestampFromServer(OboeServiceMessage *message);
+
+private:
+    IsochronousClockModel    mClockModel;
+    AudioEndpoint            mAudioEndpoint;
+    oboe_handle_t            mServiceStreamHandle;
+    EndpointDescriptor       mEndpointDescriptor;
+    sp<IOboeAudioService>    mService;
+    // Offset from underlying frame position.
+    oboe_position_frames_t   mFramesOffsetFromService = 0;
+    oboe_position_frames_t   mLastFramesRead = 0;
+    oboe_size_frames_t       mFramesPerBurst;
+    int32_t                  mXRunCount = 0;
+
+    void processTimestamp(uint64_t position, oboe_nanoseconds_t time);
+};
+
+} /* namespace oboe */
+
+#endif //OBOE_AUDIOSTREAMINTERNAL_H
diff --git a/media/liboboe/src/client/IsochronousClockModel.cpp b/media/liboboe/src/client/IsochronousClockModel.cpp
new file mode 100644
index 0000000..b8e5538
--- /dev/null
+++ b/media/liboboe/src/client/IsochronousClockModel.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OboeAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <oboe/OboeDefinitions.h>
+
+#include "IsochronousClockModel.h"
+
+#define MIN_LATENESS_NANOS (10 * OBOE_NANOS_PER_MICROSECOND)
+
+using namespace android;
+using namespace oboe;
+
+IsochronousClockModel::IsochronousClockModel()
+        : mSampleRate(48000)
+        , mFramesPerBurst(64)
+        , mMaxLatenessInNanos(0)
+        , mMarkerFramePosition(0)
+        , mMarkerNanoTime(0)
+        , mState(STATE_STOPPED)
+{
+}
+
+IsochronousClockModel::~IsochronousClockModel() {
+}
+
+void IsochronousClockModel::start(oboe_nanoseconds_t nanoTime)
+{
+    mMarkerNanoTime = nanoTime;
+    mState = STATE_STARTING;
+}
+
+void IsochronousClockModel::stop(oboe_nanoseconds_t nanoTime)
+{
+    mMarkerNanoTime = nanoTime;
+    mMarkerFramePosition = convertTimeToPosition(nanoTime); // TODO should we do this?
+    mState = STATE_STOPPED;
+}
+
+void IsochronousClockModel::processTimestamp(oboe_position_frames_t framePosition,
+                                             oboe_nanoseconds_t nanoTime) {
+    int64_t framesDelta = framePosition - mMarkerFramePosition;
+    int64_t nanosDelta = nanoTime - mMarkerNanoTime;
+    if (nanosDelta < 1000) {
+        return;
+    }
+
+//    ALOGI("processTimestamp() - mMarkerFramePosition = %lld at mMarkerNanoTime %llu",
+//         (long long)mMarkerFramePosition,
+//         (long long)mMarkerNanoTime);
+//    ALOGI("processTimestamp() - framePosition = %lld at nanoTime %llu",
+//         (long long)framePosition,
+//         (long long)nanoTime);
+
+    int64_t expectedNanosDelta = convertDeltaPositionToTime(framesDelta);
+//    ALOGI("processTimestamp() - expectedNanosDelta = %lld, nanosDelta = %llu",
+//         (long long)expectedNanosDelta,
+//         (long long)nanosDelta);
+
+//    ALOGI("processTimestamp() - mSampleRate = %d", mSampleRate);
+//    ALOGI("processTimestamp() - mState = %d", mState);
+    switch (mState) {
+    case STATE_STOPPED:
+        break;
+    case STATE_STARTING:
+        mMarkerFramePosition = framePosition;
+        mMarkerNanoTime = nanoTime;
+        mState = STATE_SYNCING;
+        break;
+    case STATE_SYNCING:
+        // This will handle a burst of rapid consumption in the beginning.
+        if (nanosDelta < expectedNanosDelta) {
+            mMarkerFramePosition = framePosition;
+            mMarkerNanoTime = nanoTime;
+        } else {
+            ALOGI("processTimestamp() - advance to STATE_RUNNING");
+            mState = STATE_RUNNING;
+        }
+        break;
+    case STATE_RUNNING:
+        if (nanosDelta < expectedNanosDelta) {
+            // Earlier than expected timestamp.
+            // This data is probably more accurate, so use it;
+            // or we may be drifting due to a slow HW clock.
+            mMarkerFramePosition = framePosition;
+            mMarkerNanoTime = nanoTime;
+            ALOGI("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
+                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
+        } else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
+            // Later than expected timestamp.
+            mMarkerFramePosition = framePosition;
+            mMarkerNanoTime = nanoTime - mMaxLatenessInNanos;
+            ALOGI("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
+                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
+                 (int) (mMaxLatenessInNanos / 1000));
+        }
+        break;
+    default:
+        break;
+    }
+    ++mTimestampCount;
+}
+
+void IsochronousClockModel::setSampleRate(int32_t sampleRate) {
+    mSampleRate = sampleRate;
+    update();
+}
+
+void IsochronousClockModel::setFramesPerBurst(int32_t framesPerBurst) {
+    mFramesPerBurst = framesPerBurst;
+    update();
+}
+
+void IsochronousClockModel::update() {
+    int64_t nanosLate = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
+    mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
+}
+
+oboe_nanoseconds_t IsochronousClockModel::convertDeltaPositionToTime(
+        oboe_position_frames_t framesDelta) const {
+    return (OBOE_NANOS_PER_SECOND * framesDelta) / mSampleRate;
+}
+
+int64_t IsochronousClockModel::convertDeltaTimeToPosition(oboe_nanoseconds_t nanosDelta) const {
+    return (mSampleRate * nanosDelta) / OBOE_NANOS_PER_SECOND;
+}
+
+oboe_nanoseconds_t IsochronousClockModel::convertPositionToTime(
+        oboe_position_frames_t framePosition) const {
+    if (mState == STATE_STOPPED) {
+        return mMarkerNanoTime;
+    }
+    oboe_position_frames_t nextBurstIndex = (framePosition + mFramesPerBurst - 1) / mFramesPerBurst;
+    oboe_position_frames_t nextBurstPosition = mFramesPerBurst * nextBurstIndex;
+    oboe_position_frames_t framesDelta = nextBurstPosition - mMarkerFramePosition;
+    oboe_nanoseconds_t nanosDelta = convertDeltaPositionToTime(framesDelta);
+    oboe_nanoseconds_t time = (oboe_nanoseconds_t) (mMarkerNanoTime + nanosDelta);
+//    ALOGI("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
+//         (unsigned long long)framePosition,
+//         (unsigned long long)time);
+    return time;
+}
+
+oboe_position_frames_t IsochronousClockModel::convertTimeToPosition(
+        oboe_nanoseconds_t nanoTime) const {
+    if (mState == STATE_STOPPED) {
+        return mMarkerFramePosition;
+    }
+    oboe_nanoseconds_t nanosDelta = nanoTime - mMarkerNanoTime;
+    oboe_position_frames_t framesDelta = convertDeltaTimeToPosition(nanosDelta);
+    oboe_position_frames_t nextBurstPosition = mMarkerFramePosition + framesDelta;
+    oboe_position_frames_t nextBurstIndex = nextBurstPosition / mFramesPerBurst;
+    oboe_position_frames_t position = nextBurstIndex * mFramesPerBurst;
+//    ALOGI("IsochronousClockModel::convertTimeToPosition: time = %llu --> pos = %llu",
+//         (unsigned long long)nanoTime,
+//         (unsigned long long)position);
+//    ALOGI("IsochronousClockModel::convertTimeToPosition: framesDelta = %llu, mFramesPerBurst = %d",
+//         (long long) framesDelta, mFramesPerBurst);
+    return position;
+}
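
Both conversions are linear scalings by the sample rate, with positions snapped to burst boundaries. A standalone check of the arithmetic, assuming 48000 frames/s and 64-frame bursts (the constructor defaults above):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t kNanosPerSecond = 1000000000LL;
        const int32_t sampleRate = 48000;
        const int32_t framesPerBurst = 64;

        // One burst lasts framesPerBurst / sampleRate seconds.
        int64_t nanosPerBurst = (kNanosPerSecond * framesPerBurst) / sampleRate;
        std::printf("one burst = %lld ns (about 1.33 ms)\n", (long long) nanosPerBurst);

        // convertPositionToTime() rounds the position up to the next burst boundary.
        int64_t framePosition = 1000;
        int64_t nextBurstPosition =
                ((framePosition + framesPerBurst - 1) / framesPerBurst) * framesPerBurst;
        std::printf("position %lld rounds up to %lld\n",
                    (long long) framePosition, (long long) nextBurstPosition);
        return 0;
    }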
diff --git a/media/liboboe/src/client/IsochronousClockModel.h b/media/liboboe/src/client/IsochronousClockModel.h
new file mode 100644
index 0000000..97be325
--- /dev/null
+++ b/media/liboboe/src/client/IsochronousClockModel.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_ISOCHRONOUSCLOCKMODEL_H
+#define OBOE_ISOCHRONOUSCLOCKMODEL_H
+
+#include <stdint.h>
+#include <oboe/OboeAudio.h>
+
+namespace oboe {
+
+/**
+ * Model an isochronous data stream using occasional timestamps as input.
+ * This can be used to predict the position of the stream at a given time.
+ *
+ * This class is not thread safe and should only be called from one thread.
+ */
+class IsochronousClockModel {
+
+public:
+    IsochronousClockModel();
+    virtual ~IsochronousClockModel();
+
+    void start(oboe_nanoseconds_t nanoTime);
+    void stop(oboe_nanoseconds_t nanoTime);
+
+    void processTimestamp(oboe_position_frames_t framePosition, oboe_nanoseconds_t nanoTime);
+
+    /**
+     * @param sampleRate rate of the stream in frames per second
+     */
+    void setSampleRate(oboe_sample_rate_t sampleRate);
+
+    oboe_sample_rate_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    /**
+     * This must be set accurately in order to track the isochronous stream.
+     *
+     * @param framesPerBurst number of frames that the stream advances at one time.
+     */
+    void setFramesPerBurst(oboe_size_frames_t framesPerBurst);
+
+    oboe_size_frames_t getFramesPerBurst() const {
+        return mFramesPerBurst;
+    }
+
+    /**
+     * Calculate an estimated time when the stream will be at that position.
+     *
+     * @param framePosition position of the stream in frames
+     * @return time in nanoseconds
+     */
+    oboe_nanoseconds_t convertPositionToTime(oboe_position_frames_t framePosition) const;
+
+    /**
+     * Calculate an estimated position where the stream will be at the specified time.
+     *
+     * @param nanoTime time of interest
+     * @return position in frames
+     */
+    oboe_position_frames_t convertTimeToPosition(oboe_nanoseconds_t nanoTime) const;
+
+    /**
+     * @param framesDelta difference in frames
+     * @return duration in nanoseconds
+     */
+    oboe_nanoseconds_t convertDeltaPositionToTime(oboe_position_frames_t framesDelta) const;
+
+    /**
+     * @param nanosDelta duration in nanoseconds
+     * @return frames that the stream will advance in that time
+     */
+    oboe_position_frames_t convertDeltaTimeToPosition(oboe_nanoseconds_t nanosDelta) const;
+
+private:
+    enum clock_model_state_t {
+        STATE_STOPPED,
+        STATE_STARTING,
+        STATE_SYNCING,
+        STATE_RUNNING
+    };
+
+    oboe_sample_rate_t     mSampleRate;
+    oboe_size_frames_t     mFramesPerBurst;
+    int32_t                mMaxLatenessInNanos;
+    oboe_position_frames_t mMarkerFramePosition;
+    oboe_nanoseconds_t     mMarkerNanoTime;
+    int32_t                mTimestampCount = 0;
+    clock_model_state_t    mState;
+
+    void update();
+};
+
+} /* namespace oboe */
+
+#endif //OBOE_ISOCHRONOUSCLOCKMODEL_H
diff --git a/media/liboboe/src/core/AudioStream.cpp b/media/liboboe/src/core/AudioStream.cpp
index f154002..cc654c3 100644
--- a/media/liboboe/src/core/AudioStream.cpp
+++ b/media/liboboe/src/core/AudioStream.cpp
@@ -18,6 +18,7 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
+#include <atomic>
 #include <stdint.h>
 #include <oboe/OboeAudio.h>
 
@@ -27,10 +28,10 @@
 
 using namespace oboe;
 
-/*
- * AudioStream
- */
 AudioStream::AudioStream() {
+    // mThread is a pthread_t of unknown size so we need memset.
+    memset(&mThread, 0, sizeof(mThread));
+    setPeriodNanoseconds(0);
 }
 
 oboe_result_t AudioStream::open(const AudioStreamBuilder& builder)
@@ -91,23 +92,51 @@
 
         state = getState();
     }
-    if (nextState != NULL) {
+    if (nextState != nullptr) {
         *nextState = state;
     }
     return (state == currentState) ? OBOE_ERROR_TIMEOUT : OBOE_OK;
 }
 
+// This registers the app's background audio thread with the server before
+// passing control to the app. This gives the server an opportunity to boost
+// the thread's performance characteristics.
+void* AudioStream::wrapUserThread() {
+    void* procResult = nullptr;
+    mThreadRegistrationResult = registerThread();
+    if (mThreadRegistrationResult == OBOE_OK) {
+        // Call application procedure. This may take a very long time.
+        procResult = mThreadProc(mThreadArg);
+        ALOGD("AudioStream::mThreadProc() returned");
+        mThreadRegistrationResult = unregisterThread();
+    }
+    return procResult;
+}
+
+// This is the entry point for the new thread created by createThread().
+// It converts the 'C' function call to a C++ method call.
+static void* AudioStream_internalThreadProc(void* threadArg) {
+    AudioStream *audioStream = (AudioStream *) threadArg;
+    return audioStream->wrapUserThread();
+}
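
wrapUserThread() and the entry point above form a simple trampoline: register the new thread with the service, run the app's proc for as long as it likes, then unregister. A self-contained sketch of that shape, with hypothetical stand-ins for the register/unregister calls:

    #include <pthread.h>
    #include <cstdio>

    // Hypothetical stand-ins for registerThread()/unregisterThread().
    static int registerWithService()   { std::puts("registered");   return 0; }
    static int unregisterFromService() { std::puts("unregistered"); return 0; }

    struct ThreadWrapper {
        void* (*userProc)(void*);
        void*  userArg;
    };

    // Trampoline: register, run the user proc, then unregister.
    static void* trampoline(void* arg) {
        ThreadWrapper* wrapper = static_cast<ThreadWrapper*>(arg);
        void* result = nullptr;
        if (registerWithService() == 0) {
            result = wrapper->userProc(wrapper->userArg);  // may run for a long time
            unregisterFromService();
        }
        return result;
    }

    static void* userProc(void* arg) { return arg; }

    int main() {
        ThreadWrapper wrapper { userProc, nullptr };
        pthread_t thread;
        if (pthread_create(&thread, nullptr, trampoline, &wrapper) == 0) {
            pthread_join(thread, nullptr);
        }
        return 0;
    }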
+
 oboe_result_t AudioStream::createThread(oboe_nanoseconds_t periodNanoseconds,
-                                     void *(*startRoutine)(void *), void *arg)
+                                     oboe_audio_thread_proc_t *threadProc,
+                                     void* threadArg)
 {
     if (mHasThread) {
         return OBOE_ERROR_INVALID_STATE;
     }
-    if (startRoutine == NULL) {
+    if (threadProc == nullptr) {
         return OBOE_ERROR_NULL;
     }
-    int err = pthread_create(&mThread, NULL, startRoutine, arg);
+    // Pass input parameters to the background thread.
+    mThreadProc = threadProc;
+    mThreadArg = threadArg;
+    setPeriodNanoseconds(periodNanoseconds);
+    int err = pthread_create(&mThread, nullptr, AudioStream_internalThreadProc, this);
     if (err != 0) {
+        // TODO convert errno to oboe_result_t
         return OBOE_ERROR_INTERNAL;
     } else {
         mHasThread = true;
@@ -115,7 +144,7 @@
     }
 }
 
-oboe_result_t AudioStream::joinThread(void **returnArg, oboe_nanoseconds_t timeoutNanoseconds)
+oboe_result_t AudioStream::joinThread(void** returnArg, oboe_nanoseconds_t timeoutNanoseconds)
 {
     if (!mHasThread) {
         return OBOE_ERROR_INVALID_STATE;
@@ -128,7 +157,7 @@
     int err = pthread_join(mThread, returnArg);
 #endif
     mHasThread = false;
-    // TODO Just leaked a thread?
-    return err ? OBOE_ERROR_INTERNAL : OBOE_OK;
+    // TODO convert errno to oboe_result_t
+    return err ? OBOE_ERROR_INTERNAL : mThreadRegistrationResult;
 }
 
diff --git a/media/liboboe/src/core/AudioStream.h b/media/liboboe/src/core/AudioStream.h
index 8cbb091..c13ae9f 100644
--- a/media/liboboe/src/core/AudioStream.h
+++ b/media/liboboe/src/core/AudioStream.h
@@ -17,9 +17,11 @@
 #ifndef OBOE_AUDIOSTREAM_H
 #define OBOE_AUDIOSTREAM_H
 
-#include <unistd.h>
-#include <sys/types.h>
+#include <atomic>
+#include <stdint.h>
+#include <oboe/OboeDefinitions.h>
 #include <oboe/OboeAudio.h>
+
 #include "OboeUtilities.h"
 #include "MonotonicCounter.h"
 
@@ -83,10 +85,25 @@
     }
 
     virtual oboe_result_t createThread(oboe_nanoseconds_t periodNanoseconds,
-                                     void *(*start_routine)(void *), void *arg);
+                                       oboe_audio_thread_proc_t *threadProc,
+                                       void *threadArg);
 
     virtual oboe_result_t joinThread(void **returnArg, oboe_nanoseconds_t timeoutNanoseconds);
 
+    virtual oboe_result_t registerThread() {
+        return OBOE_OK;
+    }
+
+    virtual oboe_result_t unregisterThread() {
+        return OBOE_OK;
+    }
+
+    /**
+     * Internal function used to call the audio thread passed by the user.
+     * It is unfortunately public because it needs to be called by a static 'C' function.
+     */
+    void* wrapUserThread();
+
     // ============== Queries ===========================
 
     virtual oboe_stream_state_t getState() const {
@@ -125,7 +142,7 @@
         return mSamplesPerFrame;
     }
 
-    OboeDeviceId getDeviceId() const {
+    oboe_device_id_t getDeviceId() const {
         return mDeviceId;
     }
 
@@ -220,21 +237,42 @@
         mState = state;
     }
 
+
+
+protected:
     MonotonicCounter     mFramesWritten;
     MonotonicCounter     mFramesRead;
 
+    void setPeriodNanoseconds(oboe_nanoseconds_t periodNanoseconds) {
+        mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
+    }
+
+    oboe_nanoseconds_t getPeriodNanoseconds() {
+        return mPeriodNanoseconds.load(std::memory_order_acquire);
+    }
+
 private:
     // These do not change after open().
     int32_t              mSamplesPerFrame = OBOE_UNSPECIFIED;
     oboe_sample_rate_t   mSampleRate = OBOE_UNSPECIFIED;
     oboe_stream_state_t  mState = OBOE_STREAM_STATE_UNINITIALIZED;
-    OboeDeviceId         mDeviceId = OBOE_UNSPECIFIED;
+    oboe_device_id_t     mDeviceId = OBOE_UNSPECIFIED;
     oboe_sharing_mode_t  mSharingMode = OBOE_SHARING_MODE_LEGACY;
-    oboe_audio_format_t  mFormat = OBOE_UNSPECIFIED;
+    oboe_audio_format_t  mFormat = OBOE_AUDIO_FORMAT_UNSPECIFIED;
     oboe_direction_t     mDirection = OBOE_DIRECTION_OUTPUT;
 
+    // background thread ----------------------------------
     bool                 mHasThread = false;
-    pthread_t            mThread;
+    pthread_t            mThread; // initialized in constructor
+
+    // These are set by the application thread and then read by the audio pthread.
+    std::atomic<oboe_nanoseconds_t>  mPeriodNanoseconds; // for tuning SCHED_FIFO threads
+    // TODO make atomic?
+    oboe_audio_thread_proc_t* mThreadProc = nullptr;
+    void*                mThreadArg = nullptr;
+    oboe_result_t        mThreadRegistrationResult = OBOE_OK;
+
+
 };
 
 } /* namespace oboe */
diff --git a/media/liboboe/src/core/AudioStreamBuilder.cpp b/media/liboboe/src/core/AudioStreamBuilder.cpp
index 56e6706..37e1378 100644
--- a/media/liboboe/src/core/AudioStreamBuilder.cpp
+++ b/media/liboboe/src/core/AudioStreamBuilder.cpp
@@ -18,11 +18,17 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
-#include <sys/types.h>
-#include "AudioStream.h"
-#include "AudioStreamBuilder.h"
-#include "AudioStreamRecord.h"
-#include "AudioStreamTrack.h"
+#include <new>
+#include <stdint.h>
+
+#include <oboe/OboeDefinitions.h>
+#include <oboe/OboeAudio.h>
+
+#include "client/AudioStreamInternal.h"
+#include "core/AudioStream.h"
+#include "core/AudioStreamBuilder.h"
+#include "legacy/AudioStreamRecord.h"
+#include "legacy/AudioStreamTrack.h"
 
 using namespace oboe;
 
@@ -35,15 +41,15 @@
 AudioStreamBuilder::~AudioStreamBuilder() {
 }
 
-oboe_result_t AudioStreamBuilder::build(AudioStream **streamPtr) {
+oboe_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
     // TODO Is there a better place to put the code that decides which class to use?
-    AudioStream *audioStream = nullptr;
+    AudioStream* audioStream = nullptr;
     const oboe_sharing_mode_t sharingMode = getSharingMode();
     switch (getDirection()) {
     case OBOE_DIRECTION_INPUT:
         switch (sharingMode) {
             case OBOE_SHARING_MODE_LEGACY:
-                audioStream = new AudioStreamRecord();
+                audioStream = new(std::nothrow) AudioStreamRecord();
                 break;
             default:
                 ALOGE("AudioStreamBuilder(): bad sharing mode = %d", sharingMode);
@@ -54,7 +60,10 @@
     case OBOE_DIRECTION_OUTPUT:
         switch (sharingMode) {
             case OBOE_SHARING_MODE_LEGACY:
-                audioStream = new AudioStreamTrack();
+                audioStream = new(std::nothrow) AudioStreamTrack();
+                break;
+            case OBOE_SHARING_MODE_EXCLUSIVE:
+                audioStream = new(std::nothrow) AudioStreamInternal();
                 break;
             default:
                 ALOGE("AudioStreamBuilder(): bad sharing mode = %d", sharingMode);
diff --git a/media/liboboe/src/core/AudioStreamBuilder.h b/media/liboboe/src/core/AudioStreamBuilder.h
index 3f98ebb..ec17eb6 100644
--- a/media/liboboe/src/core/AudioStreamBuilder.h
+++ b/media/liboboe/src/core/AudioStreamBuilder.h
@@ -17,7 +17,11 @@
 #ifndef OBOE_AUDIOSTREAMBUILDER_H
 #define OBOE_AUDIOSTREAMBUILDER_H
 
+#include <stdint.h>
+
+#include <oboe/OboeDefinitions.h>
 #include <oboe/OboeAudio.h>
+
 #include "AudioStream.h"
 
 namespace oboe {
@@ -38,7 +42,7 @@
     /**
      * This is also known as channelCount.
      */
-    AudioStreamBuilder *setSamplesPerFrame(int samplesPerFrame) {
+    AudioStreamBuilder* setSamplesPerFrame(int samplesPerFrame) {
         mSamplesPerFrame = samplesPerFrame;
         return this;
     }
@@ -47,7 +51,7 @@
         return mDirection;
     }
 
-    AudioStreamBuilder *setDirection(oboe_direction_t direction) {
+    AudioStreamBuilder* setDirection(oboe_direction_t direction) {
         mDirection = direction;
         return this;
     }
@@ -56,7 +60,7 @@
         return mSampleRate;
     }
 
-    AudioStreamBuilder *setSampleRate(oboe_sample_rate_t sampleRate) {
+    AudioStreamBuilder* setSampleRate(oboe_sample_rate_t sampleRate) {
         mSampleRate = sampleRate;
         return this;
     }
@@ -74,16 +78,16 @@
         return mSharingMode;
     }
 
-    AudioStreamBuilder *setSharingMode(oboe_sharing_mode_t sharingMode) {
+    AudioStreamBuilder* setSharingMode(oboe_sharing_mode_t sharingMode) {
         mSharingMode = sharingMode;
         return this;
     }
 
-    OboeDeviceId getDeviceId() const {
+    oboe_device_id_t getDeviceId() const {
         return mDeviceId;
     }
 
-    AudioStreamBuilder *setDeviceId(OboeDeviceId deviceId) {
+    AudioStreamBuilder* setDeviceId(oboe_device_id_t deviceId) {
         mDeviceId = deviceId;
         return this;
     }
@@ -93,9 +97,9 @@
 private:
     int32_t              mSamplesPerFrame = OBOE_UNSPECIFIED;
     oboe_sample_rate_t   mSampleRate = OBOE_UNSPECIFIED;
-    OboeDeviceId         mDeviceId = OBOE_UNSPECIFIED; // TODO need better default
+    oboe_device_id_t     mDeviceId = OBOE_DEVICE_UNSPECIFIED;
     oboe_sharing_mode_t  mSharingMode = OBOE_SHARING_MODE_LEGACY;
-    oboe_audio_format_t  mFormat = OBOE_UNSPECIFIED;
+    oboe_audio_format_t  mFormat = OBOE_AUDIO_FORMAT_UNSPECIFIED;
     oboe_direction_t     mDirection = OBOE_DIRECTION_OUTPUT;
 };
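
The setters above return the builder so calls can be chained before build(). A minimal usage sketch, assuming the liboboe headers from this change are on the include path; the parameter values are only illustrative:

    #include <oboe/OboeDefinitions.h>
    #include <oboe/OboeAudio.h>

    #include "core/AudioStream.h"
    #include "core/AudioStreamBuilder.h"

    using namespace oboe;

    // Build an output stream using the fluent setters declared above.
    oboe_result_t openOutputStream(AudioStream** streamOut) {
        AudioStreamBuilder builder;
        builder.setDirection(OBOE_DIRECTION_OUTPUT)
               ->setSampleRate(48000)
               ->setSamplesPerFrame(2)
               ->setSharingMode(OBOE_SHARING_MODE_LEGACY);
        return builder.build(streamOut);
    }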
 
diff --git a/media/liboboe/src/core/OboeAudio.cpp b/media/liboboe/src/core/OboeAudio.cpp
index a02f226..d98ca36 100644
--- a/media/liboboe/src/core/OboeAudio.cpp
+++ b/media/liboboe/src/core/OboeAudio.cpp
@@ -23,21 +23,13 @@
 
 #include <oboe/OboeDefinitions.h>
 #include <oboe/OboeAudio.h>
+
 #include "AudioStreamBuilder.h"
 #include "AudioStream.h"
 #include "AudioClock.h"
+#include "client/AudioStreamInternal.h"
 #include "HandleTracker.h"
 
-// temporary, as I stage in the MMAP/NOIRQ support, do not review
-#ifndef OBOE_SUPPORT_MMAP
-#define OBOE_SUPPORT_MMAP 0
-#endif
-
-#if OBOE_SUPPORT_MMAP
-#include "AudioStreamInternal.h"
-#include "OboeServiceGateway.h"
-#endif
-
 using namespace oboe;
 
 // This is not the maximum theoretic possible number of handles that the HandlerTracker
@@ -71,6 +63,8 @@
         return OBOE_ERROR_NULL; \
     }
 
+// Static data.
+// TODO static constructors are discouraged, alternatives?
 static HandleTracker sHandleTracker(OBOE_MAX_HANDLES);
 
 typedef enum
@@ -81,9 +75,6 @@
 } oboe_handle_type_t;
 static_assert(OBOE_HANDLE_TYPE_COUNT <= HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
 
-#if OBOE_SUPPORT_MMAP
-static OboeServiceGateway sOboeServiceGateway;
-#endif
 
 #define OBOE_CASE_ENUM(name) case name: return #name
 
@@ -165,13 +156,21 @@
 }
 
 OBOE_API oboe_result_t OboeStreamBuilder_setDeviceId(OboeStreamBuilder builder,
-                                                     OboeDeviceId deviceId)
+                                                     oboe_device_id_t deviceId)
 {
     AudioStreamBuilder *streamBuilder = CONVERT_BUILDER_HANDLE_OR_RETURN();
     streamBuilder->setDeviceId(deviceId);
     return OBOE_OK;
 }
 
+OBOE_API oboe_result_t OboeStreamBuilder_getDeviceId(OboeStreamBuilder builder,
+                                              oboe_device_id_t *deviceId)
+{
+    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(deviceId);
+    *deviceId = streamBuilder->getDeviceId();
+    return OBOE_OK;
+}
+
 OBOE_API oboe_result_t OboeStreamBuilder_setSampleRate(OboeStreamBuilder builder,
                                               oboe_sample_rate_t sampleRate)
 {
@@ -399,10 +398,10 @@
 
 OBOE_API oboe_result_t OboeStream_createThread(OboeStream stream,
                                      oboe_nanoseconds_t periodNanoseconds,
-                                     void *(*startRoutine)(void *), void *arg)
+                                     oboe_audio_thread_proc_t *threadProc, void *arg)
 {
     AudioStream *audioStream = CONVERT_STREAM_HANDLE_OR_RETURN();
-    return audioStream->createThread(periodNanoseconds, startRoutine, arg);
+    return audioStream->createThread(periodNanoseconds, threadProc, arg);
 }
 
 OBOE_API oboe_result_t OboeStream_joinThread(OboeStream stream,
@@ -513,6 +512,14 @@
     return OBOE_OK;
 }
 
+OBOE_API oboe_result_t OboeStream_getDeviceId(OboeStream stream,
+                                                 oboe_device_id_t *deviceId)
+{
+    AudioStream *audioStream = COMMON_GET_FROM_STREAM_OR_RETURN(deviceId);
+    *deviceId = audioStream->getDeviceId();
+    return OBOE_OK;
+}
+
 OBOE_API oboe_result_t OboeStream_getSharingMode(OboeStream stream,
                                                  oboe_sharing_mode_t *sharingMode)
 {
diff --git a/media/liboboe/src/fifo/FifoBuffer.cpp b/media/liboboe/src/fifo/FifoBuffer.cpp
new file mode 100644
index 0000000..c5489f1
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoBuffer.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstring>
+#include <unistd.h>
+
+#define LOG_TAG "FifoBuffer"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "FifoControllerBase.h"
+#include "FifoController.h"
+#include "FifoControllerIndirect.h"
+#include "FifoBuffer.h"
+
+FifoBuffer::FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames)
+        : mFrameCapacity(capacityInFrames)
+        , mBytesPerFrame(bytesPerFrame)
+        , mStorage(nullptr)
+        , mFramesReadCount(0)
+        , mFramesUnderrunCount(0)
+        , mUnderrunCount(0)
+{
+    // TODO Handle possible failures to allocate. Move out of constructor?
+    mFifo = new FifoController(capacityInFrames, capacityInFrames);
+    // allocate buffer
+    int32_t bytesPerBuffer = bytesPerFrame * capacityInFrames;
+    mStorage = new uint8_t[bytesPerBuffer];
+    mStorageOwned = true;
+    ALOGD("FifoBuffer: capacityInFrames = %d, bytesPerFrame = %d",
+          capacityInFrames, bytesPerFrame);
+}
+
+FifoBuffer::FifoBuffer( int32_t   bytesPerFrame,
+                        fifo_frames_t   capacityInFrames,
+                        fifo_counter_t *  readIndexAddress,
+                        fifo_counter_t *  writeIndexAddress,
+                        void *  dataStorageAddress
+                        )
+        : mFrameCapacity(capacityInFrames)
+        , mBytesPerFrame(bytesPerFrame)
+        , mStorage(static_cast<uint8_t *>(dataStorageAddress))
+        , mFramesReadCount(0)
+        , mFramesUnderrunCount(0)
+        , mUnderrunCount(0)
+{
+    // TODO Handle possible failures to allocate. Move out of constructor?
+    mFifo = new FifoControllerIndirect(capacityInFrames,
+                                       capacityInFrames,
+                                       readIndexAddress,
+                                       writeIndexAddress);
+    mStorageOwned = false;
+    ALOGD("FifoProcessor: capacityInFrames = %d, bytesPerFrame = %d",
+          capacityInFrames, bytesPerFrame);
+}
+
+FifoBuffer::~FifoBuffer() {
+    if (mStorageOwned) {
+        delete[] mStorage;
+    }
+    delete mFifo;
+}
+
+
+int32_t FifoBuffer::convertFramesToBytes(fifo_frames_t frames) {
+    return frames * mBytesPerFrame;
+}
+
+fifo_frames_t FifoBuffer::read(void *buffer, fifo_frames_t numFrames) {
+    size_t numBytes;
+    fifo_frames_t framesAvailable = mFifo->getFullFramesAvailable();
+    fifo_frames_t framesToRead = numFrames;
+    // Is there enough data in the FIFO?
+    if (framesToRead > framesAvailable) {
+        framesToRead = framesAvailable;
+    }
+    if (framesToRead == 0) {
+        return 0;
+    }
+
+    fifo_frames_t readIndex = mFifo->getReadIndex();
+    uint8_t *destination = (uint8_t *) buffer;
+    uint8_t *source = &mStorage[convertFramesToBytes(readIndex)];
+    if ((readIndex + framesToRead) > mFrameCapacity) {
+        // read in two parts, first part here
+        fifo_frames_t frames1 = mFrameCapacity - readIndex;
+        numBytes = convertFramesToBytes(frames1);
+        memcpy(destination, source, numBytes);
+        destination += numBytes;
+        // read second part
+        source = &mStorage[0];
+        fifo_frames_t frames2 = framesToRead - frames1;
+        numBytes = convertFramesToBytes(frames2);
+        memcpy(destination, source, numBytes);
+    } else {
+        // just read in one shot
+        numBytes = convertFramesToBytes(framesToRead);
+        memcpy(destination, source, numBytes);
+    }
+    mFifo->advanceReadIndex(framesToRead);
+
+    return framesToRead;
+}
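
The two-part memcpy above handles a read that wraps past the end of the circular storage. A standalone check of the index arithmetic, with the capacity and read index assumed purely for illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int32_t capacityFrames = 256;
        int32_t readIndex = 250;
        int32_t framesToRead = 10;

        if (readIndex + framesToRead > capacityFrames) {
            int32_t frames1 = capacityFrames - readIndex;   // contiguous tail: 6 frames
            int32_t frames2 = framesToRead - frames1;       // wrapped head:    4 frames
            std::printf("two-part read: %d frames then %d frames\n", frames1, frames2);
        } else {
            std::printf("single read of %d frames\n", framesToRead);
        }
        return 0;
    }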
+
+fifo_frames_t FifoBuffer::write(const void *buffer, fifo_frames_t framesToWrite) {
+    fifo_frames_t framesAvailable = mFifo->getEmptyFramesAvailable();
+//    ALOGD("FifoBuffer::write() framesToWrite = %d, framesAvailable = %d",
+//         framesToWrite, framesAvailable);
+    if (framesToWrite > framesAvailable) {
+        framesToWrite = framesAvailable;
+    }
+    if (framesToWrite <= 0) {
+        return 0;
+    }
+
+    size_t numBytes;
+    fifo_frames_t writeIndex = mFifo->getWriteIndex();
+    int byteIndex = convertFramesToBytes(writeIndex);
+    const uint8_t *source = (const uint8_t *) buffer;
+    uint8_t *destination = &mStorage[byteIndex];
+    if ((writeIndex + framesToWrite) > mFrameCapacity) {
+        // write in two parts, first part here
+        fifo_frames_t frames1 = mFrameCapacity - writeIndex;
+        numBytes = convertFramesToBytes(frames1);
+        memcpy(destination, source, numBytes);
+//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
+        // write second part
+        source += convertFramesToBytes(frames1);
+        destination = &mStorage[0];
+        fifo_frames_t framesLeft = framesToWrite - frames1;
+        numBytes = convertFramesToBytes(framesLeft);
+//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
+        memcpy(destination, source, numBytes);
+    } else {
+        // just write in one shot
+        numBytes = convertFramesToBytes(framesToWrite);
+//        ALOGD("FifoBuffer::write(%p to %p, numBytes = %d", source, destination, numBytes);
+        memcpy(destination, source, numBytes);
+    }
+    mFifo->advanceWriteIndex(framesToWrite);
+
+    return framesToWrite;
+}
+
+fifo_frames_t FifoBuffer::readNow(void *buffer, fifo_frames_t numFrames) {
+    mLastReadSize = numFrames;
+    fifo_frames_t framesLeft = numFrames;
+    fifo_frames_t framesRead = read(buffer, numFrames);
+    framesLeft -= framesRead;
+    mFramesReadCount += framesRead;
+    mFramesUnderrunCount += framesLeft;
+    // Zero out any samples we could not set.
+    if (framesLeft > 0) {
+        mUnderrunCount++;
+        int32_t bytesToZero = convertFramesToBytes(framesLeft);
+        memset(buffer, 0, bytesToZero);
+    }
+
+    return framesRead;
+}
+
+fifo_frames_t FifoBuffer::getThreshold() {
+    return mFifo->getThreshold();
+}
+
+void FifoBuffer::setThreshold(fifo_frames_t threshold) {
+    mFifo->setThreshold(threshold);
+}
+
+fifo_frames_t FifoBuffer::getBufferCapacityInFrames() {
+    return mFifo->getCapacity();
+}
+
diff --git a/media/liboboe/src/fifo/FifoBuffer.h b/media/liboboe/src/fifo/FifoBuffer.h
new file mode 100644
index 0000000..faa9ae2
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoBuffer.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_BUFFER_H
+#define FIFO_FIFO_BUFFER_H
+
+#include <stdint.h>
+
+#include "FifoControllerBase.h"
+
+class FifoBuffer {
+public:
+    FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames);
+
+    FifoBuffer(int32_t   bytesPerFrame,
+               fifo_frames_t   capacityInFrames,
+               fifo_counter_t * readCounterAddress,
+               fifo_counter_t * writeCounterAddress,
+               void * dataStorageAddress);
+
+    ~FifoBuffer();
+
+    int32_t convertFramesToBytes(fifo_frames_t frames);
+
+    fifo_frames_t read(void *destination, fifo_frames_t framesToRead);
+
+    fifo_frames_t write(const void *source, fifo_frames_t framesToWrite);
+
+    fifo_frames_t getThreshold();
+    void setThreshold(fifo_frames_t threshold);
+
+    fifo_frames_t getBufferCapacityInFrames();
+
+    fifo_frames_t readNow(void *buffer, fifo_frames_t numFrames);
+
+    int64_t getNextReadTime(int32_t frameRate);
+
+    int32_t getUnderrunCount() const { return mUnderrunCount; }
+
+    FifoControllerBase *getFifoControllerBase() { return mFifo; }
+
+    int32_t getBytesPerFrame() {
+        return mBytesPerFrame;
+    }
+
+    fifo_counter_t getReadCounter() {
+        return mFifo->getReadCounter();
+    }
+
+    void setReadCounter(fifo_counter_t n) {
+        mFifo->setReadCounter(n);
+    }
+
+    fifo_counter_t getWriteCounter() {
+        return mFifo->getWriteCounter();
+    }
+
+    void setWriteCounter(fifo_counter_t n) {
+        mFifo->setWriteCounter(n);
+    }
+
+private:
+    const fifo_frames_t mFrameCapacity;
+    const int32_t       mBytesPerFrame;
+    uint8_t *           mStorage;
+    bool                mStorageOwned; // did this object allocate the storage?
+    FifoControllerBase *mFifo;
+    fifo_counter_t      mFramesReadCount;
+    fifo_counter_t      mFramesUnderrunCount;
+    int32_t             mUnderrunCount; // TODO still needed, or just use mFramesUnderrunCount?
+    int32_t             mLastReadSize;
+};
+
+#endif //FIFO_FIFO_BUFFER_H
diff --git a/media/liboboe/src/fifo/FifoController.h b/media/liboboe/src/fifo/FifoController.h
new file mode 100644
index 0000000..7434634
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoController.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_CONTROLLER_H
+#define FIFO_FIFO_CONTROLLER_H
+
+#include <stdint.h>
+#include <atomic>
+
+#include "FifoControllerBase.h"
+
+/**
+ * A FIFO with counters contained in the class.
+ */
+class FifoController : public FifoControllerBase
+{
+public:
+    FifoController(fifo_counter_t bufferSize, fifo_counter_t threshold)
+    : FifoControllerBase(bufferSize, threshold)
+    , mReadCounter(0)
+    , mWriteCounter(0)
+    {}
+
+    virtual ~FifoController() {}
+
+    // TODO review use of memory barriers, probably incorrect
+    virtual fifo_counter_t getReadCounter() override {
+        return mReadCounter.load(std::memory_order_acquire);
+    }
+    virtual void setReadCounter(fifo_counter_t n) override {
+        mReadCounter.store(n, std::memory_order_release);
+    }
+    virtual fifo_counter_t getWriteCounter() override {
+        return mWriteCounter.load(std::memory_order_acquire);
+    }
+    virtual void setWriteCounter(fifo_counter_t n) override {
+        mWriteCounter.store(n, std::memory_order_release);
+    }
+
+private:
+    std::atomic<fifo_counter_t> mReadCounter;
+    std::atomic<fifo_counter_t> mWriteCounter;
+};
+
+
+#endif //FIFO_FIFO_CONTROLLER_H
diff --git a/media/liboboe/src/fifo/FifoControllerBase.cpp b/media/liboboe/src/fifo/FifoControllerBase.cpp
new file mode 100644
index 0000000..33a253e
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoControllerBase.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "FifoControllerBase"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include "FifoControllerBase.h"
+
+FifoControllerBase::FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold)
+        : mCapacity(capacity)
+        , mThreshold(threshold)
+{
+}
+
+FifoControllerBase::~FifoControllerBase() {
+}
+
+fifo_frames_t FifoControllerBase::getFullFramesAvailable() {
+    return (fifo_frames_t) (getWriteCounter() - getReadCounter());
+}
+
+fifo_frames_t FifoControllerBase::getReadIndex() {
+    // % works with non-power of two sizes
+    return (fifo_frames_t) (getReadCounter() % mCapacity);
+}
+
+void FifoControllerBase::advanceReadIndex(fifo_frames_t numFrames) {
+    setReadCounter(getReadCounter() + numFrames);
+}
+
+fifo_frames_t FifoControllerBase::getEmptyFramesAvailable() {
+    return (int32_t)(mThreshold - getFullFramesAvailable());
+}
+
+fifo_frames_t FifoControllerBase::getWriteIndex() {
+    // % works with non-power of two sizes
+    return (fifo_frames_t) (getWriteCounter() % mCapacity);
+}
+
+void FifoControllerBase::advanceWriteIndex(fifo_frames_t numFrames) {
+    setWriteCounter(getWriteCounter() + numFrames);
+}
+
+void FifoControllerBase::setThreshold(fifo_frames_t threshold) {
+    mThreshold = threshold;
+}
diff --git a/media/liboboe/src/fifo/FifoControllerBase.h b/media/liboboe/src/fifo/FifoControllerBase.h
new file mode 100644
index 0000000..c543519
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoControllerBase.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_CONTROLLER_BASE_H
+#define FIFO_FIFO_CONTROLLER_BASE_H
+
+#include <stdint.h>
+
+typedef int64_t fifo_counter_t;
+typedef int32_t fifo_frames_t;
+
+/**
+ * Manage the read/write indices of a circular buffer.
+ *
+ * The caller is responsible for reading and writing the actual data.
+ * Note that the span of available frames may not be contiguous. They
+ * may wrap around from the end to the beginning of the buffer. In that
+ * case the data must be read or written in at least two blocks of frames.
+ *
+ */
+class FifoControllerBase {
+
+public:
+    /**
+     * Constructor for FifoControllerBase
+     * @param capacity Total size of the circular buffer in frames.
+     * @param threshold Number of frames to fill. Must be less than capacity.
+     */
+    FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold);
+
+    virtual ~FifoControllerBase();
+
+    // Abstract methods to be implemented in subclasses.
+    /**
+     * @return Counter used by the reader of the FIFO.
+     */
+    virtual fifo_counter_t getReadCounter() = 0;
+
+    /**
+     * This is normally only used internally.
+     * @param count Number of frames that have been read.
+     */
+    virtual void setReadCounter(fifo_counter_t count) = 0;
+
+    /**
+     * @return Counter used by the writer of the FIFO.
+     */
+    virtual fifo_counter_t getWriteCounter() = 0;
+
+    /**
+     * This is normally only used internally.
+     * @param count Number of frames that have been written.
+     */
+    virtual void setWriteCounter(fifo_counter_t count) = 0;
+
+    /**
+     * This may be negative if an unthrottled reader has read beyond the available data.
+     * @return number of valid frames available to read. Never read more than this.
+     */
+    fifo_frames_t getFullFramesAvailable();
+
+    /**
+     * The index in a circular buffer of the next frame to read.
+     */
+    fifo_frames_t getReadIndex();
+
+    /**
+     * @param numFrames number of frames to advance the read index
+     */
+    void advanceReadIndex(fifo_frames_t numFrames);
+
+    /**
+     * @return number of frames that can be written. Never write more than this.
+     */
+    fifo_frames_t getEmptyFramesAvailable();
+
+    /**
+     * The index in a circular buffer of the next frame to write.
+     */
+    fifo_frames_t getWriteIndex();
+
+    /**
+     * @param numFrames number of frames to advance the write index
+     */
+    void advanceWriteIndex(fifo_frames_t numFrames);
+
+    /**
+     * You can request that the buffer not be filled above a maximum
+     * number of frames.
+     * @param threshold effective size of the buffer
+     */
+    void setThreshold(fifo_frames_t threshold);
+
+    fifo_frames_t getThreshold() const {
+        return mThreshold;
+    }
+
+    fifo_frames_t getCapacity() const {
+        return mCapacity;
+    }
+
+
+private:
+    fifo_frames_t mCapacity;
+    fifo_frames_t mThreshold;
+};
+
+#endif // FIFO_FIFO_CONTROLLER_BASE_H
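
Combined with FifoController above, the intended calling pattern is: check how many frames fit, fetch the index to copy at, then advance the counter. A short sketch assuming the headers from this change are on the include path; the 256-frame buffer is just an illustrative size:

    #include <cstdio>

    #include "FifoController.h"

    // Writer-side calling pattern: check space, locate the index, then advance.
    int main() {
        FifoController fifo(/*bufferSize=*/256, /*threshold=*/256);

        fifo_frames_t toWrite = 48;
        if (toWrite > fifo.getEmptyFramesAvailable()) {
            toWrite = fifo.getEmptyFramesAvailable();
        }
        fifo_frames_t writeIndex = fifo.getWriteIndex();  // where the caller would memcpy to
        // ... copy toWrite frames of caller-owned data starting at writeIndex ...
        fifo.advanceWriteIndex(toWrite);

        std::printf("wrote %d frames at index %d; %d full frames afterwards\n",
                    toWrite, writeIndex, fifo.getFullFramesAvailable());
        return 0;
    }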
diff --git a/media/liboboe/src/fifo/FifoControllerIndirect.h b/media/liboboe/src/fifo/FifoControllerIndirect.h
new file mode 100644
index 0000000..1aaf9ea
--- /dev/null
+++ b/media/liboboe/src/fifo/FifoControllerIndirect.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_CONTROLLER_INDIRECT_H
+#define FIFO_FIFO_CONTROLLER_INDIRECT_H
+
+#include <stdint.h>
+#include <atomic>
+
+#include "FifoControllerBase.h"
+
+/**
+ * A FifoControllerBase with counters external to the class.
+ *
+ * The actual counters may be stored in separate regions of shared memory
+ * with different access rights.
+ */
+class FifoControllerIndirect : public FifoControllerBase {
+
+public:
+    FifoControllerIndirect(fifo_frames_t capacity,
+                           fifo_frames_t threshold,
+                           fifo_counter_t * readCounterAddress,
+                           fifo_counter_t * writeCounterAddress)
+        : FifoControllerBase(capacity, threshold)
+        , mReadCounterAddress((std::atomic<fifo_counter_t> *) readCounterAddress)
+        , mWriteCounterAddress((std::atomic<fifo_counter_t> *) writeCounterAddress)
+    {
+        setReadCounter(0);
+        setWriteCounter(0);
+    }
+    virtual ~FifoControllerIndirect() {};
+
+    // TODO review use of memory barriers, probably incorrect
+    virtual fifo_counter_t getReadCounter() override {
+        return mReadCounterAddress->load(std::memory_order_acquire);
+    }
+
+    virtual void setReadCounter(fifo_counter_t count) override {
+        mReadCounterAddress->store(count, std::memory_order_release);
+    }
+
+    virtual fifo_counter_t getWriteCounter() override {
+        return mWriteCounterAddress->load(std::memory_order_acquire);
+    }
+
+    virtual void setWriteCounter(fifo_counter_t count) override {
+        mWriteCounterAddress->store(count, std::memory_order_release);
+    }
+
+private:
+    std::atomic<fifo_counter_t> * mReadCounterAddress;
+    std::atomic<fifo_counter_t> * mWriteCounterAddress;
+};
+
+#endif //FIFO_FIFO_CONTROLLER_INDIRECT_H
diff --git a/media/liboboe/src/fifo/README.md b/media/liboboe/src/fifo/README.md
new file mode 100644
index 0000000..61ffbae
--- /dev/null
+++ b/media/liboboe/src/fifo/README.md
@@ -0,0 +1,9 @@
+Simple atomic FIFO for passing data between threads or processes.
+This does not require mutexes.
+
+One thread modifies the readCounter and the other thread modifies the writeCounter.
+
+TODO The internal low-level implementation might be merged in some form with audio_utils fifo
+and/or FMQ [after confirming that requirements are met].
+The higher-level parts related to Oboe's use of the FIFO, such as the API, fds, relative
+location of indices and data buffer, mapping, and allocation of memory, will probably be kept as-is.
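
A minimal single-producer/single-consumer sketch of the counter scheme described here, independent of the classes in this change; the names are illustrative and the memory ordering mirrors the simple acquire/release pairing in FifoController.h, whose own TODO notes that the barriers still need review:

    #include <atomic>
    #include <cstdint>

    // Only the reader advances readCounter and only the writer advances
    // writeCounter, so no mutex is required.
    struct FifoCounters {
        std::atomic<int64_t> readCounter{0};   // advanced by the reader thread
        std::atomic<int64_t> writeCounter{0};  // advanced by the writer thread

        int64_t fullFrames() const {           // frames available to read
            return writeCounter.load(std::memory_order_acquire) -
                   readCounter.load(std::memory_order_acquire);
        }
        int64_t emptyFrames(int64_t capacity) const {  // frames available to write
            return capacity - fullFrames();
        }
        void advanceRead(int64_t frames) {
            readCounter.fetch_add(frames, std::memory_order_release);
        }
        void advanceWrite(int64_t frames) {
            writeCounter.fetch_add(frames, std::memory_order_release);
        }
    };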
diff --git a/media/liboboe/src/legacy/AudioStreamRecord.cpp b/media/liboboe/src/legacy/AudioStreamRecord.cpp
index f130cad..5854974 100644
--- a/media/liboboe/src/legacy/AudioStreamRecord.cpp
+++ b/media/liboboe/src/legacy/AudioStreamRecord.cpp
@@ -57,7 +57,7 @@
                               ? 2 : getSamplesPerFrame();
     audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(samplesPerFrame);
 
-    AudioRecord::callback_t callback = NULL;
+    AudioRecord::callback_t callback = nullptr;
     audio_input_flags_t flags = (audio_input_flags_t) AUDIO_INPUT_FLAG_NONE;
 
     // TODO implement an unspecified Android format then use that.
@@ -75,14 +75,14 @@
 
             0,    //    size_t frameCount = 0,
             callback,
-            NULL, //    void* user = NULL,
+            nullptr, //    void* user = nullptr,
             0,    //    uint32_t notificationFrames = 0,
             AUDIO_SESSION_ALLOCATE,
             AudioRecord::TRANSFER_DEFAULT,
             flags
              //   int uid = -1,
              //   pid_t pid = -1,
-             //   const audio_attributes_t* pAttributes = NULL
+             //   const audio_attributes_t* pAttributes = nullptr
              );
 
     // Did we get a valid track?
@@ -115,7 +115,7 @@
 
 oboe_result_t AudioStreamRecord::requestStart()
 {
-    if (mAudioRecord.get() == NULL) {
+    if (mAudioRecord.get() == nullptr) {
         return OBOE_ERROR_INVALID_STATE;
     }
     // Get current position so we can detect when the track is playing.
@@ -142,7 +142,7 @@
 }
 
 oboe_result_t AudioStreamRecord::requestStop() {
-    if (mAudioRecord.get() == NULL) {
+    if (mAudioRecord.get() == nullptr) {
         return OBOE_ERROR_INVALID_STATE;
     }
     setState(OBOE_STREAM_STATE_STOPPING);
diff --git a/media/liboboe/src/legacy/AudioStreamTrack.cpp b/media/liboboe/src/legacy/AudioStreamTrack.cpp
index 5205fc5..b2c4ee1 100644
--- a/media/liboboe/src/legacy/AudioStreamTrack.cpp
+++ b/media/liboboe/src/legacy/AudioStreamTrack.cpp
@@ -58,10 +58,10 @@
     int32_t samplesPerFrame = (getSamplesPerFrame() == OBOE_UNSPECIFIED)
                               ? 2 : getSamplesPerFrame();
     audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(samplesPerFrame);
-    ALOGE("AudioStreamTrack::open(), samplesPerFrame = %d, channelMask = 0x%08x",
+    ALOGD("AudioStreamTrack::open(), samplesPerFrame = %d, channelMask = 0x%08x",
             samplesPerFrame, channelMask);
 
-    AudioTrack::callback_t callback = NULL;
+    AudioTrack::callback_t callback = nullptr;
     // TODO add more performance options
     audio_output_flags_t flags = (audio_output_flags_t) AUDIO_OUTPUT_FLAG_FAST;
     size_t frameCount = 0;
@@ -78,14 +78,15 @@
             frameCount,
             flags,
             callback,
-            NULL,    // user callback data
-            0,       // notificationFrames
+            nullptr,    // user callback data
+            0,          // notificationFrames
             AUDIO_SESSION_ALLOCATE,
             AudioTrack::transfer_type::TRANSFER_SYNC // TODO - this does not allow FAST
             );
 
     // Did we get a valid track?
     status_t status = mAudioTrack->initCheck();
+    ALOGD("AudioStreamTrack::open(), initCheck() returned %d", status);
     // FIXME - this should work - if (status != NO_ERROR) {
     //         But initCheck() is returning 1 !
     if (status < 0) {
@@ -116,7 +117,7 @@
 
 oboe_result_t AudioStreamTrack::requestStart()
 {
-    if (mAudioTrack.get() == NULL) {
+    if (mAudioTrack.get() == nullptr) {
         return OBOE_ERROR_INVALID_STATE;
     }
     // Get current position so we can detect when the track is playing.
@@ -135,7 +136,7 @@
 
 oboe_result_t AudioStreamTrack::requestPause()
 {
-    if (mAudioTrack.get() == NULL) {
+    if (mAudioTrack.get() == nullptr) {
         return OBOE_ERROR_INVALID_STATE;
     } else if (getState() != OBOE_STREAM_STATE_STARTING
             && getState() != OBOE_STREAM_STATE_STARTED) {
@@ -152,7 +153,7 @@
 }
 
 oboe_result_t AudioStreamTrack::requestFlush() {
-    if (mAudioTrack.get() == NULL) {
+    if (mAudioTrack.get() == nullptr) {
         return OBOE_ERROR_INVALID_STATE;
     } else if (getState() != OBOE_STREAM_STATE_PAUSED) {
         return OBOE_ERROR_INVALID_STATE;
@@ -165,7 +166,7 @@
 }
 
 oboe_result_t AudioStreamTrack::requestStop() {
-    if (mAudioTrack.get() == NULL) {
+    if (mAudioTrack.get() == nullptr) {
         return OBOE_ERROR_INVALID_STATE;
     }
     setState(OBOE_STREAM_STATE_STOPPING);
diff --git a/media/liboboe/src/utility/AudioClock.h b/media/liboboe/src/utility/AudioClock.h
index da2f74a..1a5c209 100644
--- a/media/liboboe/src/utility/AudioClock.h
+++ b/media/liboboe/src/utility/AudioClock.h
@@ -17,10 +17,10 @@
 #ifndef UTILITY_AUDIOCLOCK_H
 #define UTILITY_AUDIOCLOCK_H
 
-#include <sys/types.h>
+#include <stdint.h>
 #include <time.h>
-#include "oboe/OboeDefinitions.h"
-#include "oboe/OboeAudio.h"
+
+#include <oboe/OboeDefinitions.h>
 
 class AudioClock {
 public:
diff --git a/media/liboboe/src/utility/HandleTracker.cpp b/media/liboboe/src/utility/HandleTracker.cpp
index be2a64c..bf5fb63 100644
--- a/media/liboboe/src/utility/HandleTracker.cpp
+++ b/media/liboboe/src/utility/HandleTracker.cpp
@@ -19,6 +19,7 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
+#include <new>
 #include <stdint.h>
 #include <assert.h>
 
@@ -51,25 +52,28 @@
 
 HandleTracker::HandleTracker(uint32_t maxHandles)
         : mMaxHandleCount(maxHandles)
-        , mHandleAddresses(nullptr)
         , mHandleHeaders(nullptr)
 {
     assert(maxHandles <= HANDLE_TRACKER_MAX_HANDLES);
     // Allocate arrays to hold addresses and validation info.
-    mHandleAddresses = (handle_tracker_address_t *) new handle_tracker_address_t[maxHandles];
+    mHandleAddresses = (handle_tracker_address_t *)
+            new(std::nothrow) handle_tracker_address_t[maxHandles];
     if (mHandleAddresses != nullptr) {
-        mHandleHeaders = new handle_tracker_header_t[maxHandles];
+        mHandleHeaders = new(std::nothrow) handle_tracker_header_t[maxHandles];
+
         if (mHandleHeaders != nullptr) {
-            // Initialize linked list of free nodes. NULL terminated.
+            handle_tracker_header_t initialHeader = buildHeader(0, 1);
+            // Initialize linked list of free nodes. nullptr terminated.
             for (uint32_t i = 0; i < (maxHandles - 1); i++) {
                 mHandleAddresses[i] = &mHandleAddresses[i + 1]; // point to next node
-                mHandleHeaders[i] = 0;
+                mHandleHeaders[i] = initialHeader;
             }
             mNextFreeAddress = &mHandleAddresses[0];
             mHandleAddresses[maxHandles - 1] = nullptr;
             mHandleHeaders[maxHandles - 1] = 0;
         } else {
             delete[] mHandleAddresses; // so the class appears uninitialized
+            mHandleAddresses = nullptr;
         }
     }
 }
@@ -131,7 +135,7 @@
     // Generate a handle.
     oboe_handle_t handle = buildHandle(inputHeader, index);
 
-    //ALOGD("HandleTracker::put(%p) returns 0x%08x", address, handle);
+    ALOGV("HandleTracker::put(%p) returns 0x%08x", address, handle);
     return handle;
 }
 
diff --git a/media/liboboe/src/utility/HandleTracker.h b/media/liboboe/src/utility/HandleTracker.h
index 37dbac8..4c08321 100644
--- a/media/liboboe/src/utility/HandleTracker.h
+++ b/media/liboboe/src/utility/HandleTracker.h
@@ -41,7 +41,7 @@
     /**
      * @param maxHandles cannot exceed HANDLE_TRACKER_MAX_HANDLES
      */
-    HandleTracker(uint32_t maxHandles);
+    HandleTracker(uint32_t maxHandles = 256);
     virtual ~HandleTracker();
 
     /**
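
The HandleTracker changes above (nothrow allocation, explicit free-list headers, a default of
256 handles) all serve the same scheme: each slot carries a small header so a stale or recycled
handle can be rejected on lookup. The exact bit layout lives in buildHandle() and buildHeader(),
which this patch does not show, so the sketch below only illustrates the generic
generation-counted-handle idea; the field widths and names are assumptions.

    #include <cstdint>

    // Hypothetical packing: high bits carry a generation count, low bits a slot index.
    constexpr int kIndexBits = 16;
    constexpr uint32_t kIndexMask = (1u << kIndexBits) - 1;

    uint32_t makeHandle(uint16_t generation, uint16_t index) {
        return (static_cast<uint32_t>(generation) << kIndexBits) | index;
    }

    bool handleIsCurrent(uint32_t handle, const uint16_t *slotGenerations) {
        uint16_t index = handle & kIndexMask;
        uint16_t generation = static_cast<uint16_t>(handle >> kIndexBits);
        // A handle stays valid only while the slot's stored generation matches it;
        // bumping the generation when a slot is freed invalidates outstanding handles.
        return slotGenerations[index] == generation;
    }
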
diff --git a/media/liboboe/src/utility/OboeUtilities.cpp b/media/liboboe/src/utility/OboeUtilities.cpp
index b28f7c7..d9d2e88 100644
--- a/media/liboboe/src/utility/OboeUtilities.cpp
+++ b/media/liboboe/src/utility/OboeUtilities.cpp
@@ -28,24 +28,19 @@
 using namespace android;
 
 oboe_size_bytes_t OboeConvert_formatToSizeInBytes(oboe_audio_format_t format) {
-    oboe_datatype_t dataType = OBOE_AUDIO_FORMAT_DATA_TYPE(format);
-    oboe_size_bytes_t size;
-    switch (dataType) {
-        case OBOE_AUDIO_DATATYPE_UINT8:
-            size = sizeof(uint8_t);
-            break;
-        case OBOE_AUDIO_DATATYPE_INT16:
+    oboe_size_bytes_t size = OBOE_ERROR_ILLEGAL_ARGUMENT;
+    switch (format) {
+        case OBOE_AUDIO_FORMAT_PCM16:
             size = sizeof(int16_t);
             break;
-        case OBOE_AUDIO_DATATYPE_INT32:
-        case OBOE_AUDIO_DATATYPE_INT824:
+        case OBOE_AUDIO_FORMAT_PCM32:
+        case OBOE_AUDIO_FORMAT_PCM824:
             size = sizeof(int32_t);
             break;
-        case OBOE_AUDIO_DATATYPE_FLOAT32:
+        case OBOE_AUDIO_FORMAT_PCM_FLOAT:
             size = sizeof(float);
             break;
         default:
-            size = OBOE_ERROR_ILLEGAL_ARGUMENT;
             break;
     }
     return size;
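
Because the converted function now returns either a positive byte count or
OBOE_ERROR_ILLEGAL_ARGUMENT through the same oboe_size_bytes_t value, callers need to check the
sign before using the result as a size. A small usage sketch follows; the helper name is
hypothetical and it assumes oboe_size_bytes_t is a signed type, as the error-return convention
above implies.

    // Hypothetical helper built on OboeConvert_formatToSizeInBytes().
    oboe_size_bytes_t calculateBytesPerFrame(oboe_audio_format_t format,
                                             int32_t samplesPerFrame) {
        oboe_size_bytes_t bytesPerSample = OboeConvert_formatToSizeInBytes(format);
        if (bytesPerSample < 0) {
            return bytesPerSample;  // propagate OBOE_ERROR_ILLEGAL_ARGUMENT
        }
        return bytesPerSample * samplesPerFrame;
    }
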
diff --git a/media/liboboe/tests/Android.mk b/media/liboboe/tests/Android.mk
index f2c65d9..165669b 100644
--- a/media/liboboe/tests/Android.mk
+++ b/media/liboboe/tests/Android.mk
@@ -6,10 +6,9 @@
     frameworks/av/media/liboboe/include \
     frameworks/av/media/liboboe/src/core \
     frameworks/av/media/liboboe/src/utility
-LOCAL_SRC_FILES:= test_oboe_api.cpp
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
-                          libbinder libcutils libutils \
-                          libaudioclient liblog
+LOCAL_SRC_FILES := test_oboe_api.cpp
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+                          libcutils liblog libmedia libutils
 LOCAL_STATIC_LIBRARIES := liboboe
 LOCAL_MODULE := test_oboe_api
 include $(BUILD_NATIVE_TEST)
@@ -21,7 +20,23 @@
     frameworks/av/media/liboboe/src/core \
     frameworks/av/media/liboboe/src/utility
 LOCAL_SRC_FILES:= test_handle_tracker.cpp
-LOCAL_SHARED_LIBRARIES := libbinder libcutils libutils liblog
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+                          libcutils liblog libmedia libutils
 LOCAL_STATIC_LIBRARIES := liboboe
 LOCAL_MODULE := test_handle_tracker
 include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/liboboe/include \
+    frameworks/av/media/liboboe/src \
+    frameworks/av/media/liboboe/src/core \
+    frameworks/av/media/liboboe/src/fifo \
+    frameworks/av/media/liboboe/src/utility
+LOCAL_SRC_FILES:= test_marshalling.cpp
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+                          libcutils liblog libmedia libutils
+LOCAL_STATIC_LIBRARIES := liboboe
+LOCAL_MODULE := test_marshalling
+include $(BUILD_NATIVE_TEST)
diff --git a/media/liboboe/tests/test_handle_tracker.cpp b/media/liboboe/tests/test_handle_tracker.cpp
index ae7384e..a146e76 100644
--- a/media/liboboe/tests/test_handle_tracker.cpp
+++ b/media/liboboe/tests/test_handle_tracker.cpp
@@ -56,7 +56,7 @@
         EXPECT_EQ(&data, found);
         // should fail the second time
         found = tracker.remove(type, dataHandle);
-        EXPECT_EQ(NULL, found);
+        EXPECT_EQ(nullptr, found);
     }
 }
 
diff --git a/media/liboboe/tests/test_marshalling.cpp b/media/liboboe/tests/test_marshalling.cpp
new file mode 100644
index 0000000..8f4cc2c
--- /dev/null
+++ b/media/liboboe/tests/test_marshalling.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit tests for Oboe Marshalling of RingBuffer information.
+
+#include <stdlib.h>
+#include <math.h>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <cutils/ashmem.h>
+#include <gtest/gtest.h>
+#include <sys/mman.h>
+
+#include <oboe/OboeDefinitions.h>
+#include <binding/AudioEndpointParcelable.h>
+
+using namespace android;
+using namespace oboe;
+
+// Test adding one value.
+TEST(test_marshalling, oboe_one_read_write) {
+    Parcel parcel;
+    size_t pos = parcel.dataPosition();
+    const int arbitraryValue = 235;
+    parcel.writeInt32(arbitraryValue);
+    parcel.setDataPosition(pos);
+    int32_t y;
+    parcel.readInt32(&y);
+    EXPECT_EQ(arbitraryValue, y);
+}
+
+// Test SharedMemoryParcelable.
+TEST(test_marshalling, oboe_shared_memory) {
+    SharedMemoryParcelable sharedMemoryA;
+    SharedMemoryParcelable sharedMemoryB;
+    const size_t memSizeBytes = 840;
+    int fd = ashmem_create_region("TestMarshalling", memSizeBytes);
+    ASSERT_LE(0, fd);
+    sharedMemoryA.setup(fd, memSizeBytes);
+    void *region1;
+    EXPECT_EQ(OBOE_OK, sharedMemoryA.resolve(0, 16, &region1)); // fits in region
+    EXPECT_NE(OBOE_OK, sharedMemoryA.resolve(-2, 16, &region1)); // offset is negative
+    EXPECT_NE(OBOE_OK, sharedMemoryA.resolve(0, memSizeBytes + 8, &region1)); // size too big
+    EXPECT_NE(OBOE_OK, sharedMemoryA.resolve(memSizeBytes - 8, 16, &region1)); // goes past the end
+    int32_t *buffer1 = (int32_t *)region1;
+    buffer1[0] = 98735; // arbitrary value
+
+    Parcel parcel;
+    size_t pos = parcel.dataPosition();
+    sharedMemoryA.writeToParcel(&parcel);
+
+    parcel.setDataPosition(pos);
+    sharedMemoryB.readFromParcel(&parcel);
+    EXPECT_EQ(sharedMemoryA.getSizeInBytes(), sharedMemoryB.getSizeInBytes());
+
+    // should see same value at two different addresses
+    void *region2;
+    EXPECT_EQ(OBOE_OK, sharedMemoryB.resolve(0, 16, &region2));
+    int32_t *buffer2 = (int32_t *)region2;
+    EXPECT_NE(buffer1, buffer2);
+    EXPECT_EQ(buffer1[0], buffer2[0]);
+}
+
+// Test SharedRegionParcelable.
+TEST(test_marshalling, oboe_shared_region) {
+    SharedMemoryParcelable sharedMemories[2];
+    SharedRegionParcelable sharedRegionA;
+    SharedRegionParcelable sharedRegionB;
+    const size_t memSizeBytes = 840;
+    int fd = ashmem_create_region("TestMarshalling", memSizeBytes);
+    ASSERT_LE(0, fd);
+    sharedMemories[0].setup(fd, memSizeBytes);
+    int32_t regionOffset1 = 32;
+    int32_t regionSize1 = 16;
+    sharedRegionA.setup(0, regionOffset1, regionSize1);
+
+    void *region1;
+    EXPECT_EQ(OBOE_OK, sharedRegionA.resolve(sharedMemories, &region1));
+    int32_t *buffer1 = (int32_t *)region1;
+    buffer1[0] = 336677; // arbitrary value
+
+    Parcel parcel;
+    size_t pos = parcel.dataPosition();
+    sharedRegionA.writeToParcel(&parcel);
+
+    parcel.setDataPosition(pos);
+    sharedRegionB.readFromParcel(&parcel);
+
+    // should see same value
+    void *region2;
+    EXPECT_EQ(OBOE_OK, sharedRegionB.resolve(sharedMemories, &region2));
+    int32_t *buffer2 = (int32_t *)region2;
+    EXPECT_EQ(buffer1[0], buffer2[0]);
+}
+
+// Test RingBufferParcelable.
+TEST(test_marshalling, oboe_ring_buffer_parcelable) {
+    SharedMemoryParcelable sharedMemories[2];
+    RingBufferParcelable ringBufferA;
+    RingBufferParcelable ringBufferB;
+
+    const size_t bytesPerFrame = 8;
+    const size_t framesPerBurst = 32;
+    const size_t dataSizeBytes = 2048;
+    const int32_t counterSizeBytes = sizeof(int64_t);
+    const size_t memSizeBytes = dataSizeBytes + (2 * counterSizeBytes);
+
+    int fd = ashmem_create_region("TestMarshalling", memSizeBytes);
+    ASSERT_LE(0, fd);
+    sharedMemories[0].setup(fd, memSizeBytes);
+
+    int32_t sharedMemoryIndex = 0;
+    // arrange indices and data in the shared memory
+    int32_t readOffset = 0;
+    int32_t writeOffset = readOffset + counterSizeBytes;
+    int32_t dataOffset = writeOffset + counterSizeBytes;
+    ringBufferA.setupMemory(sharedMemoryIndex, dataOffset, dataSizeBytes,
+        readOffset, writeOffset, counterSizeBytes);
+    ringBufferA.setFramesPerBurst(framesPerBurst);
+    ringBufferA.setBytesPerFrame(bytesPerFrame);
+    ringBufferA.setCapacityInFrames(dataSizeBytes / bytesPerFrame);
+
+    // setup A
+    RingBufferDescriptor descriptorA;
+    EXPECT_EQ(OBOE_OK, ringBufferA.resolve(sharedMemories, &descriptorA));
+    descriptorA.dataAddress[0] = 95;
+    descriptorA.dataAddress[1] = 57;
+    descriptorA.readCounterAddress[0] = 17;
+    descriptorA.writeCounterAddress[0] = 39;
+
+    // write A to parcel
+    Parcel parcel;
+    size_t pos = parcel.dataPosition();
+    ringBufferA.writeToParcel(&parcel);
+
+    // read B from parcel
+    parcel.setDataPosition(pos);
+    ringBufferB.readFromParcel(&parcel);
+
+    RingBufferDescriptor descriptorB;
+    EXPECT_EQ(OBOE_OK, ringBufferB.resolve(sharedMemories, &descriptorB));
+
+    // A and B should match
+    EXPECT_EQ(descriptorA.dataAddress[0], descriptorB.dataAddress[0]);
+    EXPECT_EQ(descriptorA.dataAddress[1], descriptorB.dataAddress[1]);
+    EXPECT_EQ(descriptorA.readCounterAddress[0], descriptorB.readCounterAddress[0]);
+    EXPECT_EQ(descriptorA.writeCounterAddress[0], descriptorB.writeCounterAddress[0]);
+
+    EXPECT_EQ(ringBufferA.getFramesPerBurst(), ringBufferB.getFramesPerBurst());
+    EXPECT_EQ(ringBufferA.getBytesPerFrame(), ringBufferB.getBytesPerFrame());
+    EXPECT_EQ(ringBufferA.getCapacityInFrames(), ringBufferB.getCapacityInFrames());
+}
diff --git a/media/liboboe/tests/test_oboe_api.cpp b/media/liboboe/tests/test_oboe_api.cpp
index acf3000..0bc469f 100644
--- a/media/liboboe/tests/test_oboe_api.cpp
+++ b/media/liboboe/tests/test_oboe_api.cpp
@@ -32,7 +32,7 @@
     const oboe_sample_rate_t requestedSampleRate1 = 48000;
     const oboe_sample_rate_t requestedSampleRate2 = 44100;
     const int32_t requestedSamplesPerFrame = 2;
-    const oboe_audio_format_t requestedDataFormat = OBOE_AUDIO_DATATYPE_INT16;
+    const oboe_audio_format_t requestedDataFormat = OBOE_AUDIO_FORMAT_PCM16;
 
     oboe_sample_rate_t sampleRate = 0;
     int32_t samplesPerFrame = 0;
@@ -94,7 +94,6 @@
     EXPECT_EQ(OBOE_ERROR_INVALID_HANDLE, OboeStreamBuilder_getSampleRate(oboeBuilder2, &sampleRate));
 }
 
-
 // Test creating a default stream with everything unspecified.
 TEST(test_oboe_api, oboe_stream_unspecified) {
     OboeStreamBuilder oboeBuilder;
@@ -114,18 +113,17 @@
 }
 
 // Test Writing to an OboeStream
-TEST(test_oboe_api, oboe_stream) {
+void runtest_oboe_stream(oboe_sharing_mode_t requestedSharingMode) {
     const oboe_sample_rate_t requestedSampleRate = 48000;
     const oboe_sample_rate_t requestedSamplesPerFrame = 2;
-    const oboe_audio_format_t requestedDataFormat = OBOE_AUDIO_DATATYPE_INT16;
-    //const oboe_sharing_mode_t requestedSharingMode = OBOE_SHARING_MODE_EXCLUSIVE; // MMAP NOIRQ
-    const oboe_sharing_mode_t requestedSharingMode = OBOE_SHARING_MODE_LEGACY; // AudioTrack
+    const oboe_audio_format_t requestedDataFormat = OBOE_AUDIO_FORMAT_PCM16;
 
     oboe_sample_rate_t actualSampleRate = -1;
     int32_t actualSamplesPerFrame = -1;
-    oboe_audio_format_t actualDataFormat = OBOE_AUDIO_FORMAT_PCM824;
+    oboe_audio_format_t actualDataFormat = OBOE_AUDIO_FORMAT_INVALID;
     oboe_sharing_mode_t actualSharingMode;
     oboe_size_frames_t framesPerBurst = -1;
+    int writeLoops = 0;
 
     oboe_size_frames_t framesWritten = 0;
     oboe_size_frames_t framesPrimed = 0;
@@ -162,22 +160,30 @@
 
     // Check to see what kind of stream we actually got.
     EXPECT_EQ(OBOE_OK, OboeStream_getSampleRate(oboeStream, &actualSampleRate));
-    EXPECT_TRUE(actualSampleRate >= 44100 && actualSampleRate <= 96000);  // TODO what is range?
+    ASSERT_TRUE(actualSampleRate >= 44100 && actualSampleRate <= 96000);  // TODO what is range?
 
     EXPECT_EQ(OBOE_OK, OboeStream_getSamplesPerFrame(oboeStream, &actualSamplesPerFrame));
-    EXPECT_TRUE(actualSamplesPerFrame >= 1 && actualSamplesPerFrame <= 16); // TODO what is max?
+    ASSERT_TRUE(actualSamplesPerFrame >= 1 && actualSamplesPerFrame <= 16); // TODO what is max?
 
     EXPECT_EQ(OBOE_OK, OboeStream_getSharingMode(oboeStream, &actualSharingMode));
-    EXPECT_TRUE(actualSharingMode == OBOE_SHARING_MODE_EXCLUSIVE
-            || actualSharingMode == OBOE_SHARING_MODE_LEGACY);
+    ASSERT_TRUE(actualSharingMode == OBOE_SHARING_MODE_EXCLUSIVE
+                || actualSharingMode == OBOE_SHARING_MODE_LEGACY);
+
+    EXPECT_EQ(OBOE_OK, OboeStream_getFormat(oboeStream, &actualDataFormat));
+    EXPECT_NE(OBOE_AUDIO_FORMAT_INVALID, actualDataFormat);
 
     EXPECT_EQ(OBOE_OK, OboeStream_getFramesPerBurst(oboeStream, &framesPerBurst));
-    EXPECT_TRUE(framesPerBurst >= 16 && framesPerBurst <= 1024); // TODO what is min/max?
+    ASSERT_TRUE(framesPerBurst >= 16 && framesPerBurst <= 1024); // TODO what is min/max?
 
     // Allocate a buffer for the audio data.
-    int16_t *data = new int16_t[framesPerBurst * actualSamplesPerFrame];
-    ASSERT_TRUE(NULL != data);
+    // TODO handle possibility of other data formats
+    ASSERT_TRUE(actualDataFormat == OBOE_AUDIO_FORMAT_PCM16);
+    size_t dataSizeSamples = framesPerBurst * actualSamplesPerFrame;
+    int16_t *data = new int16_t[dataSizeSamples];
+    ASSERT_TRUE(nullptr != data);
+    memset(data, 0, sizeof(int16_t) * dataSizeSamples);
 
+    // Prime the buffer.
     timeoutNanos = 0;
     do {
         framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
@@ -185,67 +191,71 @@
         framesTotal += framesWritten;
         ASSERT_GE(framesWritten, 0);
         ASSERT_LE(framesWritten, framesPerBurst);
-    } while(framesWritten > 0);
+    } while (framesWritten > 0);
     ASSERT_TRUE(framesTotal > 0);
 
-    // Start and wait for server to respond.
-    ASSERT_EQ(OBOE_OK, OboeStream_requestStart(oboeStream));
-    ASSERT_EQ(OBOE_OK, OboeStream_waitForStateChange(oboeStream,
-                                                     OBOE_STREAM_STATE_STARTING,
-                                                     &state,
-                                                     DEFAULT_STATE_TIMEOUT));
-    EXPECT_EQ(OBOE_STREAM_STATE_STARTED, state);
+    // Start/write/pause more than once to see if it fails after the first time.
+    // Write some data and measure the rate to see if the timing is OK.
+    for (int numLoops = 0; numLoops < 2; numLoops++) {
+        // Start and wait for server to respond.
+        ASSERT_EQ(OBOE_OK, OboeStream_requestStart(oboeStream));
+        ASSERT_EQ(OBOE_OK, OboeStream_waitForStateChange(oboeStream,
+                                                         OBOE_STREAM_STATE_STARTING,
+                                                         &state,
+                                                         DEFAULT_STATE_TIMEOUT));
+        EXPECT_EQ(OBOE_STREAM_STATE_STARTED, state);
 
-    // Write some data while we are running. Read counter should be advancing.
-    int loops = 1 * actualSampleRate / framesPerBurst; // 1 second
-    ASSERT_LT(2, loops); // detect absurdly high framesPerBurst
-    timeoutNanos = 10 * OBOE_NANOS_PER_SECOND * framesPerBurst / actualSampleRate; // bursts
-    framesWritten = 1;
-    ASSERT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
-    oboeFramesRead1 = oboeFramesRead;
-    oboe_nanoseconds_t beginTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
-    do {
-        framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
-        ASSERT_GE(framesWritten, 0);
-        ASSERT_LE(framesWritten, framesPerBurst);
+        // Write some data while we are running. Read counter should be advancing.
+        writeLoops = 1 * actualSampleRate / framesPerBurst; // 1 second
+        ASSERT_LT(2, writeLoops); // detect absurdly high framesPerBurst
+        timeoutNanos = 10 * OBOE_NANOS_PER_SECOND * framesPerBurst / actualSampleRate; // bursts
+        framesWritten = 1;
+        ASSERT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
+        oboeFramesRead1 = oboeFramesRead;
+        oboe_nanoseconds_t beginTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
+        do {
+            framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
+            ASSERT_GE(framesWritten, 0);
+            ASSERT_LE(framesWritten, framesPerBurst);
 
-        framesTotal += framesWritten;
-        EXPECT_EQ(OBOE_OK, OboeStream_getFramesWritten(oboeStream, &oboeFramesWritten));
-        EXPECT_EQ(framesTotal, oboeFramesWritten);
+            framesTotal += framesWritten;
+            EXPECT_EQ(OBOE_OK, OboeStream_getFramesWritten(oboeStream, &oboeFramesWritten));
+            EXPECT_EQ(framesTotal, oboeFramesWritten);
 
-        // Try to get a more accurate measure of the sample rate.
-        if (beginTime == 0) {
-            EXPECT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
-            if (oboeFramesRead > oboeFramesRead1) { // is read pointer advancing
-                beginTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
-                oboeFramesRead1 = oboeFramesRead;
+            // Try to get a more accurate measure of the sample rate.
+            if (beginTime == 0) {
+                EXPECT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
+                if (oboeFramesRead > oboeFramesRead1) { // is read pointer advancing
+                    beginTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
+                    oboeFramesRead1 = oboeFramesRead;
+                }
             }
+        } while (framesWritten > 0 && writeLoops-- > 0);
+
+        EXPECT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead2));
+        oboe_nanoseconds_t endTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
+        ASSERT_GT(oboeFramesRead2, 0);
+        ASSERT_GT(oboeFramesRead2, oboeFramesRead1);
+        ASSERT_LE(oboeFramesRead2, oboeFramesWritten);
+
+        // TODO why is legacy so inaccurate?
+        const double rateTolerance = 200.0; // arbitrary tolerance for sample rate
+        if (requestedSharingMode != OBOE_SHARING_MODE_LEGACY) {
+            // Calculate approximate sample rate and compare with stream rate.
+            double seconds = (endTime - beginTime) / (double) OBOE_NANOS_PER_SECOND;
+            double measuredRate = (oboeFramesRead2 - oboeFramesRead1) / seconds;
+            ASSERT_NEAR(actualSampleRate, measuredRate, rateTolerance);
         }
-    } while (framesWritten > 0 && loops-- > 0);
 
-    EXPECT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead2));
-    oboe_nanoseconds_t endTime = Oboe_getNanoseconds(OBOE_CLOCK_MONOTONIC);
-    ASSERT_GT(oboeFramesRead2, 0);
-    ASSERT_GT(oboeFramesRead2, oboeFramesRead1);
-    ASSERT_LE(oboeFramesRead2, oboeFramesWritten);
-
-    // TODO why is legacy so inaccurate?
-    const double rateTolerance = 200.0; // arbitrary tolerance for sample rate
-    if (requestedSharingMode != OBOE_SHARING_MODE_LEGACY) {
-        // Calculate approximate sample rate and compare with stream rate.
-        double seconds = (endTime - beginTime) / (double) OBOE_NANOS_PER_SECOND;
-        double measuredRate = (oboeFramesRead2 - oboeFramesRead1) / seconds;
-        ASSERT_NEAR(actualSampleRate, measuredRate, rateTolerance);
+        // Request async pause and wait for server to say that it has completed the pause.
+        ASSERT_EQ(OBOE_OK, OboeStream_requestPause(oboeStream));
+        EXPECT_EQ(OBOE_OK, OboeStream_waitForStateChange(oboeStream,
+                                                OBOE_STREAM_STATE_PAUSING,
+                                                &state,
+                                                DEFAULT_STATE_TIMEOUT));
+        EXPECT_EQ(OBOE_STREAM_STATE_PAUSED, state);
     }
 
-    // Request async pause and wait for server to say that it has completed the pause.
-    ASSERT_EQ(OBOE_OK, OboeStream_requestPause(oboeStream));
-    EXPECT_EQ(OBOE_OK, OboeStream_waitForStateChange(oboeStream,
-                                            OBOE_STREAM_STATE_PAUSING,
-                                            &state,
-                                            DEFAULT_STATE_TIMEOUT));
-    EXPECT_EQ(OBOE_STREAM_STATE_PAUSED, state);
-
     // Make sure the read counter is not advancing when we are paused.
     ASSERT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead));
     ASSERT_GE(oboeFramesRead, oboeFramesRead2); // monotonic increase
@@ -255,13 +265,14 @@
     ASSERT_EQ(OBOE_OK, OboeStream_getFramesRead(oboeStream, &oboeFramesRead2));
     EXPECT_EQ(oboeFramesRead, oboeFramesRead2);
 
-    // Fill up the buffer.
+    // ------------------- TEST FLUSH -----------------
+    // Prime the buffer.
     timeoutNanos = 0;
-    loops = 100;
+    writeLoops = 100;
     do {
         framesWritten = OboeStream_write(oboeStream, data, framesPerBurst, timeoutNanos);
         framesTotal += framesWritten;
-    } while (framesWritten > 0 && loops-- > 0);
+    } while (framesWritten > 0 && writeLoops-- > 0);
     EXPECT_EQ(0, framesWritten);
 
     // Flush and wait for server to respond.
@@ -286,6 +297,16 @@
     EXPECT_EQ(OBOE_OK, OboeStream_close(oboeStream));
 }
 
+// Test Writing to an OboeStream using LEGACY sharing mode.
+TEST(test_oboe_api, oboe_stream_legacy) {
+    runtest_oboe_stream(OBOE_SHARING_MODE_LEGACY);
+}
+
+// Test Writing to an OboeStream using EXCLUSIVE sharing mode.
+TEST(test_oboe_api, oboe_stream_exclusive) {
+    runtest_oboe_stream(OBOE_SHARING_MODE_EXCLUSIVE);
+}
+
 #define OBOE_THREAD_ANSWER          1826375
 #define OBOE_THREAD_DURATION_MSEC       500
 
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index f247475..c63ab47 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -5543,6 +5543,7 @@
                 }
 
                 size_t size = buffer->size();
+                size_t offset = buffer->offset();
                 if (buffer->base() != info->mCodecData->base()) {
                     ALOGV("[%s] Needs to copy input data for buffer %u. (%p != %p)",
                          mCodec->mComponentName.c_str(),
@@ -5560,7 +5561,7 @@
                     }
                     size = info->mCodecData->size();
                 } else {
-                    info->mCodecData->setRange(0, size);
+                    info->mCodecData->setRange(offset, size);
                 }
 
                 if (flags & OMX_BUFFERFLAG_CODECCONFIG) {
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index f3d622b..25dd6b1 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -18,7 +18,6 @@
         DataConverter.cpp                 \
         DataSource.cpp                    \
         DataURISource.cpp                 \
-        DRMExtractor.cpp                  \
         ESDS.cpp                          \
         FileSource.cpp                    \
         FLACExtractor.cpp                 \
diff --git a/media/libstagefright/DRMExtractor.cpp b/media/libstagefright/DRMExtractor.cpp
deleted file mode 100644
index 8ba36d5..0000000
--- a/media/libstagefright/DRMExtractor.cpp
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "include/DRMExtractor.h"
-
-#include <arpa/inet.h>
-#include <utils/String8.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaBuffer.h>
-
-#include <drm/drm_framework_common.h>
-#include <utils/Errors.h>
-
-
-namespace android {
-
-class DRMSource : public MediaSource {
-public:
-    DRMSource(const sp<IMediaSource> &mediaSource,
-            const sp<DecryptHandle> &decryptHandle,
-            DrmManagerClient *managerClient,
-            int32_t trackId, DrmBuffer *ipmpBox);
-
-    virtual status_t start(MetaData *params = NULL);
-    virtual status_t stop();
-    virtual sp<MetaData> getFormat();
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-protected:
-    virtual ~DRMSource();
-
-private:
-    sp<IMediaSource> mOriginalMediaSource;
-    sp<DecryptHandle> mDecryptHandle;
-    DrmManagerClient* mDrmManagerClient;
-    size_t mTrackId;
-    mutable Mutex mDRMLock;
-    size_t mNALLengthSize;
-    bool mWantsNALFragments;
-
-    DRMSource(const DRMSource &);
-    DRMSource &operator=(const DRMSource &);
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
-DRMSource::DRMSource(const sp<IMediaSource> &mediaSource,
-        const sp<DecryptHandle> &decryptHandle,
-        DrmManagerClient *managerClient,
-        int32_t trackId, DrmBuffer *ipmpBox)
-    : mOriginalMediaSource(mediaSource),
-      mDecryptHandle(decryptHandle),
-      mDrmManagerClient(managerClient),
-      mTrackId(trackId),
-      mNALLengthSize(0),
-      mWantsNALFragments(false) {
-    CHECK(mDrmManagerClient);
-    mDrmManagerClient->initializeDecryptUnit(
-            mDecryptHandle, trackId, ipmpBox);
-
-    const char *mime;
-    bool success = getFormat()->findCString(kKeyMIMEType, &mime);
-    CHECK(success);
-
-    if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
-        uint32_t type;
-        const void *data;
-        size_t size;
-        CHECK(getFormat()->findData(kKeyAVCC, &type, &data, &size));
-
-        const uint8_t *ptr = (const uint8_t *)data;
-
-        CHECK(size >= 7);
-        CHECK_EQ(ptr[0], 1);  // configurationVersion == 1
-
-        // The number of bytes used to encode the length of a NAL unit.
-        mNALLengthSize = 1 + (ptr[4] & 3);
-    }
-}
-
-DRMSource::~DRMSource() {
-    Mutex::Autolock autoLock(mDRMLock);
-    mDrmManagerClient->finalizeDecryptUnit(mDecryptHandle, mTrackId);
-}
-
-status_t DRMSource::start(MetaData *params) {
-    int32_t val;
-    if (params && params->findInt32(kKeyWantsNALFragments, &val)
-        && val != 0) {
-        mWantsNALFragments = true;
-    } else {
-        mWantsNALFragments = false;
-    }
-
-   return mOriginalMediaSource->start(params);
-}
-
-status_t DRMSource::stop() {
-    return mOriginalMediaSource->stop();
-}
-
-sp<MetaData> DRMSource::getFormat() {
-    return mOriginalMediaSource->getFormat();
-}
-
-status_t DRMSource::read(MediaBuffer **buffer, const ReadOptions *options) {
-    Mutex::Autolock autoLock(mDRMLock);
-    status_t err;
-    if ((err = mOriginalMediaSource->read(buffer, options)) != OK) {
-        return err;
-    }
-
-    size_t len = (*buffer)->range_length();
-
-    char *src = (char *)(*buffer)->data() + (*buffer)->range_offset();
-
-    DrmBuffer encryptedDrmBuffer(src, len);
-    DrmBuffer decryptedDrmBuffer;
-    decryptedDrmBuffer.length = len;
-    decryptedDrmBuffer.data = new char[len];
-    DrmBuffer *pDecryptedDrmBuffer = &decryptedDrmBuffer;
-
-    if ((err = mDrmManagerClient->decrypt(mDecryptHandle, mTrackId,
-            &encryptedDrmBuffer, &pDecryptedDrmBuffer)) != NO_ERROR) {
-
-        if (decryptedDrmBuffer.data) {
-            delete [] decryptedDrmBuffer.data;
-            decryptedDrmBuffer.data = NULL;
-        }
-
-        return err;
-    }
-    CHECK(pDecryptedDrmBuffer == &decryptedDrmBuffer);
-
-    const char *mime;
-    CHECK(getFormat()->findCString(kKeyMIMEType, &mime));
-
-    if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC) && !mWantsNALFragments) {
-        uint8_t *dstData = (uint8_t*)src;
-        size_t srcOffset = 0;
-        size_t dstOffset = 0;
-
-        len = decryptedDrmBuffer.length;
-        while (srcOffset < len) {
-            CHECK(srcOffset + mNALLengthSize <= len);
-            size_t nalLength = 0;
-            const uint8_t* data = (const uint8_t*)(&decryptedDrmBuffer.data[srcOffset]);
-
-            switch (mNALLengthSize) {
-                case 1:
-                    nalLength = *data;
-                    break;
-                case 2:
-                    nalLength = U16_AT(data);
-                    break;
-                case 3:
-                    nalLength = ((size_t)data[0] << 16) | U16_AT(&data[1]);
-                    break;
-                case 4:
-                    nalLength = U32_AT(data);
-                    break;
-                default:
-                    CHECK(!"Should not be here.");
-                    break;
-            }
-
-            srcOffset += mNALLengthSize;
-
-            size_t end = srcOffset + nalLength;
-            if (end > len || end < srcOffset) {
-                if (decryptedDrmBuffer.data) {
-                    delete [] decryptedDrmBuffer.data;
-                    decryptedDrmBuffer.data = NULL;
-                }
-
-                return ERROR_MALFORMED;
-            }
-
-            if (nalLength == 0) {
-                continue;
-            }
-
-            if (dstOffset > SIZE_MAX - 4 ||
-                dstOffset + 4 > SIZE_MAX - nalLength ||
-                dstOffset + 4 + nalLength > (*buffer)->size()) {
-                (*buffer)->release();
-                (*buffer) = NULL;
-                if (decryptedDrmBuffer.data) {
-                    delete [] decryptedDrmBuffer.data;
-                    decryptedDrmBuffer.data = NULL;
-                }
-                return ERROR_MALFORMED;
-            }
-
-            dstData[dstOffset++] = 0;
-            dstData[dstOffset++] = 0;
-            dstData[dstOffset++] = 0;
-            dstData[dstOffset++] = 1;
-            memcpy(&dstData[dstOffset], &decryptedDrmBuffer.data[srcOffset], nalLength);
-            srcOffset += nalLength;
-            dstOffset += nalLength;
-        }
-
-        CHECK_EQ(srcOffset, len);
-        (*buffer)->set_range((*buffer)->range_offset(), dstOffset);
-
-    } else {
-        memcpy(src, decryptedDrmBuffer.data, decryptedDrmBuffer.length);
-        (*buffer)->set_range((*buffer)->range_offset(), decryptedDrmBuffer.length);
-    }
-
-    if (decryptedDrmBuffer.data) {
-        delete [] decryptedDrmBuffer.data;
-        decryptedDrmBuffer.data = NULL;
-    }
-
-    return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-DRMExtractor::DRMExtractor(const sp<DataSource> &source, const char* mime)
-    : mDataSource(source),
-      mDecryptHandle(NULL),
-      mDrmManagerClient(NULL) {
-    mOriginalExtractor = MediaExtractor::Create(source, mime);
-    mOriginalExtractor->getMetaData()->setInt32(kKeyIsDRM, 1);
-
-    source->getDrmInfo(mDecryptHandle, &mDrmManagerClient);
-}
-
-DRMExtractor::~DRMExtractor() {
-}
-
-size_t DRMExtractor::countTracks() {
-    return mOriginalExtractor->countTracks();
-}
-
-sp<IMediaSource> DRMExtractor::getTrack(size_t index) {
-    sp<IMediaSource> originalMediaSource = mOriginalExtractor->getTrack(index);
-    originalMediaSource->getFormat()->setInt32(kKeyIsDRM, 1);
-
-    int32_t trackID;
-    CHECK(getTrackMetaData(index, 0)->findInt32(kKeyTrackID, &trackID));
-
-    DrmBuffer ipmpBox;
-    ipmpBox.data = mOriginalExtractor->getDrmTrackInfo(trackID, &(ipmpBox.length));
-    CHECK(ipmpBox.length > 0);
-
-    return interface_cast<IMediaSource>(
-            new DRMSource(originalMediaSource, mDecryptHandle, mDrmManagerClient,
-            trackID, &ipmpBox));
-}
-
-sp<MetaData> DRMExtractor::getTrackMetaData(size_t index, uint32_t flags) {
-    return mOriginalExtractor->getTrackMetaData(index, flags);
-}
-
-sp<MetaData> DRMExtractor::getMetaData() {
-    return mOriginalExtractor->getMetaData();
-}
-
-bool SniffDRM(
-    const sp<DataSource> &source, String8 *mimeType, float *confidence,
-        sp<AMessage> *) {
-    sp<DecryptHandle> decryptHandle = source->DrmInitialization();
-
-    if (decryptHandle != NULL) {
-        if (decryptHandle->decryptApiType == DecryptApiType::CONTAINER_BASED) {
-            *mimeType = String8("drm+container_based+") + decryptHandle->mimeType;
-            *confidence = 10.0f;
-        } else if (decryptHandle->decryptApiType == DecryptApiType::ELEMENTARY_STREAM_BASED) {
-            *mimeType = String8("drm+es_based+") + decryptHandle->mimeType;
-            *confidence = 10.0f;
-        } else {
-            return false;
-        }
-
-        return true;
-    }
-
-    return false;
-}
-} //namespace android
-
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 716f5d8..02a1239 100755
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -3260,8 +3260,14 @@
         mOwner->writeInt32(0);
     } else {
         int32_t width, height;
-        bool success = mMeta->findInt32(kKeyWidth, &width);
-        success = success && mMeta->findInt32(kKeyHeight, &height);
+        bool success = mMeta->findInt32(kKeyDisplayWidth, &width);
+        success = success && mMeta->findInt32(kKeyDisplayHeight, &height);
+
+        // Use width/height if display width/height are not present.
+        if (!success) {
+            success = mMeta->findInt32(kKeyWidth, &width);
+            success = success && mMeta->findInt32(kKeyHeight, &height);
+        }
         CHECK(success);
 
         mOwner->writeInt32(width << 16);   // 32-bit fixed-point value
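
The track-header width and height are written as 16.16 fixed-point values, which is why the
integer pixel count is shifted left by 16 bits. A quick worked example with an illustrative value:

    #include <cstdint>
    #include <cstdio>

    int main() {
        int32_t width = 1920;
        int32_t fixedPoint = width << 16;  // 16.16 fixed point: integer part in the high half
        std::printf("%d -> 0x%08x\n", width, static_cast<uint32_t>(fixedPoint));
        // prints "1920 -> 0x07800000"
        return 0;
    }
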
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 49f480d..df4d9bf 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -27,7 +27,6 @@
 #include "include/OggExtractor.h"
 #include "include/MPEG2PSExtractor.h"
 #include "include/MPEG2TSExtractor.h"
-#include "include/DRMExtractor.h"
 #include "include/FLACExtractor.h"
 #include "include/AACExtractor.h"
 #include "include/MidiExtractor.h"
@@ -51,8 +50,7 @@
 
 namespace android {
 
-MediaExtractor::MediaExtractor():
-    mIsDrm(false) {
+MediaExtractor::MediaExtractor() {
     if (!LOG_NDEBUG) {
         uid_t uid = getuid();
         struct passwd *pw = getpwuid(uid);
@@ -148,23 +146,6 @@
         ALOGW("creating media extractor in calling process");
         return CreateFromService(source, mime);
     } else {
-        String8 mime8;
-        float confidence;
-        sp<AMessage> meta;
-
-        // Check if it's es-based DRM, since DRMExtractor needs to be created in the media server
-        // process, not the extractor process.
-        if (SniffDRM(source, &mime8, &confidence, &meta)) {
-            const char *drmMime = mime8.string();
-            ALOGV("Detected media content as '%s' with confidence %.2f", drmMime, confidence);
-            if (!strncmp(drmMime, "drm+es_based+", 13)) {
-                // DRMExtractor sets container metadata kKeyIsDRM to 1
-                return new DRMExtractor(source, drmMime + 14);
-            } else {
-                mime = drmMime + 20; // get real mimetype after "drm+container_based+" prefix
-            }
-        }
-
         // remote extractor
         ALOGV("get service manager");
         sp<IBinder> binder = defaultServiceManager()->getService(String16("media.extractor"));
@@ -187,6 +168,9 @@
     ALOGV("MediaExtractor::CreateFromService %s", mime);
     RegisterDefaultSniffers();
 
+    // initialize source decryption if needed
+    source->DrmInitialization();
+
     sp<AMessage> meta;
 
     String8 tmp;
@@ -299,9 +283,6 @@
     RegisterSniffer_l(SniffMPEG2PS);
     RegisterSniffer_l(SniffMidi);
 
-    if (property_get_bool("drm.service.enabled", false)) {
-        RegisterSniffer_l(SniffDRM);
-    }
     gSniffersRegistered = true;
 }
 
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 8061bc6..5ce2b76 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -522,8 +522,6 @@
         return ERROR_MALFORMED;
     }
 
-    mSyncSampleOffset = data_offset;
-
     uint8_t header[8];
     if (mDataSource->readAt(
                 data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
@@ -535,13 +533,13 @@
         return ERROR_MALFORMED;
     }
 
-    mNumSyncSamples = U32_AT(&header[4]);
+    uint32_t numSyncSamples = U32_AT(&header[4]);
 
-    if (mNumSyncSamples < 2) {
+    if (numSyncSamples < 2) {
         ALOGV("Table of sync samples is empty or has only a single entry!");
     }
 
-    uint64_t allocSize = (uint64_t)mNumSyncSamples * sizeof(uint32_t);
+    uint64_t allocSize = (uint64_t)numSyncSamples * sizeof(uint32_t);
     if (allocSize > kMaxTotalSize) {
         ALOGE("Sync sample table size too large.");
         return ERROR_OUT_OF_RANGE;
@@ -559,19 +557,21 @@
         return ERROR_OUT_OF_RANGE;
     }
 
-    mSyncSamples = new (std::nothrow) uint32_t[mNumSyncSamples];
+    mSyncSamples = new (std::nothrow) uint32_t[numSyncSamples];
     if (!mSyncSamples) {
         ALOGE("Cannot allocate sync sample table with %llu entries.",
-                (unsigned long long)mNumSyncSamples);
+                (unsigned long long)numSyncSamples);
         return ERROR_OUT_OF_RANGE;
     }
 
-    if (mDataSource->readAt(mSyncSampleOffset + 8, mSyncSamples,
+    if (mDataSource->readAt(data_offset + 8, mSyncSamples,
             (size_t)allocSize) != (ssize_t)allocSize) {
+        delete[] mSyncSamples;
+        mSyncSamples = NULL;
         return ERROR_IO;
     }
 
-    for (size_t i = 0; i < mNumSyncSamples; ++i) {
+    for (size_t i = 0; i < numSyncSamples; ++i) {
         if (mSyncSamples[i] == 0) {
             ALOGE("b/32423862, unexpected zero value in stss");
             continue;
@@ -579,6 +579,9 @@
         mSyncSamples[i] = ntohl(mSyncSamples[i]) - 1;
     }
 
+    mSyncSampleOffset = data_offset;
+    mNumSyncSamples = numSyncSamples;
+
     return OK;
 }
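
The restructuring in SampleTable follows a parse-then-commit pattern: the sync sample count and
table are read and validated through locals, and the member fields (mSyncSampleOffset,
mNumSyncSamples, mSyncSamples) are assigned only once every step has succeeded, so an I/O
failure can no longer leave the object half-initialized. A stripped-down sketch of the same
pattern, with illustrative names rather than the stagefright types:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct SyncTable {
        std::vector<uint32_t> entries;  // committed state
        bool loaded = false;

        // Parse into locals first; commit members only on full success.
        bool load(const uint32_t *raw, size_t count) {
            if (raw == nullptr || count == 0) {
                return false;                 // nothing committed
            }
            std::vector<uint32_t> parsed(raw, raw + count);
            for (uint32_t &value : parsed) {
                if (value == 0) {
                    return false;             // validation failed, members untouched
                }
                value -= 1;                   // e.g. convert 1-based entries to 0-based
            }
            entries = std::move(parsed);      // commit only now
            loaded = true;
            return true;
        }
    };
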
 
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index f2638ed..ec02fb9 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1770,6 +1770,45 @@
     *sync = settings;
 }
 
+void writeToAMessage(const sp<AMessage> &msg, const BufferingSettings &buffering) {
+    msg->setInt32("init-mode", buffering.mInitialBufferingMode);
+    msg->setInt32("rebuffer-mode", buffering.mRebufferingMode);
+    msg->setInt32("init-ms", buffering.mInitialWatermarkMs);
+    msg->setInt32("init-kb", buffering.mInitialWatermarkKB);
+    msg->setInt32("rebuffer-low-ms", buffering.mRebufferingWatermarkLowMs);
+    msg->setInt32("rebuffer-high-ms", buffering.mRebufferingWatermarkHighMs);
+    msg->setInt32("rebuffer-low-kb", buffering.mRebufferingWatermarkLowKB);
+    msg->setInt32("rebuffer-high-kb", buffering.mRebufferingWatermarkHighKB);
+}
+
+void readFromAMessage(const sp<AMessage> &msg, BufferingSettings *buffering /* nonnull */) {
+    int32_t value;
+    if (msg->findInt32("init-mode", &value)) {
+        buffering->mInitialBufferingMode = (BufferingMode)value;
+    }
+    if (msg->findInt32("rebuffer-mode", &value)) {
+        buffering->mRebufferingMode = (BufferingMode)value;
+    }
+    if (msg->findInt32("init-ms", &value)) {
+        buffering->mInitialWatermarkMs = value;
+    }
+    if (msg->findInt32("init-kb", &value)) {
+        buffering->mInitialWatermarkKB = value;
+    }
+    if (msg->findInt32("rebuffer-low-ms", &value)) {
+        buffering->mRebufferingWatermarkLowMs = value;
+    }
+    if (msg->findInt32("rebuffer-high-ms", &value)) {
+        buffering->mRebufferingWatermarkHighMs = value;
+    }
+    if (msg->findInt32("rebuffer-low-kb", &value)) {
+        buffering->mRebufferingWatermarkLowKB = value;
+    }
+    if (msg->findInt32("rebuffer-high-kb", &value)) {
+        buffering->mRebufferingWatermarkHighKB = value;
+    }
+}
+
 AString nameForFd(int fd) {
     const size_t SIZE = 256;
     char buffer[SIZE];
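
The two helpers added to Utils.cpp serialize a BufferingSettings through the generic AMessage
key/value container, so a settings object can be posted across handler threads (as LiveSession
does below) and reconstructed on the receiving side. A hedged round-trip sketch using only the
functions and fields visible in this patch; it assumes the android namespace and the same
headers Utils.cpp already includes.

    // Illustrative only: relies on writeToAMessage()/readFromAMessage() as declared above.
    void bufferingSettingsRoundTrip(const BufferingSettings &in, BufferingSettings *out) {
        sp<AMessage> msg = new AMessage();
        writeToAMessage(msg, in);     // fields -> int32 entries ("init-ms", "rebuffer-low-kb", ...)
        readFromAMessage(msg, out);   // entries -> fields; absent keys leave *out untouched
        // After this, out->mInitialWatermarkMs == in.mInitialWatermarkMs, and so on.
    }
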
diff --git a/media/libstagefright/filters/GraphicBufferListener.cpp b/media/libstagefright/filters/GraphicBufferListener.cpp
index c1aaa17..db061c1 100644
--- a/media/libstagefright/filters/GraphicBufferListener.cpp
+++ b/media/libstagefright/filters/GraphicBufferListener.cpp
@@ -22,6 +22,7 @@
 #include <media/stagefright/MediaErrors.h>
 
 #include <gui/BufferItem.h>
+#include <utils/String8.h>
 
 #include "GraphicBufferListener.h"
 
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index 045e044..1b0db33 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -22,8 +22,8 @@
 
 #include "AMessage.h"
 
-#include <android/log.h>
 #include <binder/Parcel.h>
+#include <log/log.h>
 
 #include "AAtomizer.h"
 #include "ABuffer.h"
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 477280a..e144942 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -49,11 +49,6 @@
 const int64_t LiveSession::kUpSwitchMarginUs = 5000000ll;
 const int64_t LiveSession::kResumeThresholdUs = 100000ll;
 
-// Buffer Prepare/Ready/Underflow Marks
-const int64_t LiveSession::kReadyMarkUs = 5000000ll;
-const int64_t LiveSession::kPrepareMarkUs = 1500000ll;
-const int64_t LiveSession::kUnderflowMarkUs = 1000000ll;
-
 struct LiveSession::BandwidthEstimator : public RefBase {
     BandwidthEstimator();
 
@@ -495,6 +490,13 @@
     return new HTTPDownloader(mHTTPService, mExtraHeaders);
 }
 
+void LiveSession::setBufferingSettings(
+        const BufferingSettings &buffering) {
+    sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
+    writeToAMessage(msg, buffering);
+    msg->post();
+}
+
 void LiveSession::connectAsync(
         const char *url, const KeyedVector<String8, String8> *headers) {
     sp<AMessage> msg = new AMessage(kWhatConnect, this);
@@ -620,6 +622,12 @@
 
 void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
+        case kWhatSetBufferingSettings:
+        {
+            readFromAMessage(msg, &mBufferingSettings);
+            break;
+        }
+
         case kWhatConnect:
         {
             onConnect(msg);
@@ -830,7 +838,10 @@
                     // If switching up, require a cushion bigger than kUnderflowMark
                     // to avoid buffering immediately after the switch.
                     // (If we don't have that cushion we'd rather cancel and try again.)
-                    int64_t delayUs = switchUp ? (kUnderflowMarkUs + 1000000ll) : 0;
+                    int64_t delayUs =
+                        switchUp ?
+                            (mBufferingSettings.mRebufferingWatermarkLowMs * 1000ll + 1000000ll)
+                            : 0;
                     bool needResumeUntil = false;
                     sp<AMessage> stopParams = msg;
                     if (checkSwitchProgress(stopParams, delayUs, &needResumeUntil)) {
@@ -2189,13 +2200,16 @@
         }
 
         ++activeCount;
-        int64_t readyMark = mInPreparationPhase ? kPrepareMarkUs : kReadyMarkUs;
-        if (bufferedDurationUs > readyMark
+        int64_t readyMarkUs =
+            (mInPreparationPhase ?
+                mBufferingSettings.mInitialWatermarkMs :
+                mBufferingSettings.mRebufferingWatermarkHighMs) * 1000ll;
+        if (bufferedDurationUs > readyMarkUs
                 || mPacketSources[i]->isFinished(0)) {
             ++readyCount;
         }
         if (!mPacketSources[i]->isFinished(0)) {
-            if (bufferedDurationUs < kUnderflowMarkUs) {
+            if (bufferedDurationUs < mBufferingSettings.mRebufferingWatermarkLowMs * 1000ll) {
                 ++underflowCount;
             }
             if (bufferedDurationUs > mUpSwitchMark) {
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index a0138be..abf8cf0 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -18,6 +18,7 @@
 
 #define LIVE_SESSION_H_
 
+#include <media/BufferingSettings.h>
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/mediaplayer.h>
 
@@ -72,6 +73,8 @@
             uint32_t flags,
             const sp<IMediaHTTPService> &httpService);
 
+    void setBufferingSettings(const BufferingSettings &buffering);
+
     int64_t calculateMediaTimeUs(int64_t firstTimeUs, int64_t timeUs, int32_t discontinuitySeq);
     status_t dequeueAccessUnit(StreamType stream, sp<ABuffer> *accessUnit);
 
@@ -129,6 +132,7 @@
         kWhatChangeConfiguration2       = 'chC2',
         kWhatChangeConfiguration3       = 'chC3',
         kWhatPollBuffering              = 'poll',
+        kWhatSetBufferingSettings       = 'sBuS',
     };
 
     // Bandwidth Switch Mark Defaults
@@ -138,9 +142,7 @@
     static const int64_t kResumeThresholdUs;
 
     // Buffer Prepare/Ready/Underflow Marks
-    static const int64_t kReadyMarkUs;
-    static const int64_t kPrepareMarkUs;
-    static const int64_t kUnderflowMarkUs;
+    BufferingSettings mBufferingSettings;
 
     struct BandwidthEstimator;
     struct BandwidthItem {
diff --git a/media/libstagefright/include/DRMExtractor.h b/media/libstagefright/include/DRMExtractor.h
deleted file mode 100644
index 3dc7df8..0000000
--- a/media/libstagefright/include/DRMExtractor.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef DRM_EXTRACTOR_H_
-
-#define DRM_EXTRACTOR_H_
-
-#include <media/IMediaSource.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <drm/DrmManagerClient.h>
-
-namespace android {
-
-struct AMessage;
-class DataSource;
-class SampleTable;
-class String8;
-class DecryptHandle;
-
-class DRMExtractor : public MediaExtractor {
-public:
-    DRMExtractor(const sp<DataSource> &source, const char *mime);
-
-    virtual size_t countTracks();
-    virtual sp<IMediaSource> getTrack(size_t index);
-    virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-    virtual sp<MetaData> getMetaData();
-    virtual const char * name() { return "DRMExtractor"; }
-
-protected:
-    virtual ~DRMExtractor();
-
-private:
-    sp<DataSource> mDataSource;
-
-    sp<IMediaExtractor> mOriginalExtractor;
-    sp<DecryptHandle> mDecryptHandle;
-    DrmManagerClient* mDrmManagerClient;
-
-    DRMExtractor(const DRMExtractor &);
-    DRMExtractor &operator=(const DRMExtractor &);
-};
-
-bool SniffDRM(
-        const sp<DataSource> &source, String8 *mimeType, float *confidence,
-            sp<AMessage> *);
-
-}  // namespace android
-
-#endif  // DRM_EXTRACTOR_H_
-
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index c20e9fc..ea86a37 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -1559,7 +1559,8 @@
     switch (omxBuffer.mBufferType) {
     case OMXBuffer::kBufferTypePreset:
         return emptyBuffer_l(
-                buffer, 0, omxBuffer.mRangeLength, flags, timestamp, fenceFd);
+                buffer, omxBuffer.mRangeOffset, omxBuffer.mRangeLength,
+                flags, timestamp, fenceFd);
 
     case OMXBuffer::kBufferTypeANWBuffer:
         return emptyGraphicBuffer_l(
diff --git a/media/mtp/MtpFfsHandle.cpp b/media/mtp/MtpFfsHandle.cpp
index d11a10d..d0696a8 100644
--- a/media/mtp/MtpFfsHandle.cpp
+++ b/media/mtp/MtpFfsHandle.cpp
@@ -107,6 +107,7 @@
     .bInterfaceClass = USB_CLASS_STILL_IMAGE,
     .bInterfaceSubClass = 1,
     .bInterfaceProtocol = 1,
+    .iInterface = 1,
 };
 
 const struct usb_interface_descriptor ptp_interface_desc = {
@@ -259,14 +260,23 @@
     .intr_comp = ss_intr_comp,
 };
 
+#define STR_INTERFACE "MTP"
 const struct {
     struct usb_functionfs_strings_head header;
+    struct {
+        __le16 code;
+        const char str1[sizeof(STR_INTERFACE)];
+    } __attribute__((packed)) lang0;
 } __attribute__((packed)) strings = {
     .header = {
         .magic = cpu_to_le32(FUNCTIONFS_STRINGS_MAGIC),
         .length = cpu_to_le32(sizeof(strings)),
-        .str_count = cpu_to_le32(0),
-        .lang_count = cpu_to_le32(0),
+        .str_count = cpu_to_le32(1),
+        .lang_count = cpu_to_le32(1),
+    },
+    .lang0 = {
+        .code = cpu_to_le16(0x0409),
+        .str1 = STR_INTERFACE,
     },
 };
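The strings hunk above switches the FunctionFS strings blob from zero strings to one language (en-US, LANGID 0x0409) carrying a single string, "MTP", which the new iInterface = 1 field in the interface descriptor refers to. A self-contained sketch of the same layout, using locally defined stand-in structs instead of the linux/usb/functionfs.h definitions; the magic value is a placeholder:

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-ins for the FunctionFS strings layout (illustrative only).
    struct StringsHead {
        uint32_t magic;       // placeholder for FUNCTIONFS_STRINGS_MAGIC
        uint32_t length;      // total length of the blob, header included
        uint32_t str_count;   // number of strings per language
        uint32_t lang_count;  // number of languages
    } __attribute__((packed));

    struct Lang0 {
        uint16_t code;        // USB LANGID; 0x0409 is US English
        char str1[4];         // NUL-terminated string #1 ("MTP")
    } __attribute__((packed));

    struct StringsBlob {
        StringsHead header;
        Lang0 lang0;
    } __attribute__((packed));

    static const StringsBlob kStrings = {
        { 2u, static_cast<uint32_t>(sizeof(StringsBlob)), 1u, 1u },
        { 0x0409, "MTP" },
    };

    int main() {
        // iInterface = 1 in the interface descriptor selects string #1 ("MTP").
        std::printf("blob is %u bytes: %u string(s) in %u language(s)\n",
                    static_cast<unsigned>(kStrings.header.length),
                    static_cast<unsigned>(kStrings.header.str_count),
                    static_cast<unsigned>(kStrings.header.lang_count));
    }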
 
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 35eceb2..e97d1ed 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -80,6 +80,7 @@
 class EffectsFactoryHalInterface;
 class FastMixer;
 class PassthruBufferProvider;
+class RecordBufferConverter;
 class ServerProxy;
 
 // ----------------------------------------------------------------------------
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index e025316..b1ede30 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -29,6 +29,7 @@
 #include <cutils/properties.h>
 #include <media/AudioParameter.h>
 #include <media/AudioResamplerPublic.h>
+#include <media/RecordBufferConverter.h>
 #include <media/TypeConverter.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
@@ -1263,6 +1264,7 @@
     bool chainCreated = false;
     bool effectCreated = false;
     bool effectRegistered = false;
+    audio_unique_id_t effectId = AUDIO_UNIQUE_ID_USE_UNSPECIFIED;
 
     lStatus = initCheck();
     if (lStatus != NO_ERROR) {
@@ -1296,15 +1298,16 @@
         ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
 
         if (effect == 0) {
-            audio_unique_id_t id = mAudioFlinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
+            effectId = mAudioFlinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
             // Check CPU and memory usage
-            lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id);
+            lStatus = AudioSystem::registerEffect(
+                    desc, mId, chain->strategy(), sessionId, effectId);
             if (lStatus != NO_ERROR) {
                 goto Exit;
             }
             effectRegistered = true;
             // create a new effect module if none present in the chain
-            lStatus = chain->createEffect_l(effect, this, desc, id, sessionId, pinned);
+            lStatus = chain->createEffect_l(effect, this, desc, effectId, sessionId, pinned);
             if (lStatus != NO_ERROR) {
                 goto Exit;
             }
@@ -1333,7 +1336,7 @@
             chain->removeEffect_l(effect);
         }
         if (effectRegistered) {
-            AudioSystem::unregisterEffect(effect->id());
+            AudioSystem::unregisterEffect(effectId);
         }
         if (chainCreated) {
             removeEffectChain_l(chain);
@@ -6936,252 +6939,6 @@
     buffer->frameCount = 0;
 }
 
-AudioFlinger::RecordThread::RecordBufferConverter::RecordBufferConverter(
-        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
-        uint32_t srcSampleRate,
-        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
-        uint32_t dstSampleRate) :
-            mSrcChannelMask(AUDIO_CHANNEL_INVALID), // updateParameters will set following vars
-            // mSrcFormat
-            // mSrcSampleRate
-            // mDstChannelMask
-            // mDstFormat
-            // mDstSampleRate
-            // mSrcChannelCount
-            // mDstChannelCount
-            // mDstFrameSize
-            mBuf(NULL), mBufFrames(0), mBufFrameSize(0),
-            mResampler(NULL),
-            mIsLegacyDownmix(false),
-            mIsLegacyUpmix(false),
-            mRequiresFloat(false),
-            mInputConverterProvider(NULL)
-{
-    (void)updateParameters(srcChannelMask, srcFormat, srcSampleRate,
-            dstChannelMask, dstFormat, dstSampleRate);
-}
-
-AudioFlinger::RecordThread::RecordBufferConverter::~RecordBufferConverter() {
-    free(mBuf);
-    delete mResampler;
-    delete mInputConverterProvider;
-}
-
-size_t AudioFlinger::RecordThread::RecordBufferConverter::convert(void *dst,
-        AudioBufferProvider *provider, size_t frames)
-{
-    if (mInputConverterProvider != NULL) {
-        mInputConverterProvider->setBufferProvider(provider);
-        provider = mInputConverterProvider;
-    }
-
-    if (mResampler == NULL) {
-        ALOGVV("NO RESAMPLING sampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
-                mSrcSampleRate, mSrcFormat, mDstFormat);
-
-        AudioBufferProvider::Buffer buffer;
-        for (size_t i = frames; i > 0; ) {
-            buffer.frameCount = i;
-            status_t status = provider->getNextBuffer(&buffer);
-            if (status != OK || buffer.frameCount == 0) {
-                frames -= i; // cannot fill request.
-                break;
-            }
-            // format convert to destination buffer
-            convertNoResampler(dst, buffer.raw, buffer.frameCount);
-
-            dst = (int8_t*)dst + buffer.frameCount * mDstFrameSize;
-            i -= buffer.frameCount;
-            provider->releaseBuffer(&buffer);
-        }
-    } else {
-         ALOGVV("RESAMPLING mSrcSampleRate:%u mDstSampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
-                 mSrcSampleRate, mDstSampleRate, mSrcFormat, mDstFormat);
-
-         // reallocate buffer if needed
-         if (mBufFrameSize != 0 && mBufFrames < frames) {
-             free(mBuf);
-             mBufFrames = frames;
-             (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
-         }
-        // resampler accumulates, but we only have one source track
-        memset(mBuf, 0, frames * mBufFrameSize);
-        frames = mResampler->resample((int32_t*)mBuf, frames, provider);
-        // format convert to destination buffer
-        convertResampler(dst, mBuf, frames);
-    }
-    return frames;
-}
-
-status_t AudioFlinger::RecordThread::RecordBufferConverter::updateParameters(
-        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
-        uint32_t srcSampleRate,
-        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
-        uint32_t dstSampleRate)
-{
-    // quick evaluation if there is any change.
-    if (mSrcFormat == srcFormat
-            && mSrcChannelMask == srcChannelMask
-            && mSrcSampleRate == srcSampleRate
-            && mDstFormat == dstFormat
-            && mDstChannelMask == dstChannelMask
-            && mDstSampleRate == dstSampleRate) {
-        return NO_ERROR;
-    }
-
-    ALOGV("RecordBufferConverter updateParameters srcMask:%#x dstMask:%#x"
-            "  srcFormat:%#x dstFormat:%#x  srcRate:%u dstRate:%u",
-            srcChannelMask, dstChannelMask, srcFormat, dstFormat, srcSampleRate, dstSampleRate);
-    const bool valid =
-            audio_is_input_channel(srcChannelMask)
-            && audio_is_input_channel(dstChannelMask)
-            && audio_is_valid_format(srcFormat) && audio_is_linear_pcm(srcFormat)
-            && audio_is_valid_format(dstFormat) && audio_is_linear_pcm(dstFormat)
-            && (srcSampleRate <= dstSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX)
-            ; // no upsampling checks for now
-    if (!valid) {
-        return BAD_VALUE;
-    }
-
-    mSrcFormat = srcFormat;
-    mSrcChannelMask = srcChannelMask;
-    mSrcSampleRate = srcSampleRate;
-    mDstFormat = dstFormat;
-    mDstChannelMask = dstChannelMask;
-    mDstSampleRate = dstSampleRate;
-
-    // compute derived parameters
-    mSrcChannelCount = audio_channel_count_from_in_mask(srcChannelMask);
-    mDstChannelCount = audio_channel_count_from_in_mask(dstChannelMask);
-    mDstFrameSize = mDstChannelCount * audio_bytes_per_sample(mDstFormat);
-
-    // do we need to resample?
-    delete mResampler;
-    mResampler = NULL;
-    if (mSrcSampleRate != mDstSampleRate) {
-        mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_FLOAT,
-                mSrcChannelCount, mDstSampleRate);
-        mResampler->setSampleRate(mSrcSampleRate);
-        mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT);
-    }
-
-    // are we running legacy channel conversion modes?
-    mIsLegacyDownmix = (mSrcChannelMask == AUDIO_CHANNEL_IN_STEREO
-                            || mSrcChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK)
-                   && mDstChannelMask == AUDIO_CHANNEL_IN_MONO;
-    mIsLegacyUpmix = mSrcChannelMask == AUDIO_CHANNEL_IN_MONO
-                   && (mDstChannelMask == AUDIO_CHANNEL_IN_STEREO
-                            || mDstChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK);
-
-    // do we need to process in float?
-    mRequiresFloat = mResampler != NULL || mIsLegacyDownmix || mIsLegacyUpmix;
-
-    // do we need a staging buffer to convert for destination (we can still optimize this)?
-    // we use mBufFrameSize > 0 to indicate both frame size as well as buffer necessity
-    if (mResampler != NULL) {
-        mBufFrameSize = max(mSrcChannelCount, FCC_2)
-                * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
-    } else if (mIsLegacyUpmix || mIsLegacyDownmix) { // legacy modes always float
-        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
-    } else if (mSrcChannelMask != mDstChannelMask && mDstFormat != mSrcFormat) {
-        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(mSrcFormat);
-    } else {
-        mBufFrameSize = 0;
-    }
-    mBufFrames = 0; // force the buffer to be resized.
-
-    // do we need an input converter buffer provider to give us float?
-    delete mInputConverterProvider;
-    mInputConverterProvider = NULL;
-    if (mRequiresFloat && mSrcFormat != AUDIO_FORMAT_PCM_FLOAT) {
-        mInputConverterProvider = new ReformatBufferProvider(
-                audio_channel_count_from_in_mask(mSrcChannelMask),
-                mSrcFormat,
-                AUDIO_FORMAT_PCM_FLOAT,
-                256 /* provider buffer frame count */);
-    }
-
-    // do we need a remixer to do channel mask conversion
-    if (!mIsLegacyDownmix && !mIsLegacyUpmix && mSrcChannelMask != mDstChannelMask) {
-        (void) memcpy_by_index_array_initialization_from_channel_mask(
-                mIdxAry, ARRAY_SIZE(mIdxAry), mDstChannelMask, mSrcChannelMask);
-    }
-    return NO_ERROR;
-}
-
-void AudioFlinger::RecordThread::RecordBufferConverter::convertNoResampler(
-        void *dst, const void *src, size_t frames)
-{
-    // src is native type unless there is legacy upmix or downmix, whereupon it is float.
-    if (mBufFrameSize != 0 && mBufFrames < frames) {
-        free(mBuf);
-        mBufFrames = frames;
-        (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
-    }
-    // do we need to do legacy upmix and downmix?
-    if (mIsLegacyUpmix || mIsLegacyDownmix) {
-        void *dstBuf = mBuf != NULL ? mBuf : dst;
-        if (mIsLegacyUpmix) {
-            upmix_to_stereo_float_from_mono_float((float *)dstBuf,
-                    (const float *)src, frames);
-        } else /*mIsLegacyDownmix */ {
-            downmix_to_mono_float_from_stereo_float((float *)dstBuf,
-                    (const float *)src, frames);
-        }
-        if (mBuf != NULL) {
-            memcpy_by_audio_format(dst, mDstFormat, mBuf, AUDIO_FORMAT_PCM_FLOAT,
-                    frames * mDstChannelCount);
-        }
-        return;
-    }
-    // do we need to do channel mask conversion?
-    if (mSrcChannelMask != mDstChannelMask) {
-        void *dstBuf = mBuf != NULL ? mBuf : dst;
-        memcpy_by_index_array(dstBuf, mDstChannelCount,
-                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mSrcFormat), frames);
-        if (dstBuf == dst) {
-            return; // format is the same
-        }
-    }
-    // convert to destination buffer
-    const void *convertBuf = mBuf != NULL ? mBuf : src;
-    memcpy_by_audio_format(dst, mDstFormat, convertBuf, mSrcFormat,
-            frames * mDstChannelCount);
-}
-
-void AudioFlinger::RecordThread::RecordBufferConverter::convertResampler(
-        void *dst, /*not-a-const*/ void *src, size_t frames)
-{
-    // src buffer format is ALWAYS float when entering this routine
-    if (mIsLegacyUpmix) {
-        ; // mono to stereo already handled by resampler
-    } else if (mIsLegacyDownmix
-            || (mSrcChannelMask == mDstChannelMask && mSrcChannelCount == 1)) {
-        // the resampler outputs stereo for mono input channel (a feature?)
-        // must convert to mono
-        downmix_to_mono_float_from_stereo_float((float *)src,
-                (const float *)src, frames);
-    } else if (mSrcChannelMask != mDstChannelMask) {
-        // convert to mono channel again for channel mask conversion (could be skipped
-        // with further optimization).
-        if (mSrcChannelCount == 1) {
-            downmix_to_mono_float_from_stereo_float((float *)src,
-                (const float *)src, frames);
-        }
-        // convert to destination format (in place, OK as float is larger than other types)
-        if (mDstFormat != AUDIO_FORMAT_PCM_FLOAT) {
-            memcpy_by_audio_format(src, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
-                    frames * mSrcChannelCount);
-        }
-        // channel convert and save to dst
-        memcpy_by_index_array(dst, mDstChannelCount,
-                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mDstFormat), frames);
-        return;
-    }
-    // convert to destination format and save to dst
-    memcpy_by_audio_format(dst, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
-            frames * mDstChannelCount);
-}
 
 bool AudioFlinger::RecordThread::checkForNewParameter_l(const String8& keyValuePair,
                                                         status_t& status)
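The createEffect_l hunk above saves the newly allocated unique id in effectId, declared before the first goto target, so the Exit path can unregister with AudioSystem using the id it registered rather than reading it back from an effect object that may never have been created. A small standalone sketch of that register-then-clean-up-by-id pattern; every name here is a hypothetical stand-in, not the AudioFlinger API:

    // Register first, remember the id, and clean up by that saved id on failure.
    #include <cstdio>
    #include <memory>
    #include <stdexcept>

    static int gNextId = 0;
    static int registerResource() { return ++gNextId; }
    static void unregisterResource(int id) { std::printf("unregister %d\n", id); }

    // Simulated object creation that can fail after registration succeeded.
    static std::unique_ptr<int> createObject(int id, bool fail) {
        if (fail) throw std::runtime_error("creation failed");
        return std::make_unique<int>(id);
    }

    static std::unique_ptr<int> createWithCleanup(bool fail) {
        int id = -1;              // saved up front, like effectId above
        bool registered = false;
        try {
            id = registerResource();
            registered = true;
            return createObject(id, fail);
        } catch (...) {
            if (registered) unregisterResource(id);  // id is valid even on failure
            return nullptr;
        }
    }

    int main() {
        createWithCleanup(/*fail=*/true);   // prints "unregister 1"
        createWithCleanup(/*fail=*/false);  // creation succeeds, no cleanup
    }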
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index e43f001..3fb0b07 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1323,92 +1323,6 @@
                                             // rolling counter that is never cleared
     };
 
-    /* The RecordBufferConverter is used for format, channel, and sample rate
-     * conversion for a RecordTrack.
-     *
-     * TODO: Self contained, so move to a separate file later.
-     *
-     * RecordBufferConverter uses the convert() method rather than exposing a
-     * buffer provider interface; this is to save a memory copy.
-     */
-    class RecordBufferConverter
-    {
-    public:
-        RecordBufferConverter(
-                audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
-                uint32_t srcSampleRate,
-                audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
-                uint32_t dstSampleRate);
-
-        ~RecordBufferConverter();
-
-        /* Converts input data from an AudioBufferProvider by format, channelMask,
-         * and sampleRate to a destination buffer.
-         *
-         * Parameters
-         *      dst:  buffer to place the converted data.
-         * provider:  buffer provider to obtain source data.
-         *   frames:  number of frames to convert
-         *
-         * Returns the number of frames converted.
-         */
-        size_t convert(void *dst, AudioBufferProvider *provider, size_t frames);
-
-        // returns NO_ERROR if constructor was successful
-        status_t initCheck() const {
-            // mSrcChannelMask set on successful updateParameters
-            return mSrcChannelMask != AUDIO_CHANNEL_INVALID ? NO_ERROR : NO_INIT;
-        }
-
-        // allows dynamic reconfigure of all parameters
-        status_t updateParameters(
-                audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
-                uint32_t srcSampleRate,
-                audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
-                uint32_t dstSampleRate);
-
-        // called to reset resampler buffers on record track discontinuity
-        void reset() {
-            if (mResampler != NULL) {
-                mResampler->reset();
-            }
-        }
-
-    private:
-        // format conversion when not using resampler
-        void convertNoResampler(void *dst, const void *src, size_t frames);
-
-        // format conversion when using resampler; modifies src in-place
-        void convertResampler(void *dst, /*not-a-const*/ void *src, size_t frames);
-
-        // user provided information
-        audio_channel_mask_t mSrcChannelMask;
-        audio_format_t       mSrcFormat;
-        uint32_t             mSrcSampleRate;
-        audio_channel_mask_t mDstChannelMask;
-        audio_format_t       mDstFormat;
-        uint32_t             mDstSampleRate;
-
-        // derived information
-        uint32_t             mSrcChannelCount;
-        uint32_t             mDstChannelCount;
-        size_t               mDstFrameSize;
-
-        // format conversion buffer
-        void                *mBuf;
-        size_t               mBufFrames;
-        size_t               mBufFrameSize;
-
-        // resampler info
-        AudioResampler      *mResampler;
-
-        bool                 mIsLegacyDownmix;  // legacy stereo to mono conversion needed
-        bool                 mIsLegacyUpmix;    // legacy mono to stereo conversion needed
-        bool                 mRequiresFloat;    // data processing requires float (e.g. resampler)
-        PassthruBufferProvider *mInputConverterProvider;    // converts input to float
-        int8_t               mIdxAry[sizeof(uint32_t) * 8]; // used for channel mask conversion
-    };
-
 #include "RecordTracks.h"
 
             RecordThread(const sp<AudioFlinger>& audioFlinger,
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 48e09c7..f2dd884 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -32,6 +32,7 @@
 
 #include <media/nbaio/Pipe.h>
 #include <media/nbaio/PipeReader.h>
+#include <media/RecordBufferConverter.h>
 #include <audio_utils/minifloat.h>
 
 // ----------------------------------------------------------------------------
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index 1d6787a..c2981a1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -21,7 +21,7 @@
 #include "AudioGain.h"
 #include "TypeConverter.h"
 
-#include <android/log.h>
+#include <log/log.h>
 #include <utils/String8.h>
 
 namespace android {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index f19b43c..dbdcca7 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -23,7 +23,7 @@
 #include "AudioGain.h"
 #include "TypeConverter.h"
 
-#include <android/log.h>
+#include <log/log.h>
 #include <utils/String8.h>
 
 namespace android {
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index d1edb56..5b4d10d 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -48,8 +48,10 @@
     device3/Camera3OutputStream.cpp \
     device3/Camera3ZslStream.cpp \
     device3/Camera3DummyStream.cpp \
+    device3/Camera3SharedOutputStream.cpp \
     device3/StatusTracker.cpp \
     device3/Camera3BufferManager.cpp \
+    device3/Camera3StreamSplitter.cpp \
     gui/RingBufferConsumer.cpp \
     utils/CameraTraces.cpp \
     utils/AutoConditionLock.cpp \
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index d490119..a55c23b 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -152,6 +152,7 @@
     }
 
     List<const CameraMetadata> metadataRequestList;
+    std::list<const SurfaceMap> surfaceMapList;
     submitInfo->mRequestId = mRequestIdCounter;
     uint32_t loopCounter = 0;
 
@@ -191,11 +192,11 @@
         }
 
         /**
-         * Write in the output stream IDs which we calculate from
-         * the capture request's list of surface targets
+         * Write in the output stream IDs and the stream ID to surface ID map,
+         * both of which we calculate from the capture request's list of surface targets
          */
+        SurfaceMap surfaceMap;
         Vector<int32_t> outputStreamIds;
-        outputStreamIds.setCapacity(request.mSurfaceList.size());
         for (sp<Surface> surface : request.mSurfaceList) {
             if (surface == 0) continue;
 
@@ -211,10 +212,16 @@
                         "Request targets Surface that is not part of current capture session");
             }
 
-            int streamId = mStreamMap.valueAt(idx);
-            outputStreamIds.push_back(streamId);
-            ALOGV("%s: Camera %s: Appending output stream %d to request",
-                    __FUNCTION__, mCameraIdStr.string(), streamId);
+            const StreamSurfaceId& streamSurfaceId = mStreamMap.valueAt(idx);
+            if (surfaceMap.find(streamSurfaceId.streamId()) == surfaceMap.end()) {
+                surfaceMap[streamSurfaceId.streamId()] = std::vector<size_t>();
+                outputStreamIds.push_back(streamSurfaceId.streamId());
+            }
+            surfaceMap[streamSurfaceId.streamId()].push_back(streamSurfaceId.surfaceId());
+
+            ALOGV("%s: Camera %s: Appending output stream %d surface %d to request",
+                    __FUNCTION__, mCameraIdStr.string(), streamSurfaceId.streamId(),
+                    streamSurfaceId.surfaceId());
         }
 
         metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
@@ -231,11 +238,13 @@
                 loopCounter, requests.size());
 
         metadataRequestList.push_back(metadata);
+        surfaceMapList.push_back(surfaceMap);
     }
     mRequestIdCounter++;
 
     if (streaming) {
-        err = mDevice->setStreamingRequestList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+        err = mDevice->setStreamingRequestList(metadataRequestList, surfaceMapList,
+                &(submitInfo->mLastFrameNumber));
         if (err != OK) {
             String8 msg = String8::format(
                 "Camera %s:  Got error %s (%d) after trying to set streaming request",
@@ -248,7 +257,8 @@
             mStreamingRequestId = submitInfo->mRequestId;
         }
     } else {
-        err = mDevice->captureList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+        err = mDevice->captureList(metadataRequestList, surfaceMapList,
+                &(submitInfo->mLastFrameNumber));
         if (err != OK) {
             String8 msg = String8::format(
                 "Camera %s: Got error %s (%d) after trying to submit capture request",
@@ -312,8 +322,9 @@
 }
 
 binder::Status CameraDeviceClient::endConfigure(bool isConstrainedHighSpeed) {
-    ALOGV("%s: ending configure (%d input stream, %zu output streams)",
-            __FUNCTION__, mInputStream.configured ? 1 : 0, mStreamMap.size());
+    ALOGV("%s: ending configure (%d input stream, %zu output surfaces)",
+            __FUNCTION__, mInputStream.configured ? 1 : 0,
+            mStreamMap.size());
 
     binder::Status res;
     if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
@@ -376,7 +387,7 @@
     }
 
     bool isInput = false;
-    ssize_t index = NAME_NOT_FOUND;
+    std::vector<sp<IBinder>> surfaces;
     ssize_t dIndex = NAME_NOT_FOUND;
 
     if (mInputStream.configured && mInputStream.id == streamId) {
@@ -384,26 +395,24 @@
     } else {
         // Guard against trying to delete non-created streams
         for (size_t i = 0; i < mStreamMap.size(); ++i) {
-            if (streamId == mStreamMap.valueAt(i)) {
-                index = i;
+            if (streamId == mStreamMap.valueAt(i).streamId()) {
+                surfaces.push_back(mStreamMap.keyAt(i));
+            }
+        }
+
+        // See if this stream is one of the deferred streams.
+        for (size_t i = 0; i < mDeferredStreams.size(); ++i) {
+            if (streamId == mDeferredStreams[i]) {
+                dIndex = i;
                 break;
             }
         }
 
-        if (index == NAME_NOT_FOUND) {
-            // See if this stream is one of the deferred streams.
-            for (size_t i = 0; i < mDeferredStreams.size(); ++i) {
-                if (streamId == mDeferredStreams[i]) {
-                    dIndex = i;
-                    break;
-                }
-            }
-            if (dIndex == NAME_NOT_FOUND) {
-                String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no such"
-                        " stream created yet", mCameraIdStr.string(), streamId);
-                ALOGW("%s: %s", __FUNCTION__, msg.string());
-                return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
-            }
+        if (surfaces.empty() && dIndex == NAME_NOT_FOUND) {
+            String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no such"
+                    " stream created yet", mCameraIdStr.string(), streamId);
+            ALOGW("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
         }
     }
 
@@ -418,10 +427,14 @@
     } else {
         if (isInput) {
             mInputStream.configured = false;
-        } else if (index != NAME_NOT_FOUND) {
-            mStreamMap.removeItemsAt(index);
         } else {
-            mDeferredStreams.removeItemsAt(dIndex);
+            for (auto& surface : surfaces) {
+                mStreamMap.removeItem(surface);
+            }
+
+            if (dIndex != NAME_NOT_FOUND) {
+                mDeferredStreams.removeItemsAt(dIndex);
+            }
         }
     }
 
@@ -439,14 +452,39 @@
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
-    bool deferredConsumer = bufferProducer == NULL;
+    const std::vector<sp<IGraphicBufferProducer>>& bufferProducers =
+            outputConfiguration.getGraphicBufferProducers();
+    size_t numBufferProducers = bufferProducers.size();
+
+    if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
+        ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
+              __FUNCTION__, bufferProducers.size(), MAX_SURFACES_PER_STREAM);
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Surface count is too high");
+    }
+    if (numBufferProducers == 0) {
+        ALOGE("%s: GraphicBufferProducer count 0 is not valid", __FUNCTION__);
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Malformed surface");
+    }
+    size_t deferredConsumerCnt = 0;
+    for (auto bufferProducer : bufferProducers) {
+        if (bufferProducer == nullptr) {
+            deferredConsumerCnt++;
+        }
+    }
+    if (deferredConsumerCnt > MAX_DEFERRED_SURFACES) {
+        ALOGE("%s: %zu deferred consumer is not supported", __FUNCTION__, deferredConsumerCnt);
+        return STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                "More than %d deferred consumer", MAX_DEFERRED_SURFACES);
+    }
+    bool deferredConsumer = deferredConsumerCnt > 0;
+    bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 1;
     int surfaceType = outputConfiguration.getSurfaceType();
     bool validSurfaceType = ((surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) ||
             (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_TEXTURE));
+
     if (deferredConsumer && !validSurfaceType) {
         ALOGE("%s: Target surface is invalid: bufferProducer = %p, surfaceType = %d.",
-                __FUNCTION__, bufferProducer.get(), surfaceType);
+                __FUNCTION__, bufferProducers[0].get(), surfaceType);
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
     }
 
@@ -454,103 +492,165 @@
         return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
     }
 
+    std::vector<sp<Surface>> surfaces;
+    std::vector<sp<IBinder>> binders;
+    int streamWidth, streamHeight, streamFormat;
     int width, height, format;
+    int32_t streamConsumerUsage;
     int32_t consumerUsage;
-    android_dataspace dataSpace;
+    android_dataspace dataSpace, streamDataSpace;
     status_t err;
 
     // Create stream for deferred surface case.
-    if (deferredConsumer) {
+    if (deferredConsumerOnly) {
         return createDeferredSurfaceStreamLocked(outputConfiguration, newStreamId);
     }
 
-    // Don't create multiple streams for the same target surface
-    {
-        ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
-        if (index != NAME_NOT_FOUND) {
-            String8 msg = String8::format("Camera %s: Surface already has a stream created for it "
-                    "(ID %zd)", mCameraIdStr.string(), index);
-            ALOGW("%s: %s", __FUNCTION__, msg.string());
-            return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
+    bool isFirstSurface = true;
+    streamWidth = -1;
+    streamHeight = -1;
+    streamFormat = -1;
+    streamDataSpace = HAL_DATASPACE_UNKNOWN;
+    streamConsumerUsage = 0;
+
+    for (auto& bufferProducer : bufferProducers) {
+        if (bufferProducer == nullptr) {
+            continue;
         }
-    }
 
-    // HACK b/10949105
-    // Query consumer usage bits to set async operation mode for
-    // GLConsumer using controlledByApp parameter.
-    bool useAsync = false;
-    if ((err = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
-            &consumerUsage)) != OK) {
-        String8 msg = String8::format("Camera %s: Failed to query Surface consumer usage: %s (%d)",
-                mCameraIdStr.string(), strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
-    if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
-        ALOGW("%s: Camera %s with consumer usage flag: 0x%x: Forcing asynchronous mode for stream",
-                __FUNCTION__, mCameraIdStr.string(), consumerUsage);
-        useAsync = true;
-    }
+        // Don't create multiple streams for the same target surface
+        {
+            ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
+            if (index != NAME_NOT_FOUND) {
+                String8 msg = String8::format("Camera %s: Surface already has a stream created for it "
+                        "(ID %zd)", mCameraIdStr.string(), index);
+                ALOGW("%s: %s", __FUNCTION__, msg.string());
+                return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
+            }
+        }
 
-    int32_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
-                              GRALLOC_USAGE_RENDERSCRIPT;
-    int32_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
-                           GraphicBuffer::USAGE_HW_TEXTURE |
-                           GraphicBuffer::USAGE_HW_COMPOSER;
-    bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
-            (consumerUsage & allowedFlags) != 0;
+        // HACK b/10949105
+        // Query consumer usage bits to set async operation mode for
+        // GLConsumer using controlledByApp parameter.
+        bool useAsync = false;
+        if ((err = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
+                &consumerUsage)) != OK) {
+            String8 msg = String8::format("Camera %s: Failed to query Surface consumer usage: %s (%d)",
+                    mCameraIdStr.string(), strerror(-err), err);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+        }
+        if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
+            ALOGW("%s: Camera %s with consumer usage flag: 0x%x: Forcing asynchronous mode for stream",
+                    __FUNCTION__, mCameraIdStr.string(), consumerUsage);
+            useAsync = true;
+        }
 
-    sp<IBinder> binder = IInterface::asBinder(bufferProducer);
-    sp<Surface> surface = new Surface(bufferProducer, useAsync);
-    ANativeWindow *anw = surface.get();
+        int32_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
+                                  GRALLOC_USAGE_RENDERSCRIPT;
+        int32_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
+                               GraphicBuffer::USAGE_HW_TEXTURE |
+                               GraphicBuffer::USAGE_HW_COMPOSER;
+        bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
+                (consumerUsage & allowedFlags) != 0;
 
-    if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
-        String8 msg = String8::format("Camera %s: Failed to query Surface width: %s (%d)",
-                mCameraIdStr.string(), strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
-    if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
-        String8 msg = String8::format("Camera %s: Failed to query Surface height: %s (%d)",
-                mCameraIdStr.string(), strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
-    if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
-        String8 msg = String8::format("Camera %s: Failed to query Surface format: %s (%d)",
-                mCameraIdStr.string(), strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
-    if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
-                            reinterpret_cast<int*>(&dataSpace))) != OK) {
-        String8 msg = String8::format("Camera %s: Failed to query Surface dataspace: %s (%d)",
-                mCameraIdStr.string(), strerror(-err), err);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
-    }
+        sp<IBinder> binder = IInterface::asBinder(bufferProducer);
+        sp<Surface> surface = new Surface(bufferProducer, useAsync);
+        ANativeWindow *anw = surface.get();
 
-    // FIXME: remove this override since the default format should be
-    //       IMPLEMENTATION_DEFINED. b/9487482
-    if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
-        format <= HAL_PIXEL_FORMAT_BGRA_8888) {
-        ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
-              __FUNCTION__, mCameraIdStr.string(), format);
-        format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
-    }
+        if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
+            String8 msg = String8::format("Camera %s: Failed to query Surface width: %s (%d)",
+                     mCameraIdStr.string(), strerror(-err), err);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+        }
+        if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
+            String8 msg = String8::format("Camera %s: Failed to query Surface height: %s (%d)",
+                    mCameraIdStr.string(), strerror(-err), err);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+        }
+        if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+            String8 msg = String8::format("Camera %s: Failed to query Surface format: %s (%d)",
+                    mCameraIdStr.string(), strerror(-err), err);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+        }
+        if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
+                                reinterpret_cast<int*>(&dataSpace))) != OK) {
+            String8 msg = String8::format("Camera %s: Failed to query Surface dataspace: %s (%d)",
+                    mCameraIdStr.string(), strerror(-err), err);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+        }
 
-    // Round dimensions to the nearest dimensions available for this format
-    if (flexibleConsumer && isPublicFormat(format) &&
-            !CameraDeviceClient::roundBufferDimensionNearest(width, height,
-            format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
-        String8 msg = String8::format("Camera %s: No supported stream configurations with "
-                "format %#x defined, failed to create output stream", mCameraIdStr.string(), format);
-        ALOGE("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        // FIXME: remove this override since the default format should be
+        //       IMPLEMENTATION_DEFINED. b/9487482
+        if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
+            format <= HAL_PIXEL_FORMAT_BGRA_8888) {
+            ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
+                  __FUNCTION__, mCameraIdStr.string(), format);
+            format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+        }
+        // Round dimensions to the nearest dimensions available for this format
+        if (flexibleConsumer && isPublicFormat(format) &&
+                !CameraDeviceClient::roundBufferDimensionNearest(width, height,
+                format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
+            String8 msg = String8::format("Camera %s: No supported stream configurations with "
+                    "format %#x defined, failed to create output stream",
+                    mCameraIdStr.string(), format);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+        if (isFirstSurface) {
+            streamWidth = width;
+            streamHeight = height;
+            streamFormat = format;
+            streamDataSpace = dataSpace;
+            streamConsumerUsage = consumerUsage;
+            isFirstSurface = false;
+        }
+        if (width != streamWidth) {
+            String8 msg = String8::format("Camera %s:Surface width doesn't match: %d vs %d",
+                     mCameraIdStr.string(), width, streamWidth);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+        if (height != streamHeight) {
+            String8 msg = String8::format("Camera %s:Surface height doesn't match: %d vs %d",
+                     mCameraIdStr.string(), height, streamHeight);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+        if (format != streamFormat) {
+            String8 msg = String8::format("Camera %s:Surface format doesn't match: %d vs %d",
+                     mCameraIdStr.string(), format, streamFormat);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+        if (dataSpace != streamDataSpace) {
+            String8 msg = String8::format("Camera %s:Surface dataSpace doesn't match: %d vs %d",
+                     mCameraIdStr.string(), dataSpace, streamDataSpace);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+        // On the native side, there isn't a way to check whether two surfaces come
+        // from the same surface class type. Use the usage flag to approximate the comparison.
+        // TODO: Support surfaces of different surface class types.
+        if (consumerUsage != streamConsumerUsage) {
+            String8 msg = String8::format(
+                    "Camera %s:Surface usage flag doesn't match 0x%x vs 0x%x",
+                    mCameraIdStr.string(), consumerUsage, streamConsumerUsage);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+
+        binders.push_back(binder);
+        surfaces.push_back(surface);
     }
 
     int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
-    err = mDevice->createStream(surface, width, height, format, dataSpace,
+    err = mDevice->createStream(surfaces, deferredConsumer, width, height, format, dataSpace,
             static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
             &streamId, outputConfiguration.getSurfaceSetID());
 
@@ -559,11 +659,15 @@
                 "Camera %s: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
                 mCameraIdStr.string(), width, height, format, dataSpace, strerror(-err), err);
     } else {
-        mStreamMap.add(binder, streamId);
-
+        int i = 0;
+        for (auto& binder : binders) {
+            ALOGV("%s: mStreamMap add binder %p streamId %d, surfaceId %d",
+                    __FUNCTION__, binder.get(), streamId, i);
+            mStreamMap.add(binder, StreamSurfaceId(streamId, i++));
+        }
         ALOGV("%s: Camera %s: Successfully created a new stream ID %d for output surface"
-                " (%d x %d) with format 0x%x.",
-              __FUNCTION__, mCameraIdStr.string(), streamId, width, height, format);
+                    " (%d x %d) with format 0x%x.",
+                  __FUNCTION__, mCameraIdStr.string(), streamId, width, height, format);
 
         // Set transform flags to ensure preview to be rotated correctly.
         res = setStreamTransformLocked(streamId);
@@ -600,7 +704,9 @@
         consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
     }
     int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
-    err = mDevice->createStream(/*surface*/nullptr, width, height, format, dataSpace,
+    std::vector<sp<Surface>> noSurface;
+    err = mDevice->createStream(noSurface, /*hasDeferredConsumer*/true, width,
+            height, format, dataSpace,
             static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
             &streamId, outputConfiguration.getSurfaceSetID(), consumerUsage);
 
@@ -944,7 +1050,7 @@
     // Guard against trying to prepare non-created streams
     ssize_t index = NAME_NOT_FOUND;
     for (size_t i = 0; i < mStreamMap.size(); ++i) {
-        if (streamId == mStreamMap.valueAt(i)) {
+        if (streamId == mStreamMap.valueAt(i).streamId()) {
             index = i;
             break;
         }
@@ -984,7 +1090,7 @@
     // Guard against trying to prepare non-created streams
     ssize_t index = NAME_NOT_FOUND;
     for (size_t i = 0; i < mStreamMap.size(); ++i) {
-        if (streamId == mStreamMap.valueAt(i)) {
+        if (streamId == mStreamMap.valueAt(i).streamId()) {
             index = i;
             break;
         }
@@ -1032,7 +1138,7 @@
     // Guard against trying to prepare non-created streams
     ssize_t index = NAME_NOT_FOUND;
     for (size_t i = 0; i < mStreamMap.size(); ++i) {
-        if (streamId == mStreamMap.valueAt(i)) {
+        if (streamId == mStreamMap.valueAt(i).streamId()) {
             index = i;
             break;
         }
@@ -1070,26 +1176,42 @@
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
+    const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
+            outputConfiguration.getGraphicBufferProducers();
 
     // Client code should guarantee that the surface is from SurfaceView or SurfaceTexture.
-    if (bufferProducer == NULL) {
-        ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
+    // It is also saved in the last entry of the graphicBufferProducer list.
+    if (bufferProducers.size() == 0) {
+        ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
     }
-    // Check if this stram id is one of the deferred streams
-    ssize_t index = NAME_NOT_FOUND;
-    for (size_t i = 0; i < mDeferredStreams.size(); i++) {
-        if (streamId == mDeferredStreams[i]) {
-            index = i;
-            break;
-        }
+
+    // Right now, only the first surface in the OutputConfiguration is allowed to
+    // be deferred; all other surfaces are checked for being non-null on the
+    // Java side.
+    sp<IGraphicBufferProducer> bufferProducer = bufferProducers[0];
+    if (bufferProducer == nullptr) {
+        ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                "Target Surface is invalid");
     }
-    if (index == NAME_NOT_FOUND) {
-        String8 msg = String8::format("Camera %s: deferred surface is set to a unknown stream"
-                "(ID %d)", mCameraIdStr.string(), streamId);
-        ALOGW("%s: %s", __FUNCTION__, msg.string());
-        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+
+    // Check if this stream id is one of the deferred-only streams
+    ssize_t index = NAME_NOT_FOUND;
+    if (bufferProducers.size() == 1) {
+        for (size_t i = 0; i < mDeferredStreams.size(); i++) {
+            if (streamId == mDeferredStreams[i]) {
+                index = i;
+                break;
+            }
+        }
+
+        if (index == NAME_NOT_FOUND) {
+            String8 msg = String8::format("Camera %s: deferred surface is set to a unknown stream"
+                    "(ID %d)", mCameraIdStr.string(), streamId);
+            ALOGW("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
     }
 
     if (!mDevice.get()) {
@@ -1116,8 +1238,12 @@
     err = mDevice->setConsumerSurface(streamId, consumerSurface);
     if (err == OK) {
         sp<IBinder> binder = IInterface::asBinder(bufferProducer);
-        mStreamMap.add(binder, streamId);
-        mDeferredStreams.removeItemsAt(index);
+        ALOGV("%s: mStreamMap add binder %p streamId %d, surfaceId %zu", __FUNCTION__,
+                binder.get(), streamId, bufferProducers.size()-1);
+        mStreamMap.add(binder, StreamSurfaceId(streamId, bufferProducers.size()-1));
+        if (index != NAME_NOT_FOUND) {
+            mDeferredStreams.removeItemsAt(index);
+        }
     } else if (err == NO_INIT) {
         res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
                 "Camera %s: Deferred surface is invalid: %s (%d)",
@@ -1152,9 +1278,11 @@
         result.append("    No input stream configured.\n");
     }
     if (!mStreamMap.isEmpty()) {
-        result.append("    Current output stream IDs:\n");
+        result.append("    Current output stream/surface IDs:\n");
         for (size_t i = 0; i < mStreamMap.size(); i++) {
-            result.appendFormat("      Stream %d\n", mStreamMap.valueAt(i));
+            result.appendFormat("      Stream %d Surface %d\n",
+                                mStreamMap.valueAt(i).streamId(),
+                                mStreamMap.valueAt(i).surfaceId());
         }
     } else if (!mDeferredStreams.isEmpty()) {
         result.append("    Current deferred surface output stream IDs:\n");
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 2226dd2..047ccf2 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -180,6 +180,34 @@
     status_t              getRotationTransformLocked(/*out*/int32_t* transform);
 
 private:
+    // StreamSurfaceId encapsulates streamId + surfaceId for a particular surface.
+    // streamId specifies the index of the stream the surface belongs to, and the
+    // surfaceId specifies the index of the surface within the stream. (One stream
+    // can contain multiple surfaces.)
+    class StreamSurfaceId final {
+    public:
+        StreamSurfaceId() {
+            mStreamId = -1;
+            mSurfaceId = -1;
+        }
+        StreamSurfaceId(int32_t streamId, int32_t surfaceId) {
+            mStreamId = streamId;
+            mSurfaceId = surfaceId;
+        }
+        int32_t streamId() const {
+            return mStreamId;
+        }
+        int32_t surfaceId() const {
+            return mSurfaceId;
+        }
+
+    private:
+        int32_t mStreamId;
+        int32_t mSurfaceId;
+
+    }; // class StreamSurfaceId
+
+private:
     /** ICameraDeviceUser interface-related private members */
 
     /** Preview callback related members */
@@ -216,8 +244,8 @@
     //check if format is not custom format
     static bool isPublicFormat(int32_t format);
 
-    // IGraphicsBufferProducer binder -> Stream ID for output streams
-    KeyedVector<sp<IBinder>, int> mStreamMap;
+    // IGraphicsBufferProducer binder -> Stream ID + Surface ID for output streams
+    KeyedVector<sp<IBinder>, StreamSurfaceId> mStreamMap;
 
     struct InputStreamConfiguration {
         bool configured;
@@ -238,6 +266,9 @@
     // as there are no surfaces available and can not be put into mStreamMap. Once the deferred
     // Surface is configured, the stream id will be moved to mStreamMap.
     Vector<int32_t> mDeferredStreams;
+
+    static const int32_t MAX_SURFACES_PER_STREAM = 2;
+    static const int32_t MAX_DEFERRED_SURFACES = 1;
 };
 
 }; // namespace android
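With mStreamMap now keyed by surface binder and valued by StreamSurfaceId, operations that take only a stream id (deleteStream, prepare, tearDown) scan the map for every entry whose streamId() matches. A minimal sketch of that reverse lookup and removal, again with ints standing in for binder handles (illustrative names only):

    // Reverse lookup: collect every surface handle that belongs to one stream id.
    #include <cstdio>
    #include <map>
    #include <vector>

    struct StreamSurfaceId { int streamId; int surfaceId; };
    using SurfaceHandle = int;

    int main() {
        std::map<SurfaceHandle, StreamSurfaceId> streamMap = {
            {10, {0, 0}}, {11, {0, 1}}, {20, {1, 0}},
        };

        const int streamToDelete = 0;
        std::vector<SurfaceHandle> surfaces;
        for (const auto& entry : streamMap) {
            if (entry.second.streamId == streamToDelete) {
                surfaces.push_back(entry.first);   // collect first, erase afterwards
            }
        }
        for (SurfaceHandle handle : surfaces) {
            streamMap.erase(handle);               // safe: no iteration in progress
        }
        std::printf("removed %zu surface(s), %zu entries left\n",
                    surfaces.size(), streamMap.size());
    }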
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 40b368e..a873402 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H
 #define ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H
 
+#include <list>
+
 #include <utils/RefBase.h>
 #include <utils/String8.h>
 #include <utils/String16.h>
@@ -37,6 +39,9 @@
 
 class CameraProviderManager;
 
+// Mapping of output stream index to surface ids
+typedef std::unordered_map<int, std::vector<size_t> > SurfaceMap;
+
 /**
  * Base interface for version >= 2 camera device classes, which interface to
  * camera HAL device versions >= 2.
@@ -73,6 +78,7 @@
      * Output lastFrameNumber is the expected last frame number of the list of requests.
      */
     virtual status_t captureList(const List<const CameraMetadata> &requests,
+                                 const std::list<const SurfaceMap> &surfaceMaps,
                                  int64_t *lastFrameNumber = NULL) = 0;
 
     /**
@@ -88,6 +94,7 @@
      * Output lastFrameNumber is the last frame number of the previous streaming request.
      */
     virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+                                             const std::list<const SurfaceMap> &surfaceMaps,
                                              int64_t *lastFrameNumber = NULL) = 0;
 
     /**
@@ -117,6 +124,19 @@
             uint32_t consumerUsage = 0) = 0;
 
     /**
+     * Create an output stream of the requested size, format, rotation and
+     * dataspace with a number of consumers.
+     *
+     * For HAL_PIXEL_FORMAT_BLOB formats, the width and height should be the
+     * logical dimensions of the buffer, not the number of bytes.
+     */
+    virtual status_t createStream(const std::vector<sp<Surface>>& consumers,
+            bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+            int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
+            uint32_t consumerUsage = 0) = 0;
+
+    /**
      * Create an input stream of width, height, and format.
      *
      * Return value is the stream ID if non-negative and an error if negative.
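captureList and setStreamingRequestList now carry one SurfaceMap per CameraMetadata entry, and Camera3Device walks the two lists in lockstep, rejecting the call when one list runs out before the other. A tiny standalone sketch of that parallel-iteration-with-size-check pattern, independent of the camera types:

    // Walk two parallel lists in lockstep; flag an error if their lengths differ.
    #include <cstdio>
    #include <list>
    #include <string>

    int main() {
        std::list<std::string> requests = {"req0", "req1"};
        std::list<int> surfaceMaps = {0, 1, 2};   // deliberately one element too long

        auto reqIt = requests.begin();
        auto mapIt = surfaceMaps.begin();
        for (; reqIt != requests.end() && mapIt != surfaceMaps.end(); ++reqIt, ++mapIt) {
            std::printf("pairing %s with surface map %d\n", reqIt->c_str(), *mapIt);
        }
        if (reqIt != requests.end() || mapIt != surfaceMaps.end()) {
            std::printf("error: lists are not the same size\n");  // mirrors the BAD_VALUE path
        }
    }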
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 6f64dc3..ae62e74 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -53,6 +53,7 @@
 #include "device3/Camera3InputStream.h"
 #include "device3/Camera3ZslStream.h"
 #include "device3/Camera3DummyStream.h"
+#include "device3/Camera3SharedOutputStream.h"
 #include "CameraService.h"
 
 using namespace android::camera3;
@@ -792,7 +793,9 @@
 }
 
 status_t Camera3Device::convertMetadataListToRequestListLocked(
-        const List<const CameraMetadata> &metadataList, bool repeating,
+        const List<const CameraMetadata> &metadataList,
+        const std::list<const SurfaceMap> &surfaceMaps,
+        bool repeating,
         RequestList *requestList) {
     if (requestList == NULL) {
         CLOGE("requestList cannot be NULL.");
@@ -800,9 +803,11 @@
     }
 
     int32_t burstId = 0;
-    for (List<const CameraMetadata>::const_iterator it = metadataList.begin();
-            it != metadataList.end(); ++it) {
-        sp<CaptureRequest> newRequest = setUpRequestLocked(*it);
+    List<const CameraMetadata>::const_iterator metadataIt = metadataList.begin();
+    std::list<const SurfaceMap>::const_iterator surfaceMapIt = surfaceMaps.begin();
+    for (; metadataIt != metadataList.end() && surfaceMapIt != surfaceMaps.end();
+            ++metadataIt, ++surfaceMapIt) {
+        sp<CaptureRequest> newRequest = setUpRequestLocked(*metadataIt, *surfaceMapIt);
         if (newRequest == 0) {
             CLOGE("Can't create capture request");
             return BAD_VALUE;
@@ -812,12 +817,12 @@
 
         // Setup burst Id and request Id
         newRequest->mResultExtras.burstId = burstId++;
-        if (it->exists(ANDROID_REQUEST_ID)) {
-            if (it->find(ANDROID_REQUEST_ID).count == 0) {
+        if (metadataIt->exists(ANDROID_REQUEST_ID)) {
+            if (metadataIt->find(ANDROID_REQUEST_ID).count == 0) {
                 CLOGE("RequestID entry exists; but must not be empty in metadata");
                 return BAD_VALUE;
             }
-            newRequest->mResultExtras.requestId = it->find(ANDROID_REQUEST_ID).data.i32[0];
+            newRequest->mResultExtras.requestId = metadataIt->find(ANDROID_REQUEST_ID).data.i32[0];
         } else {
             CLOGE("RequestID does not exist in metadata");
             return BAD_VALUE;
@@ -827,6 +832,10 @@
 
         ALOGV("%s: requestId = %" PRId32, __FUNCTION__, newRequest->mResultExtras.requestId);
     }
+    if (metadataIt != metadataList.end() || surfaceMapIt != surfaceMaps.end()) {
+        ALOGE("%s: metadataList and surfaceMaps are not the same size!", __FUNCTION__);
+        return BAD_VALUE;
+    }
 
     // Setup batch size if this is a high speed video recording request.
     if (mIsConstrainedHighSpeedConfiguration && requestList->size() > 0) {
@@ -846,12 +855,31 @@
     ATRACE_CALL();
 
     List<const CameraMetadata> requests;
+    std::list<const SurfaceMap> surfaceMaps;
+    convertToRequestList(requests, surfaceMaps, request);
+
+    return captureList(requests, surfaceMaps, /*lastFrameNumber*/NULL);
+}
+
+void Camera3Device::convertToRequestList(List<const CameraMetadata>& requests,
+        std::list<const SurfaceMap>& surfaceMaps,
+        const CameraMetadata& request) {
     requests.push_back(request);
-    return captureList(requests, /*lastFrameNumber*/NULL);
+
+    SurfaceMap surfaceMap;
+    camera_metadata_ro_entry streams = request.find(ANDROID_REQUEST_OUTPUT_STREAMS);
+    // With no surface list passed in, streams and surfaces have a 1-to-1
+    // mapping, so the surface index is 0 for each stream in the surfaceMap.
+    for (size_t i = 0; i < streams.count; i++) {
+        surfaceMap[streams.data.i32[i]].push_back(0);
+    }
+    surfaceMaps.push_back(surfaceMap);
 }
 
 status_t Camera3Device::submitRequestsHelper(
-        const List<const CameraMetadata> &requests, bool repeating,
+        const List<const CameraMetadata> &requests,
+        const std::list<const SurfaceMap> &surfaceMaps,
+        bool repeating,
         /*out*/
         int64_t *lastFrameNumber) {
     ATRACE_CALL();
@@ -866,8 +894,8 @@
 
     RequestList requestList;
 
-    res = convertMetadataListToRequestListLocked(requests, repeating,
-            /*out*/&requestList);
+    res = convertMetadataListToRequestListLocked(requests, surfaceMaps,
+            repeating, /*out*/&requestList);
     if (res != OK) {
         // error logged by previous call
         return res;
@@ -1035,10 +1063,11 @@
 }
 
 status_t Camera3Device::captureList(const List<const CameraMetadata> &requests,
+                                    const std::list<const SurfaceMap> &surfaceMaps,
                                     int64_t *lastFrameNumber) {
     ATRACE_CALL();
 
-    return submitRequestsHelper(requests, /*repeating*/false, lastFrameNumber);
+    return submitRequestsHelper(requests, surfaceMaps, /*repeating*/false, lastFrameNumber);
 }
 
 status_t Camera3Device::setStreamingRequest(const CameraMetadata &request,
@@ -1046,19 +1075,23 @@
     ATRACE_CALL();
 
     List<const CameraMetadata> requests;
-    requests.push_back(request);
-    return setStreamingRequestList(requests, /*lastFrameNumber*/NULL);
+    std::list<const SurfaceMap> surfaceMaps;
+    convertToRequestList(requests, surfaceMaps, request);
+
+    return setStreamingRequestList(requests, /*surfaceMap*/surfaceMaps,
+                                   /*lastFrameNumber*/NULL);
 }
 
 status_t Camera3Device::setStreamingRequestList(const List<const CameraMetadata> &requests,
+                                                const std::list<const SurfaceMap> &surfaceMaps,
                                                 int64_t *lastFrameNumber) {
     ATRACE_CALL();
 
-    return submitRequestsHelper(requests, /*repeating*/true, lastFrameNumber);
+    return submitRequestsHelper(requests, surfaceMaps, /*repeating*/true, lastFrameNumber);
 }
 
 sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked(
-        const CameraMetadata &request) {
+        const CameraMetadata &request, const SurfaceMap &surfaceMap) {
     status_t res;
 
     if (mStatus == STATUS_UNCONFIGURED || mNeedConfig) {
@@ -1074,7 +1107,7 @@
         }
     }
 
-    sp<CaptureRequest> newRequest = createCaptureRequest(request);
+    sp<CaptureRequest> newRequest = createCaptureRequest(request, surfaceMap);
     return newRequest;
 }
 
@@ -1258,8 +1291,27 @@
 }
 
 status_t Camera3Device::createStream(sp<Surface> consumer,
-        uint32_t width, uint32_t height, int format, android_dataspace dataSpace,
-        camera3_stream_rotation_t rotation, int *id, int streamSetId, uint32_t consumerUsage) {
+            uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+            int streamSetId, uint32_t consumerUsage) {
+    ATRACE_CALL();
+
+    if (consumer == nullptr) {
+        ALOGE("%s: consumer must not be null", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    std::vector<sp<Surface>> consumers;
+    consumers.push_back(consumer);
+
+    return createStream(consumers, /*hasDeferredConsumer*/ false, width, height,
+            format, dataSpace, rotation, id, streamSetId, consumerUsage);
+}
+
+status_t Camera3Device::createStream(const std::vector<sp<Surface>>& consumers,
+        bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+        int streamSetId, uint32_t consumerUsage) {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
@@ -1303,18 +1355,24 @@
         streamSetId = CAMERA3_STREAM_SET_ID_INVALID;
     }
 
+    if (consumers.size() == 0 && !hasDeferredConsumer) {
+        ALOGE("%s: Number of consumers cannot be smaller than 1", __FUNCTION__);
+        return BAD_VALUE;
+    }
     // HAL3.1 doesn't support deferred consumer stream creation as it requires buffer registration
     // which requires a consumer surface to be available.
-    if (consumer == nullptr && mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
+    if (hasDeferredConsumer && mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
         ALOGE("HAL3.1 doesn't support deferred consumer stream creation");
         return BAD_VALUE;
     }
 
-    if (consumer == nullptr && format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+    if (hasDeferredConsumer && format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
         ALOGE("Deferred consumer stream creation only support IMPLEMENTATION_DEFINED format");
         return BAD_VALUE;
     }
 
+    bool streamSharing = consumers.size() > 1 || (consumers.size() > 0 && hasDeferredConsumer);
+
     // Use legacy dataspace values for older HALs
     if (mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_3) {
         dataSpace = mapToLegacyDataspace(dataSpace);
@@ -1334,7 +1392,7 @@
                 return BAD_VALUE;
             }
         }
-        newStream = new Camera3OutputStream(mNextStreamId, consumer,
+        newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, blobBufferSize, format, dataSpace, rotation,
                 mTimestampOffset, streamSetId);
     } else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
@@ -1343,15 +1401,19 @@
             SET_ERR_L("Invalid RAW opaque buffer size %zd", rawOpaqueBufferSize);
             return BAD_VALUE;
         }
-        newStream = new Camera3OutputStream(mNextStreamId, consumer,
+        newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
                 mTimestampOffset, streamSetId);
-    } else if (consumer == nullptr) {
+    } else if (consumers.size() == 0 && hasDeferredConsumer) {
         newStream = new Camera3OutputStream(mNextStreamId,
                 width, height, format, consumerUsage, dataSpace, rotation,
                 mTimestampOffset, streamSetId);
+    } else if (streamSharing) {
+        newStream = new Camera3SharedOutputStream(mNextStreamId, consumers,
+                hasDeferredConsumer, width, height, format, consumerUsage,
+                dataSpace, rotation, mTimestampOffset, streamSetId);
     } else {
-        newStream = new Camera3OutputStream(mNextStreamId, consumer,
+        newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, format, dataSpace, rotation,
                 mTimestampOffset, streamSetId);
     }
@@ -2029,16 +2091,18 @@
         return res;
     }
 
-    if (!stream->isConfiguring()) {
-        CLOGE("Stream %d was already fully configured.", streamId);
-        return INVALID_OPERATION;
-    }
+    if (stream->isConsumerConfigurationDeferred()) {
+        if (!stream->isConfiguring()) {
+            CLOGE("Stream %d was already fully configured.", streamId);
+            return INVALID_OPERATION;
+        }
 
-    res = stream->finishConfiguration();
-    if (res != OK) {
-        SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
-                stream->getId(), strerror(-res), res);
-        return res;
+        res = stream->finishConfiguration();
+        if (res != OK) {
+            SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
+                   stream->getId(), strerror(-res), res);
+            return res;
+        }
     }
 
     return OK;
@@ -2049,7 +2113,7 @@
  */
 
 sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest(
-        const CameraMetadata &request) {
+        const CameraMetadata &request, const SurfaceMap &surfaceMap) {
     ATRACE_CALL();
     status_t res;
 
@@ -2104,10 +2168,17 @@
                 mOutputStreams.editValueAt(idx);
 
         // It is illegal to include a deferred consumer output stream into a request
-        if (stream->isConsumerConfigurationDeferred()) {
-            CLOGE("Stream %d hasn't finished configuration yet due to deferred consumer",
-                    stream->getId());
-            return NULL;
+        auto iter = surfaceMap.find(streams.data.i32[i]);
+        if (iter != surfaceMap.end()) {
+            const std::vector<size_t>& surfaces = iter->second;
+            for (const auto& surface : surfaces) {
+                if (stream->isConsumerConfigurationDeferred(surface)) {
+                    CLOGE("Stream %d surface %zu hasn't finished configuration yet "
+                          "due to deferred consumer", stream->getId(), surface);
+                    return NULL;
+                }
+            }
+            newRequest->mOutputSurfaces[i] = surfaces;
         }
 
         // Lazy completion of stream configuration (allocation/registration)
@@ -3927,6 +3998,14 @@
                 return TIMED_OUT;
             }
             halRequest->num_output_buffers++;
+
+            res = outputStream->notifyRequestedSurfaces(halRequest->frame_number,
+                    captureRequest->mOutputSurfaces[i]);
+            if (res != OK) {
+                ALOGE("RequestThread: Cannot register output surfaces: %s (%d)",
+                      strerror(-res), res);
+                return INVALID_OPERATION;
+            }
         }
         totalNumBuffers += halRequest->num_output_buffers;
 
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 217c8b7..fe4508d 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -95,10 +95,12 @@
     // idle state
     status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL) override;
     status_t captureList(const List<const CameraMetadata> &requests,
+            const std::list<const SurfaceMap> &surfaceMaps,
             int64_t *lastFrameNumber = NULL) override;
     status_t setStreamingRequest(const CameraMetadata &request,
             int64_t *lastFrameNumber = NULL) override;
     status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+            const std::list<const SurfaceMap> &surfaceMaps,
             int64_t *lastFrameNumber = NULL) override;
     status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL) override;
 
@@ -114,6 +116,12 @@
             android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
             int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
             uint32_t consumerUsage = 0) override;
+    status_t createStream(const std::vector<sp<Surface>>& consumers,
+            bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+            int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
+            uint32_t consumerUsage = 0) override;
+
     status_t createInputStream(
             uint32_t width, uint32_t height, int format,
             int *id) override;
@@ -342,6 +350,7 @@
         camera3_stream_buffer_t             mInputBuffer;
         Vector<sp<camera3::Camera3OutputStreamInterface> >
                                             mOutputStreams;
+        SurfaceMap                          mOutputSurfaces;
         CaptureResultExtras                 mResultExtras;
         // Used to cancel AE precapture trigger for devices doesn't support
         // CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
@@ -360,11 +369,18 @@
 
     status_t convertMetadataListToRequestListLocked(
             const List<const CameraMetadata> &metadataList,
+            const std::list<const SurfaceMap> &surfaceMaps,
             bool repeating,
             /*out*/
             RequestList *requestList);
 
-    status_t submitRequestsHelper(const List<const CameraMetadata> &requests, bool repeating,
+    void convertToRequestList(List<const CameraMetadata>& requests,
+            std::list<const SurfaceMap>& surfaceMaps,
+            const CameraMetadata& request);
+
+    status_t submitRequestsHelper(const List<const CameraMetadata> &requests,
+                                  const std::list<const SurfaceMap> &surfaceMaps,
+                                  bool repeating,
                                   int64_t *lastFrameNumber = NULL);
 
 
@@ -436,13 +452,15 @@
      * Do common work for setting up a streaming or single capture request.
      * On success, will transition to ACTIVE if in IDLE.
      */
-    sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request);
+    sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request,
+                                          const SurfaceMap &surfaceMap);
 
     /**
      * Build a CaptureRequest request from the CameraDeviceBase request
      * settings.
      */
-    sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request);
+    sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request,
+                                            const SurfaceMap &surfaceMap);
 
     /**
      * Take the currently-defined set of streams and configure the HAL to use
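
The new createStream overload declared above takes a vector of consumer surfaces plus a hasDeferredConsumer flag, and the .cpp hunk earlier decides whether a Camera3SharedOutputStream is needed. A small standalone sketch of that decision rule as a plain predicate follows; the function name is a stand-in, while the logic mirrors the streamSharing expression and the deferred-only branch shown in the .cpp change.

#include <cstddef>
#include <iostream>

bool needsSharedStream(size_t numConsumers, bool hasDeferredConsumer) {
    // Sharing is required when more than one consumer is attached up front,
    // or when at least one consumer exists now and another will arrive later.
    return numConsumers > 1 || (numConsumers > 0 && hasDeferredConsumer);
}

int main() {
    std::cout << needsSharedStream(1, false) << "\n";  // 0: plain Camera3OutputStream
    std::cout << needsSharedStream(2, false) << "\n";  // 1: Camera3SharedOutputStream
    std::cout << needsSharedStream(1, true) << "\n";   // 1: Camera3SharedOutputStream
    std::cout << needsSharedStream(0, true) << "\n";   // 0: deferred Camera3OutputStream
    return 0;
}
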
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 5123785..7f61c7a 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -83,6 +83,14 @@
     return OK;
 }
 
+status_t Camera3DummyStream::notifyRequestedSurfaces(uint32_t frame_number,
+        const std::vector<size_t>& surface_ids) {
+    (void) frame_number;
+    (void) surface_ids;
+    // Do nothing
+    return OK;
+}
+
 status_t Camera3DummyStream::configureQueueLocked() {
     // Do nothing
     return OK;
@@ -103,7 +111,7 @@
     return false;
 }
 
-bool Camera3DummyStream::isConsumerConfigurationDeferred() const {
+bool Camera3DummyStream::isConsumerConfigurationDeferred(size_t /*surface_id*/) const {
     return false;
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 18e8a23..37efbbb 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -56,6 +56,9 @@
 
     virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
 
+    virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+            const std::vector<size_t>& surface_ids);
+
     /**
      * Return if this output stream is for video encoding.
      */
@@ -64,7 +67,7 @@
     /**
      * Return if the consumer configuration of this stream is deferred.
      */
-    virtual bool isConsumerConfigurationDeferred() const;
+    virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
 
     /**
      * Set the consumer surface to the output stream.
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 7229929..1e76a27 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -124,6 +124,7 @@
                                          int format,
                                          android_dataspace dataSpace,
                                          camera3_stream_rotation_t rotation,
+                                         uint32_t consumerUsage, nsecs_t timestampOffset,
                                          int setId) :
         Camera3IOStreamBase(id, type, width, height,
                             /*maxSize*/0,
@@ -132,7 +133,8 @@
         mTraceFirstBuffer(true),
         mUseMonoTimestamp(false),
         mUseBufferManager(false),
-        mConsumerUsage(0) {
+        mTimestampOffset(timestampOffset),
+        mConsumerUsage(consumerUsage) {
 
     if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
         mBufferReleasedListener = new BufferReleasedListener(this);
@@ -373,6 +375,24 @@
         return res;
     }
 
+    if ((res = configureConsumerQueueLocked()) != OK) {
+        return res;
+    }
+
+    // Set the dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
+    // We need to skip these cases because the timeout would disable the non-blocking (async) mode.
+    if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
+        mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+    }
+
+    return OK;
+}
+
+status_t Camera3OutputStream::configureConsumerQueueLocked() {
+    status_t res;
+
+    mTraceFirstBuffer = true;
+
     ALOG_ASSERT(mConsumer != 0, "mConsumer should never be NULL");
 
     // Configure consumer-side ANativeWindow interface. The listener may be used
@@ -470,12 +490,7 @@
     if (res != OK) {
         ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
                 __FUNCTION__, mTransform, strerror(-res), res);
-    }
-
-    // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
-    // We need skip these cases as timeout will disable the non-blocking (async) mode.
-    if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
-        mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+        return res;
     }
 
     /**
@@ -568,14 +583,24 @@
 status_t Camera3OutputStream::getEndpointUsage(uint32_t *usage) const {
 
     status_t res;
-    int32_t u = 0;
+
     if (mConsumer == nullptr) {
         // mConsumerUsage was sanitized before the Camera3OutputStream was constructed.
         *usage = mConsumerUsage;
         return OK;
     }
 
-    res = static_cast<ANativeWindow*>(mConsumer.get())->query(mConsumer.get(),
+    res = getEndpointUsageForSurface(usage, mConsumer);
+
+    return res;
+}
+
+status_t Camera3OutputStream::getEndpointUsageForSurface(uint32_t *usage,
+        const sp<Surface>& surface) const {
+    status_t res;
+    int32_t u = 0;
+
+    res = static_cast<ANativeWindow*>(surface.get())->query(surface.get(),
             NATIVE_WINDOW_CONSUMER_USAGE_BITS, &u);
 
     // If an opaque output stream's endpoint is ImageReader, add
@@ -587,8 +612,8 @@
     //     3. GRALLOC_USAGE_HW_COMPOSER
     //     4. GRALLOC_USAGE_HW_VIDEO_ENCODER
     if (camera3_stream::format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
-            (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_COMPOSER |
-            GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
+            (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
+            GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
         u |= GRALLOC_USAGE_HW_CAMERA_ZSL;
     }
 
@@ -676,8 +701,17 @@
     return OK;
 }
 
-bool Camera3OutputStream::isConsumerConfigurationDeferred() const {
+status_t Camera3OutputStream::notifyRequestedSurfaces(uint32_t /*frame_number*/,
+        const std::vector<size_t>& /*surface_ids*/) {
+    return OK;
+}
+
+bool Camera3OutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
     Mutex::Autolock l(mLock);
+
+    if (surface_id != 0) {
+        ALOGE("%s: surface_id for Camera3OutputStream should be 0!", __FUNCTION__);
+    }
     return mConsumer == nullptr;
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 12d497e..26ea63f 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -135,7 +135,7 @@
     /**
      * Return if the consumer configuration of this stream is deferred.
      */
-    virtual bool isConsumerConfigurationDeferred() const;
+    virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
 
     /**
      * Set the consumer surface to the output stream.
@@ -158,6 +158,9 @@
 
     virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
 
+    virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+            const std::vector<size_t>& surface_ids);
+
     /**
      * Set the graphic buffer manager to get/return the stream buffers.
      *
@@ -169,6 +172,7 @@
     Camera3OutputStream(int id, camera3_stream_type_t type,
             uint32_t width, uint32_t height, int format,
             android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+            uint32_t consumerUsage = 0, nsecs_t timestampOffset = 0,
             int setId = CAMERA3_STREAM_SET_ID_INVALID);
 
     /**
@@ -183,12 +187,19 @@
 
     virtual status_t disconnectLocked();
 
+    status_t getEndpointUsageForSurface(uint32_t *usage,
+            const sp<Surface>& surface) const;
+    status_t configureConsumerQueueLocked();
+
+    // The consumer surface that receives output from the camera HAL
     sp<Surface> mConsumer;
 
-  private:
+    uint32_t getPresetConsumerUsage() const { return mConsumerUsage; }
 
     static const nsecs_t       kDequeueBufferTimeout   = 1000000000; // 1 sec
 
+  private:
+
     int               mTransform;
 
     virtual status_t  setTransformLocked(int transform);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index 3f83c89..6a911c6 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -43,7 +43,7 @@
     /**
      * Return if the consumer configuration of this stream is deferred.
      */
-    virtual bool isConsumerConfigurationDeferred() const = 0;
+    virtual bool isConsumerConfigurationDeferred(size_t surface_id = 0) const = 0;
 
     /**
      * Set the consumer surface to the output stream.
@@ -59,6 +59,20 @@
      *
      */
     virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) = 0;
+
+    /**
+     * Notify which surfaces are requested for a particular frame number.
+     *
+     * Multiple surfaces may share the same output stream, but a request may
+     * target only a subset of those surfaces. In this case, the
+     * Camera3OutputStreamInterface object needs to manage the output surfaces
+     * on a per-request basis.
+     *
+     * If there is only one surface for this output stream, calling this
+     * function is a no-op.
+     */
+    virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+            const std::vector<size_t>& surface_ids) = 0;
 };
 
 } // namespace camera3
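
The interface comment above leaves the per-request bookkeeping to the implementing stream: it must remember, for each submitted frame, which surface indices were requested, and consume that record when the matching buffer becomes available. A simplified standalone sketch of such bookkeeping as a FIFO keyed by submission order is shown below; the class and member names are illustrative, not the framework's.

#include <cstddef>
#include <deque>
#include <utility>
#include <vector>

class RequestedSurfaceQueue {
public:
    // Record which surface indices the next submitted frame should go to.
    void notifyRequested(const std::vector<size_t>& surfaceIds) {
        mPending.push_back(surfaceIds);
    }

    // Consume the record for the next buffer that becomes available; returns
    // the surface indices the buffer should be attached and queued to.
    std::vector<size_t> takeNext() {
        if (mPending.empty()) {
            return {};  // No request recorded; the caller chooses a fallback.
        }
        std::vector<size_t> ids = std::move(mPending.front());
        mPending.pop_front();
        return ids;
    }

private:
    std::deque<std::vector<size_t>> mPending;
};
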
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
new file mode 100644
index 0000000..b419e06
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Camera3SharedOutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3SharedOutputStream::Camera3SharedOutputStream(int id,
+        const std::vector<sp<Surface>>& surfaces,
+        bool hasDeferredSurface,
+        uint32_t width, uint32_t height, int format,
+        uint32_t consumerUsage, android_dataspace dataSpace,
+        camera3_stream_rotation_t rotation,
+        nsecs_t timestampOffset, int setId) :
+        Camera3OutputStream(id, CAMERA3_STREAM_OUTPUT, width, height,
+                            format, dataSpace, rotation, consumerUsage,
+                            timestampOffset, setId),
+        mSurfaces(surfaces),
+        mDeferred(hasDeferredSurface) {
+}
+
+Camera3SharedOutputStream::~Camera3SharedOutputStream() {
+    disconnectLocked();
+}
+
+status_t Camera3SharedOutputStream::connectStreamSplitterLocked() {
+    status_t res = OK;
+
+    mStreamSplitter = new Camera3StreamSplitter();
+
+    uint32_t usage;
+    getEndpointUsage(&usage);
+
+    res = mStreamSplitter->connect(mSurfaces, usage, camera3_stream::max_buffers, mConsumer);
+    if (res != OK) {
+        ALOGE("%s: Failed to connect to stream splitter: %s(%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    return res;
+}
+
+status_t Camera3SharedOutputStream::notifyRequestedSurfaces(uint32_t /*frame_number*/,
+        const std::vector<size_t>& surface_ids) {
+    Mutex::Autolock l(mLock);
+    status_t res = OK;
+
+    if (mStreamSplitter != nullptr) {
+        res = mStreamSplitter->notifyRequestedSurfaces(surface_ids);
+    }
+
+    return res;
+}
+
+bool Camera3SharedOutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
+    Mutex::Autolock l(mLock);
+    return (mDeferred && surface_id >= mSurfaces.size());
+}
+
+status_t Camera3SharedOutputStream::setConsumer(sp<Surface> surface) {
+    if (surface == nullptr) {
+        ALOGE("%s: it's illegal to set a null consumer surface!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    if (!mDeferred) {
+        ALOGE("%s: Current stream isn't deferred!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    mSurfaces.push_back(surface);
+
+    return mStreamSplitter->addOutput(surface, camera3_stream::max_buffers);
+}
+
+status_t Camera3SharedOutputStream::configureQueueLocked() {
+    status_t res;
+
+    if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) {
+        return res;
+    }
+
+    res = connectStreamSplitterLocked();
+    if (res != OK) {
+        ALOGE("Cannot connect to stream splitter: %s(%d)", strerror(-res), res);
+        return res;
+    }
+
+    res = configureConsumerQueueLocked();
+    if (res != OK) {
+        ALOGE("Failed to configureConsumerQueueLocked: %s(%d)", strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
+status_t Camera3SharedOutputStream::disconnectLocked() {
+    status_t res;
+    res = Camera3OutputStream::disconnectLocked();
+
+    if (mStreamSplitter != nullptr) {
+        mStreamSplitter->disconnect();
+    }
+
+    return res;
+}
+
+status_t Camera3SharedOutputStream::getEndpointUsage(uint32_t *usage) const {
+
+    status_t res = OK;
+    uint32_t u = 0;
+
+    if (mConsumer == nullptr) {
+        // Called before shared buffer queue is constructed.
+        *usage = getPresetConsumerUsage();
+
+        for (auto surface : mSurfaces) {
+            if (surface != nullptr) {
+                res = getEndpointUsageForSurface(&u, surface);
+                *usage |= u;
+            }
+        }
+    } else {
+        // Called after shared buffer queue is constructed.
+        res = getEndpointUsageForSurface(&u, mConsumer);
+        *usage = u;
+    }
+
+    return res;
+}
+
+} // namespace camera3
+
+} // namespace android
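
Camera3SharedOutputStream::getEndpointUsage above folds the preset usage flags together with each attached surface's consumer usage bits before the shared buffer queue exists. A standalone sketch of that OR-accumulation over hypothetical per-surface usage values follows; the struct and field names are placeholders for the NATIVE_WINDOW_CONSUMER_USAGE_BITS query.

#include <cstdint>
#include <vector>

// Placeholder for a surface whose consumer usage bits would normally come
// from a NATIVE_WINDOW_CONSUMER_USAGE_BITS query.
struct FakeSurface {
    uint32_t consumerUsageBits;
};

// OR together a preset usage value and every surface's usage bits, mirroring
// the accumulation in the shared stream's getEndpointUsage.
uint32_t combinedUsage(uint32_t presetUsage, const std::vector<FakeSurface>& surfaces) {
    uint32_t usage = presetUsage;
    for (const FakeSurface& s : surfaces) {
        usage |= s.consumerUsageBits;
    }
    return usage;
}
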
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
new file mode 100644
index 0000000..1b37d7c
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
+
+#include "Camera3StreamSplitter.h"
+#include "Camera3OutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3SharedOutputStream :
+        public Camera3OutputStream {
+public:
+    /**
+     * Set up a stream for formats that have 2 dimensions, with multiple
+     * surfaces. A valid stream set id needs to be set to support buffer
+     * sharing between multiple streams.
+     */
+    Camera3SharedOutputStream(int id, const std::vector<sp<Surface>>& surfaces,
+            bool hasDeferredSurface, uint32_t width, uint32_t height, int format,
+            uint32_t consumerUsage, android_dataspace dataSpace,
+            camera3_stream_rotation_t rotation, nsecs_t timestampOffset,
+            int setId = CAMERA3_STREAM_SET_ID_INVALID);
+
+    virtual ~Camera3SharedOutputStream();
+
+    virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+            const std::vector<size_t>& surface_ids);
+
+    virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
+
+    virtual status_t setConsumer(sp<Surface> consumer);
+
+private:
+    // Surfaces passed in constructor from app
+    std::vector<sp<Surface> > mSurfaces;
+
+    /**
+     * The Camera3StreamSplitter object this stream uses for stream
+     * sharing.
+     */
+    sp<Camera3StreamSplitter> mStreamSplitter;
+
+    /**
+     * Initialize stream splitter.
+     */
+    status_t connectStreamSplitterLocked();
+
+    virtual status_t configureQueueLocked();
+
+    virtual status_t disconnectLocked();
+
+    virtual status_t getEndpointUsage(uint32_t *usage) const;
+
+    bool mDeferred;
+
+}; // class Camera3SharedOutputStream
+
+} // namespace camera3
+
+} // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
new file mode 100644
index 0000000..b935141
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -0,0 +1,441 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#define LOG_TAG "Camera3StreamSplitter"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <gui/BufferItem.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/BufferQueue.h>
+#include <gui/Surface.h>
+
+#include <ui/GraphicBuffer.h>
+
+#include <binder/ProcessState.h>
+
+#include <utils/Trace.h>
+
+#include "Camera3StreamSplitter.h"
+
+namespace android {
+
+status_t Camera3StreamSplitter::connect(const std::vector<sp<Surface> >& surfaces,
+                                           uint32_t consumerUsage, size_t hal_max_buffers,
+                                           sp<Surface>& consumer) {
+    if (consumer != nullptr) {
+        ALOGE("%s: output Surface is not NULL", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mMutex);
+    status_t res = OK;
+
+    if (mOutputs.size() > 0 || mConsumer != nullptr) {
+        ALOGE("%s: StreamSplitter already connected", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    // Add output surfaces. This has to happen before creating the internal
+    // buffer queue in order to determine the maximum consumer-side buffer count.
+    for (size_t i = 0; i < surfaces.size(); i++) {
+        if (surfaces[i] != nullptr) {
+            res = addOutputLocked(surfaces[i], hal_max_buffers,
+                    OutputType::NonDeferred);
+            if (res != OK) {
+                ALOGE("%s: Failed to add output surface: %s(%d)",
+                        __FUNCTION__, strerror(-res), res);
+                return res;
+            }
+        }
+    }
+
+    // Create buffer queue for input
+    BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+
+    mBufferItemConsumer = new BufferItemConsumer(mConsumer, consumerUsage,
+                                                 mMaxConsumerBuffers);
+    if (mBufferItemConsumer == nullptr) {
+        return NO_MEMORY;
+    }
+    mConsumer->setConsumerName(getUniqueConsumerName());
+
+    mSurface = new Surface(mProducer);
+    if (mSurface == nullptr) {
+        return NO_MEMORY;
+    }
+    consumer = mSurface;
+
+    res = mConsumer->consumerConnect(this, /* controlledByApp */ false);
+
+    return res;
+}
+
+void Camera3StreamSplitter::disconnect() {
+    Mutex::Autolock lock(mMutex);
+
+    for (auto& output : mOutputs) {
+        output->disconnect(NATIVE_WINDOW_API_CAMERA);
+    }
+    mOutputs.clear();
+
+    if (mConsumer != nullptr) {
+        mConsumer->consumerDisconnect();
+        mConsumer.clear();
+    }
+
+    if (mBuffers.size() > 0) {
+        ALOGI("%zu buffers still being tracked", mBuffers.size());
+    }
+}
+
+Camera3StreamSplitter::~Camera3StreamSplitter() {
+    disconnect();
+}
+
+status_t Camera3StreamSplitter::addOutput(
+        sp<Surface>& outputQueue, size_t hal_max_buffers) {
+    Mutex::Autolock lock(mMutex);
+    return addOutputLocked(outputQueue, hal_max_buffers, OutputType::Deferred);
+}
+
+status_t Camera3StreamSplitter::addOutputLocked(
+        const sp<Surface>& outputQueue, size_t hal_max_buffers,
+        OutputType outputType) {
+    if (outputQueue == nullptr) {
+        ALOGE("addOutput: outputQueue must not be NULL");
+        return BAD_VALUE;
+    }
+    if (hal_max_buffers < 1) {
+        ALOGE("%s: Camera HAL requested max_buffer count: %zu, requires at least 1",
+                __FUNCTION__, hal_max_buffers);
+        return BAD_VALUE;
+    }
+
+    sp<IGraphicBufferProducer> gbp = outputQueue->getIGraphicBufferProducer();
+    // Connect to the buffer producer
+    IGraphicBufferProducer::QueueBufferOutput queueBufferOutput;
+    sp<OutputListener> listener(new OutputListener(this, gbp));
+    IInterface::asBinder(gbp)->linkToDeath(listener);
+    status_t status = gbp->connect(listener, NATIVE_WINDOW_API_CAMERA,
+            /* producerControlledByApp */ true, &queueBufferOutput);
+    if (status != NO_ERROR) {
+        ALOGE("addOutput: failed to connect (%d)", status);
+        return status;
+    }
+
+    // Query consumer side buffer count, and update overall buffer count
+    int maxConsumerBuffers = 0;
+    status = static_cast<ANativeWindow*>(outputQueue.get())->query(
+            outputQueue.get(),
+            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
+    if (status != OK) {
+        ALOGE("%s: Unable to query consumer undequeued buffer count"
+              " for surface", __FUNCTION__);
+        return status;
+    }
+
+    if (maxConsumerBuffers > mMaxConsumerBuffers) {
+        if (outputType == OutputType::Deferred) {
+            ALOGE("%s: Fatal: Deferred surface has higher consumer buffer count"
+                  " %d than what's already configured %d", __FUNCTION__,
+                  maxConsumerBuffers, mMaxConsumerBuffers);
+            return BAD_VALUE;
+        }
+        mMaxConsumerBuffers = maxConsumerBuffers;
+    }
+
+    ALOGV("%s: Consumer wants %d buffers, HAL wants %zu", __FUNCTION__,
+            maxConsumerBuffers, hal_max_buffers);
+    size_t totalBufferCount = maxConsumerBuffers + hal_max_buffers;
+    status = native_window_set_buffer_count(outputQueue.get(),
+            totalBufferCount);
+    if (status != OK) {
+        ALOGE("%s: Unable to set buffer count for surface %p",
+                __FUNCTION__, outputQueue.get());
+        return status;
+    }
+
+    // Set the dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
+    // We need to skip these cases because the timeout would disable the non-blocking (async) mode.
+    int32_t usage = 0;
+    static_cast<ANativeWindow*>(outputQueue.get())->query(
+            outputQueue.get(),
+            NATIVE_WINDOW_CONSUMER_USAGE_BITS, &usage);
+    if (!(usage & (GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_TEXTURE))) {
+        outputQueue->setDequeueTimeout(kDequeueBufferTimeout);
+    }
+
+    status = gbp->allowAllocation(false);
+    if (status != OK) {
+        ALOGE("%s: Failed to turn off allocation for outputQueue", __FUNCTION__);
+        return status;
+    }
+
+    // Add new entry into mOutputs
+    mOutputs.push_back(gbp);
+    return NO_ERROR;
+}
+
+String8 Camera3StreamSplitter::getUniqueConsumerName() {
+    static volatile int32_t counter = 0;
+    return String8::format("Camera3StreamSplitter-%d", android_atomic_inc(&counter));
+}
+
+status_t Camera3StreamSplitter::notifyRequestedSurfaces(
+        const std::vector<size_t>& surfaces) {
+    ATRACE_CALL();
+    Mutex::Autolock lock(mMutex);
+
+    mRequestedSurfaces.push_back(surfaces);
+    return OK;
+}
+
+
+void Camera3StreamSplitter::onFrameAvailable(const BufferItem& /* item */) {
+    ATRACE_CALL();
+    Mutex::Autolock lock(mMutex);
+
+    // The current policy is that if any one consumer is consuming buffers too
+    // slowly, the splitter will stall the rest of the outputs by not acquiring
+    // any more buffers from the input. This will cause back pressure on the
+    // input queue, slowing down its producer.
+
+    // If there are too many outstanding buffers, we block until a buffer is
+    // released back to the input in onBufferReleased
+    while (mOutstandingBuffers >= mMaxConsumerBuffers) {
+        mReleaseCondition.wait(mMutex);
+
+        // If the splitter is abandoned while we are waiting, the release
+        // condition variable will be broadcast, and we should just return
+        // without attempting to do anything more (since the input queue will
+        // also be abandoned).
+        if (mIsAbandoned) {
+            return;
+        }
+    }
+    // If the splitter is abandoned without reaching mMaxConsumerBuffers, just
+    // return without attempting to do anything more.
+    if (mIsAbandoned) {
+        return;
+    }
+
+    ++mOutstandingBuffers;
+
+    // Acquire and detach the buffer from the input
+    BufferItem bufferItem;
+    status_t status = mConsumer->acquireBuffer(&bufferItem, /* presentWhen */ 0);
+    LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+            "acquiring buffer from input failed (%d)", status);
+
+    ALOGV("acquired buffer %#" PRIx64 " from input",
+            bufferItem.mGraphicBuffer->getId());
+
+    status = mConsumer->detachBuffer(bufferItem.mSlot);
+    LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+            "detaching buffer from input failed (%d)", status);
+
+    IGraphicBufferProducer::QueueBufferInput queueInput(
+            bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp,
+            bufferItem.mDataSpace, bufferItem.mCrop,
+            static_cast<int32_t>(bufferItem.mScalingMode),
+            bufferItem.mTransform, bufferItem.mFence);
+
+    // Attach and queue the buffer to each of the outputs
+    std::vector<std::vector<size_t> >::iterator surfaces = mRequestedSurfaces.begin();
+    if (surfaces != mRequestedSurfaces.end()) {
+
+        LOG_ALWAYS_FATAL_IF(surfaces->size() == 0,
+                "requested surface ids shouldn't be empty");
+
+        // Initialize our reference count for this buffer
+        mBuffers[bufferItem.mGraphicBuffer->getId()] =
+                std::unique_ptr<BufferTracker>(
+                new BufferTracker(bufferItem.mGraphicBuffer, surfaces->size()));
+
+        for (auto id : *surfaces) {
+
+            LOG_ALWAYS_FATAL_IF(id >= mOutputs.size(),
+                    "requested surface id exceeding max registered ids");
+
+            int slot = BufferItem::INVALID_BUFFER_SLOT;
+            status = mOutputs[id]->attachBuffer(&slot, bufferItem.mGraphicBuffer);
+            if (status == NO_INIT) {
+                // If we just discovered that this output has been abandoned, note
+                // that, decrement the reference count so that we still release this
+                // buffer eventually, and move on to the next output
+                onAbandonedLocked();
+                mBuffers[bufferItem.mGraphicBuffer->getId()]->
+                        decrementReferenceCountLocked();
+                continue;
+            } else if (status == WOULD_BLOCK) {
+                // If the output is async, attachBuffer may return WOULD_BLOCK
+                // indicating number of dequeued buffers has reached limit. In
+                // this case, simply decrement the reference count, and move on
+                // to the next output.
+                // TODO: Do we need to report BUFFER_ERROR for this result?
+                mBuffers[bufferItem.mGraphicBuffer->getId()]->
+                        decrementReferenceCountLocked();
+                continue;
+            } else if (status == TIMED_OUT) {
+                // If attachBuffer times out due to the value set by
+                // setDequeueTimeout, simply decrement the reference count, and
+                // move on to the next output.
+                // TODO: Do we need to report BUFFER_ERROR for this result?
+                mBuffers[bufferItem.mGraphicBuffer->getId()]->
+                        decrementReferenceCountLocked();
+                continue;
+            } else {
+                LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+                        "attaching buffer to output failed (%d)", status);
+            }
+
+            IGraphicBufferProducer::QueueBufferOutput queueOutput;
+            status = mOutputs[id]->queueBuffer(slot, queueInput, &queueOutput);
+            if (status == NO_INIT) {
+                // If we just discovered that this output has been abandoned, note
+                // that, decrement the reference count so that we still release this
+                // buffer eventually, and move on to the next output
+                onAbandonedLocked();
+                mBuffers[bufferItem.mGraphicBuffer->getId()]->
+                        decrementReferenceCountLocked();
+                continue;
+            } else {
+                LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+                        "queueing buffer to output failed (%d)", status);
+            }
+
+            ALOGV("queued buffer %#" PRIx64 " to output %p",
+                    bufferItem.mGraphicBuffer->getId(), mOutputs[id].get());
+        }
+
+        mRequestedSurfaces.erase(surfaces);
+    }
+}
+
+void Camera3StreamSplitter::onBufferReleasedByOutput(
+        const sp<IGraphicBufferProducer>& from) {
+    ATRACE_CALL();
+    Mutex::Autolock lock(mMutex);
+
+    sp<GraphicBuffer> buffer;
+    sp<Fence> fence;
+    status_t status = from->detachNextBuffer(&buffer, &fence);
+    if (status == NO_INIT) {
+        // If we just discovered that this output has been abandoned, note that,
+        // but we can't do anything else, since buffer is invalid
+        onAbandonedLocked();
+        return;
+    } else {
+        LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+                "detaching buffer from output failed (%d)", status);
+    }
+
+    ALOGV("detached buffer %#" PRIx64 " from output %p",
+          buffer->getId(), from.get());
+
+    BufferTracker& tracker = *(mBuffers[buffer->getId()]);
+
+    // Merge the release fence of the incoming buffer so that the fence we send
+    // back to the input includes all of the outputs' fences
+    tracker.mergeFence(fence);
+
+    // Check to see if this is the last outstanding reference to this buffer
+    size_t referenceCount = tracker.decrementReferenceCountLocked();
+    ALOGV("buffer %#" PRIx64 " reference count %zu", buffer->getId(),
+            referenceCount);
+    if (referenceCount > 0) {
+        return;
+    }
+
+    // If we've been abandoned, we can't return the buffer to the input, so just
+    // stop tracking it and move on
+    if (mIsAbandoned) {
+        mBuffers.erase(buffer->getId());
+        return;
+    }
+
+    // Attach and release the buffer back to the input
+    int consumerSlot = BufferItem::INVALID_BUFFER_SLOT;
+    status = mConsumer->attachBuffer(&consumerSlot, tracker.getBuffer());
+    LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+            "attaching buffer to input failed (%d)", status);
+
+    status = mConsumer->releaseBuffer(consumerSlot, /* frameNumber */ 0,
+            EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, tracker.getMergedFence());
+    LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+            "releasing buffer to input failed (%d)", status);
+
+    ALOGV("released buffer %#" PRIx64 " to input", buffer->getId());
+
+    // We no longer need to track the buffer once it has been returned to the
+    // input
+    mBuffers.erase(buffer->getId());
+
+    // Notify any waiting onFrameAvailable calls
+    --mOutstandingBuffers;
+    mReleaseCondition.signal();
+}
+
+void Camera3StreamSplitter::onAbandonedLocked() {
+    ALOGE("one of my outputs has abandoned me");
+    if (!mIsAbandoned && mConsumer != nullptr) {
+        mConsumer->consumerDisconnect();
+    }
+    mIsAbandoned = true;
+    mReleaseCondition.broadcast();
+}
+
+Camera3StreamSplitter::OutputListener::OutputListener(
+        wp<Camera3StreamSplitter> splitter,
+        wp<IGraphicBufferProducer> output)
+      : mSplitter(splitter), mOutput(output) {}
+
+void Camera3StreamSplitter::OutputListener::onBufferReleased() {
+    sp<Camera3StreamSplitter> splitter = mSplitter.promote();
+    sp<IGraphicBufferProducer> output = mOutput.promote();
+    if (splitter != nullptr && output != nullptr) {
+        splitter->onBufferReleasedByOutput(output);
+    }
+}
+
+void Camera3StreamSplitter::OutputListener::binderDied(const wp<IBinder>& /* who */) {
+    sp<Camera3StreamSplitter> splitter = mSplitter.promote();
+    if (splitter != nullptr) {
+        Mutex::Autolock lock(splitter->mMutex);
+        splitter->onAbandonedLocked();
+    }
+}
+
+Camera3StreamSplitter::BufferTracker::BufferTracker(
+        const sp<GraphicBuffer>& buffer, size_t referenceCount)
+      : mBuffer(buffer), mMergedFence(Fence::NO_FENCE),
+        mReferenceCount(referenceCount) {}
+
+void Camera3StreamSplitter::BufferTracker::mergeFence(const sp<Fence>& with) {
+    mMergedFence = Fence::merge(String8("Camera3StreamSplitter"), mMergedFence, with);
+}
+
+size_t Camera3StreamSplitter::BufferTracker::decrementReferenceCountLocked() {
+    if (mReferenceCount > 0)
+        --mReferenceCount;
+    return mReferenceCount;
+}
+
+} // namespace android
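
The onFrameAvailable comment above describes the splitter's back-pressure policy: once the count of outstanding buffers reaches the consumer-side maximum, the splitter waits instead of acquiring more, and it bails out if it is abandoned while waiting. A simplified standalone sketch of that throttle using std::mutex and std::condition_variable in place of Android's Mutex/Condition is shown below; the class and method names are illustrative.

#include <condition_variable>
#include <mutex>

// Minimal throttle mirroring the splitter's policy: block new acquisitions
// while too many buffers are outstanding, wake up when one is released,
// and bail out immediately if the splitter has been abandoned.
class BufferThrottle {
public:
    explicit BufferThrottle(int maxOutstanding) : mMax(maxOutstanding) {}

    // Returns false if abandoned while waiting; true if a slot was taken.
    bool acquireSlot() {
        std::unique_lock<std::mutex> lock(mMutex);
        mReleased.wait(lock, [this] { return mAbandoned || mOutstanding < mMax; });
        if (mAbandoned) {
            return false;
        }
        ++mOutstanding;
        return true;
    }

    // Called when an output returns a buffer to the input queue.
    void releaseSlot() {
        std::lock_guard<std::mutex> lock(mMutex);
        --mOutstanding;
        mReleased.notify_one();
    }

    // Called when any output is abandoned; wakes all waiters.
    void abandon() {
        std::lock_guard<std::mutex> lock(mMutex);
        mAbandoned = true;
        mReleased.notify_all();
    }

private:
    std::mutex mMutex;
    std::condition_variable mReleased;
    int mOutstanding = 0;
    const int mMax;
    bool mAbandoned = false;
};
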
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
new file mode 100644
index 0000000..5a25712
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_STREAMSPLITTER_H
+#define ANDROID_SERVERS_STREAMSPLITTER_H
+
+#include <gui/IConsumerListener.h>
+#include <gui/IProducerListener.h>
+#include <gui/BufferItemConsumer.h>
+
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+#include <utils/StrongPointer.h>
+#include <utils/Timers.h>
+
+namespace android {
+
+class GraphicBuffer;
+class IGraphicBufferConsumer;
+class IGraphicBufferProducer;
+
+// Camera3StreamSplitter is an autonomous class that manages one input BufferQueue
+// and multiple output BufferQueues. By using the buffer attach and detach logic
+// in BufferQueue, it is able to present the illusion of a single split
+// BufferQueue, where each buffer queued to the input is available to be
+// acquired by each of the outputs, and is able to be dequeued by the input
+// again only once all of the outputs have released it.
+class Camera3StreamSplitter : public BnConsumerListener {
+public:
+
+    // Constructor
+    Camera3StreamSplitter() = default;
+
+    // Connect to the stream splitter by creating buffer queue and connecting it
+    // with output surfaces.
+    status_t connect(const std::vector<sp<Surface> >& surfaces,
+            uint32_t consumerUsage, size_t hal_max_buffers,
+            sp<Surface>& consumer);
+
+    // addOutput adds an output BufferQueue to the splitter. The splitter
+    // connects to outputQueue as a producer, and any buffers queued
+    // to the input will be queued to each output. It is assumed that all of the
+    // outputs are added before any buffers are queued on the input. If any
+    // output is abandoned by its consumer, the splitter will abandon its input
+    // queue (see onAbandoned).
+    //
+    // A return value other than NO_ERROR means that an error has occurred and
+    // outputQueue has not been added to the splitter. BAD_VALUE is returned if
+    // outputQueue is NULL. See IGraphicBufferProducer::connect for explanations
+    // of other error codes.
+    status_t addOutput(sp<Surface>& outputQueue, size_t hal_max_buffers);
+
+    // Request surfaces for a particular frame number. The requested surfaces
+    // are stored in a FIFO queue, and when a buffer becomes available from the
+    // input queue, the registered surfaces are used to decide which outputs the
+    // buffer is sent to.
+    status_t notifyRequestedSurfaces(const std::vector<size_t>& surfaces);
+
+    // Disconnect the buffer queue from output surfaces.
+    void disconnect();
+
+private:
+    // From IConsumerListener
+    //
+    // During this callback, we store some tracking information, detach the
+    // buffer from the input, and attach it to each of the outputs. This call
+    // can block if there are too many outstanding buffers. If it blocks, it
+    // will resume when onBufferReleasedByOutput releases a buffer back to the
+    // input.
+    void onFrameAvailable(const BufferItem& item) override;
+
+    // From IConsumerListener
+    // We don't care about released buffers because we detach each buffer as
+    // soon as we acquire it. See the comment for onBufferReleased below for
+    // some clarifying notes about the name.
+    void onBuffersReleased() override {}
+
+    // From IConsumerListener
+    // We don't care about sideband streams, since we won't be splitting them
+    void onSidebandStreamChanged() override {}
+
+    // This is the implementation of the onBufferReleased callback from
+    // IProducerListener. It gets called from an OutputListener (see below), and
+    // 'from' is the producer interface from which the callback was received.
+    //
+    // During this callback, we detach the buffer from the output queue that
+    // generated the callback, update our state tracking to see if this is the
+    // last output releasing the buffer, and if so, release it to the input.
+    // If we release the buffer to the input, we allow a blocked
+    // onFrameAvailable call to proceed.
+    void onBufferReleasedByOutput(const sp<IGraphicBufferProducer>& from);
+
+    // When this is called, the splitter disconnects from (i.e., abandons) its
+    // input queue and signals any waiting onFrameAvailable calls to wake up.
+    // It still processes callbacks from other outputs, but only detaches their
+    // buffers so they can continue operating until they run out of buffers to
+    // acquire. This must be called with mMutex locked.
+    void onAbandonedLocked();
+
+    // This is a thin wrapper class that lets us determine which BufferQueue
+    // the IProducerListener::onBufferReleased callback is associated with. We
+    // create one of these per output BufferQueue, and then pass the producer
+    // into onBufferReleasedByOutput above.
+    class OutputListener : public BnProducerListener,
+                           public IBinder::DeathRecipient {
+    public:
+        OutputListener(wp<Camera3StreamSplitter> splitter,
+                wp<IGraphicBufferProducer> output);
+        virtual ~OutputListener() = default;
+
+        // From IProducerListener
+        void onBufferReleased() override;
+
+        // From IBinder::DeathRecipient
+        void binderDied(const wp<IBinder>& who) override;
+
+    private:
+        wp<Camera3StreamSplitter> mSplitter;
+        wp<IGraphicBufferProducer> mOutput;
+    };
+
+    class BufferTracker {
+    public:
+        BufferTracker(const sp<GraphicBuffer>& buffer, size_t referenceCount);
+        ~BufferTracker() = default;
+
+        const sp<GraphicBuffer>& getBuffer() const { return mBuffer; }
+        const sp<Fence>& getMergedFence() const { return mMergedFence; }
+
+        void mergeFence(const sp<Fence>& with);
+
+        // Returns the new value
+        // Only called while mMutex is held
+        size_t decrementReferenceCountLocked();
+
+    private:
+
+        // Disallow copying
+        BufferTracker(const BufferTracker& other);
+        BufferTracker& operator=(const BufferTracker& other);
+
+        sp<GraphicBuffer> mBuffer; // One instance that holds this native handle
+        sp<Fence> mMergedFence;
+        size_t mReferenceCount;
+    };
+
+    // A deferred output is an output added to the splitter after the
+    // connect() call, whereas a non-deferred output is added within the
+    // connect() call.
+    enum class OutputType { NonDeferred, Deferred };
+
+    // Must be accessed through RefBase
+    virtual ~Camera3StreamSplitter();
+
+    status_t addOutputLocked(const sp<Surface>& outputQueue,
+                             size_t hal_max_buffers, OutputType outputType);
+
+    // Get unique name for the buffer queue consumer
+    static String8 getUniqueConsumerName();
+
+    // Max consumer-side buffers for a deferred surface. This is used as a
+    // lower bound for the overall consumer-side max buffer count.
+    static const int MAX_BUFFERS_DEFERRED_OUTPUT = 2;
+    int mMaxConsumerBuffers = MAX_BUFFERS_DEFERRED_OUTPUT;
+
+    static const nsecs_t kDequeueBufferTimeout   = s2ns(1); // 1 sec
+
+    // mIsAbandoned is set to true when an output dies. Once the Camera3StreamSplitter
+    // has been abandoned, it will continue to detach buffers from other
+    // outputs, but it will disconnect from the input and not attempt to
+    // communicate with it further.
+    bool mIsAbandoned = false;
+
+    Mutex mMutex;
+    Condition mReleaseCondition;
+    int mOutstandingBuffers = 0;
+
+    sp<IGraphicBufferProducer> mProducer;
+    sp<IGraphicBufferConsumer> mConsumer;
+    sp<BufferItemConsumer> mBufferItemConsumer;
+    sp<Surface> mSurface;
+
+    std::vector<sp<IGraphicBufferProducer> > mOutputs;
+    // Tracks which outputs each input buffer should be attached and
+    // queued to.
+    std::vector<std::vector<size_t> > mRequestedSurfaces;
+
+    // Map of GraphicBuffer IDs (GraphicBuffer::getId()) to buffer tracking
+    // objects (which are mostly for counting how many outputs have released the
+    // buffer, but also contain merged release fences).
+    std::unordered_map<uint64_t, std::unique_ptr<BufferTracker> > mBuffers;
+};
+
+} // namespace android
+
+#endif
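
The Camera3StreamSplitter header above tracks each input buffer with a per-buffer reference count (one reference per output that still holds the buffer) and a release fence merged across outputs, keyed by GraphicBuffer ID. The following is a minimal, self-contained sketch of that bookkeeping in standard C++ only; the Fence stand-in, SplitterSketch, and onReleasedByOutput names are illustrative placeholders, not the actual Android types or methods.

// Illustrative sketch only -- not part of the change. All types here are
// simplified stand-ins for the Android ones (GraphicBuffer, Fence, sp<>).
#include <cstdint>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>

struct Fence {                       // stand-in for android::Fence
    std::vector<int> fds;            // pretend these are sync fds
    static std::shared_ptr<Fence> merge(const std::shared_ptr<Fence>& a,
                                        const std::shared_ptr<Fence>& b) {
        auto out = std::make_shared<Fence>();
        if (a) out->fds.insert(out->fds.end(), a->fds.begin(), a->fds.end());
        if (b) out->fds.insert(out->fds.end(), b->fds.begin(), b->fds.end());
        return out;
    }
};

class SplitterSketch {
public:
    // Called when an input buffer is handed to 'outputCount' outputs.
    void trackBuffer(uint64_t bufferId, size_t outputCount) {
        std::lock_guard<std::mutex> lock(mMutex);
        mTrackers[bufferId] = std::make_unique<Tracker>(outputCount);
    }

    // Called from each output's onBufferReleased(); merges that output's
    // release fence and returns true once every output has released.
    bool onReleasedByOutput(uint64_t bufferId,
                            const std::shared_ptr<Fence>& releaseFence) {
        std::lock_guard<std::mutex> lock(mMutex);
        auto it = mTrackers.find(bufferId);
        if (it == mTrackers.end()) return false;
        Tracker& t = *it->second;
        t.mergedFence = Fence::merge(t.mergedFence, releaseFence);
        if (--t.refCount == 0) {
            // Last output released: the merged fence would be returned to
            // the input queue here, then the tracking entry is dropped.
            mTrackers.erase(it);
            return true;
        }
        return false;
    }

private:
    struct Tracker {
        explicit Tracker(size_t count) : refCount(count) {}
        size_t refCount;
        std::shared_ptr<Fence> mergedFence;
    };
    std::mutex mMutex;
    std::unordered_map<uint64_t, std::unique_ptr<Tracker>> mTrackers;
};
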
diff --git a/services/mediacodec/minijail/minijail.cpp b/services/mediacodec/minijail/minijail.cpp
index 7926380..463f161 100644
--- a/services/mediacodec/minijail/minijail.cpp
+++ b/services/mediacodec/minijail/minijail.cpp
@@ -19,7 +19,8 @@
 
 #include <unistd.h>
 
-#include <android/log.h>
+#include <log/log.h>
+
 #include <libminijail.h>
 
 #include "minijail.h"
diff --git a/services/mediaextractor/minijail/minijail.cpp b/services/mediaextractor/minijail/minijail.cpp
index 8291633..c44d00d 100644
--- a/services/mediaextractor/minijail/minijail.cpp
+++ b/services/mediaextractor/minijail/minijail.cpp
@@ -19,7 +19,8 @@
 
 #include <unistd.h>
 
-#include <android/log.h>
+#include <log/log.h>
+
 #include <libminijail.h>
 
 #include "minijail.h"
diff --git a/services/radio/Android.mk b/services/radio/Android.mk
index 74f1fe0..1b50dc3 100644
--- a/services/radio/Android.mk
+++ b/services/radio/Android.mk
@@ -46,6 +46,7 @@
     libhidlbase \
     libhidltransport \
     libbase \
+    libaudiohal \
     android.hardware.broadcastradio@1.0
 endif
 
diff --git a/services/radio/RadioHalHidl.cpp b/services/radio/RadioHalHidl.cpp
index 34a6db7..032d3fd 100644
--- a/services/radio/RadioHalHidl.cpp
+++ b/services/radio/RadioHalHidl.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "RadioHalHidl"
 //#define LOG_NDEBUG 0
 
+#include <media/audiohal/hidl/HalDeathHandler.h>
 #include <utils/Log.h>
 #include <utils/misc.h>
 #include <system/radio_metadata.h>
@@ -333,11 +334,27 @@
 RadioHalHidl::Tuner::Tuner(sp<TunerCallbackInterface> callback, sp<RadioHalHidl> module)
     : TunerInterface(), mHalTuner(NULL), mCallback(callback), mParentModule(module)
 {
+    // Make sure the handler we are passing in only deals with const members,
+    // as it can be called on an arbitrary thread. Capture 'this' by value so
+    // the handler does not hold a dangling reference to a local variable.
+    HalDeathHandler::getInstance()->registerAtExitHandler(
+            this, [this]() { sendHwFailureEvent(); });
 }
 
 
 RadioHalHidl::Tuner::~Tuner()
 {
+    HalDeathHandler::getInstance()->unregisterAtExitHandler(this);
+}
+
+void RadioHalHidl::Tuner::setHalTuner(sp<ITuner>& halTuner) {
+    if (mHalTuner != 0) {
+        mHalTuner->unlinkToDeath(HalDeathHandler::getInstance());
+    }
+    mHalTuner = halTuner;
+    if (mHalTuner != 0) {
+        mHalTuner->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+    }
 }
 
 void RadioHalHidl::Tuner::handleHwFailure()
@@ -347,14 +364,19 @@
     if (parentModule != 0) {
         parentModule->clearService();
     }
+    sendHwFailureEvent();
+    mHalTuner.clear();
+}
+
+void RadioHalHidl::Tuner::sendHwFailureEvent() const
+{
     radio_hal_event_t event;
     memset(&event, 0, sizeof(radio_hal_event_t));
     event.type = RADIO_EVENT_HW_FAILURE;
     onCallback(&event);
-    mHalTuner.clear();
 }
 
-void RadioHalHidl::Tuner::onCallback(radio_hal_event_t *halEvent)
+void RadioHalHidl::Tuner::onCallback(radio_hal_event_t *halEvent) const
 {
     if (mCallback != 0) {
         mCallback->onEvent(halEvent);
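
The RadioHalHidl change above registers a per-tuner at-exit handler with the shared HalDeathHandler, so a HAL process death is reported to clients as a RADIO_EVENT_HW_FAILURE callback, and setHalTuner() re-links the death recipient whenever the underlying ITuner changes. Below is a rough sketch of that registration pattern in plain C++; DeathRegistry and its method names are illustrative stand-ins, not the actual HalDeathHandler API.

// Illustrative sketch only -- a simplified stand-in for a shared
// death-notification registry such as HalDeathHandler.
#include <functional>
#include <mutex>
#include <unordered_map>

class DeathRegistry {
public:
    using Handler = std::function<void()>;

    // Registers a handler keyed by an opaque cookie (typically the object
    // that wants to be told when the HAL process dies).
    void registerAtExit(void* cookie, Handler handler) {
        std::lock_guard<std::mutex> lock(mMutex);
        mHandlers[cookie] = std::move(handler);
    }

    void unregisterAtExit(void* cookie) {
        std::lock_guard<std::mutex> lock(mMutex);
        mHandlers.erase(cookie);
    }

    // Would be driven by the binder/HIDL "service died" notification; it
    // runs every registered handler on whatever thread delivers the event,
    // which is why handlers should only touch const or thread-safe state.
    void onServiceDied() {
        std::lock_guard<std::mutex> lock(mMutex);
        for (auto& entry : mHandlers) entry.second();
    }

private:
    std::mutex mMutex;
    std::unordered_map<void*, Handler> mHandlers;
};

// Usage analogous to the Tuner constructor/destructor in the diff:
//   registry.registerAtExit(this, [this] { sendHwFailureEvent(); });
//   ...
//   registry.unregisterAtExit(this);
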
diff --git a/services/radio/RadioHalHidl.h b/services/radio/RadioHalHidl.h
index b60a95e..38e181a 100644
--- a/services/radio/RadioHalHidl.h
+++ b/services/radio/RadioHalHidl.h
@@ -78,17 +78,18 @@
             virtual Return<void> newMetadata(uint32_t channel, uint32_t subChannel,
                                          const ::android::hardware::hidl_vec<MetaData>& metadata);
 
-            void setHalTuner(sp<ITuner>& halTuner) { mHalTuner = halTuner; }
+            void setHalTuner(sp<ITuner>& halTuner);
             sp<ITuner> getHalTuner() { return mHalTuner; }
 
         private:
             virtual          ~Tuner();
 
-                    void     onCallback(radio_hal_event_t *halEvent);
+                    void     onCallback(radio_hal_event_t *halEvent) const;
                     void     handleHwFailure();
+                    void     sendHwFailureEvent() const;
 
             sp<ITuner> mHalTuner;
-            sp<TunerCallbackInterface>  mCallback;
+            const sp<TunerCallbackInterface> mCallback;
             wp<RadioHalHidl> mParentModule;
         };
 
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index 63a05a6..3e7a7ce 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -48,6 +48,7 @@
     libhidlbase \
     libhidltransport \
     libbase \
+    libaudiohal \
     android.hardware.soundtrigger@2.0 \
     android.hardware.audio.common@2.0
 endif
diff --git a/services/soundtrigger/SoundTriggerHalHidl.cpp b/services/soundtrigger/SoundTriggerHalHidl.cpp
index eb9d38d..7cc8a2b 100644
--- a/services/soundtrigger/SoundTriggerHalHidl.cpp
+++ b/services/soundtrigger/SoundTriggerHalHidl.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "SoundTriggerHalHidl"
 //#define LOG_NDEBUG 0
 
+#include <media/audiohal/hidl/HalDeathHandler.h>
 #include <utils/Log.h>
 #include "SoundTriggerHalHidl.h"
 #include <hwbinder/IPCThreadState.h>
@@ -59,7 +60,7 @@
         }
     } else {
         ALOGE("getProperties error %s", hidlReturn.description().c_str());
-        return UNKNOWN_ERROR;
+        return FAILED_TRANSACTION;
     }
     ALOGI("getProperties ret %d", ret);
     return ret;
@@ -132,7 +133,7 @@
         }
     } else {
         ALOGE("loadSoundModel error %s", hidlReturn.description().c_str());
-        return UNKNOWN_ERROR;
+        return FAILED_TRANSACTION;
     }
 
     return ret;
@@ -159,7 +160,7 @@
 
     if (!hidlReturn.isOk()) {
         ALOGE("unloadSoundModel error %s", hidlReturn.description().c_str());
-        return UNKNOWN_ERROR;
+        return FAILED_TRANSACTION;
     }
 
     return hidlReturn;
@@ -197,7 +198,7 @@
 
     if (!hidlReturn.isOk()) {
         ALOGE("startRecognition error %s", hidlReturn.description().c_str());
-        return UNKNOWN_ERROR;
+        return FAILED_TRANSACTION;
     }
     return hidlReturn;
 }
@@ -223,7 +224,7 @@
 
     if (!hidlReturn.isOk()) {
         ALOGE("stopRecognition error %s", hidlReturn.description().c_str());
-        return UNKNOWN_ERROR;
+        return FAILED_TRANSACTION;
     }
     return hidlReturn;
 }
@@ -243,7 +244,7 @@
 
     if (!hidlReturn.isOk()) {
         ALOGE("stopAllRecognitions error %s", hidlReturn.description().c_str());
-        return UNKNOWN_ERROR;
+        return FAILED_TRANSACTION;
     }
     return hidlReturn;
 }
@@ -267,6 +268,9 @@
         std::string serviceName = "sound_trigger.";
         serviceName.append(mModuleName);
         mISoundTrigger = ISoundTriggerHw::getService(serviceName);
+        if (mISoundTrigger != 0) {
+            mISoundTrigger->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+        }
     }
     return mISoundTrigger;
 }
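
The SoundTriggerHalHidl change replaces UNKNOWN_ERROR with FAILED_TRANSACTION whenever the HIDL Return object reports a transport failure (for example a dead or unreachable HAL process), while HAL-reported statuses are still passed through unchanged, and it links the freshly obtained service to the shared HalDeathHandler. The snippet below sketches that error-mapping pattern with standard C++ stand-ins; MockReturn, toStatus, and the numeric status values are illustrative only and do not reflect the real Return<T> wrapper or Android's Errors.h values.

// Illustrative sketch only -- MockReturn is a stand-in for the HIDL
// Return<T> wrapper used in the diff.
#include <cstdint>

enum Status : int32_t {
    OK = 0,
    FAILED_TRANSACTION = -32,   // value is illustrative, not authoritative
};

template <typename T>
struct MockReturn {
    bool transportOk;   // false if the binder/HIDL transaction itself failed
    T value;            // status (or payload) reported by the HAL on success
    bool isOk() const { return transportOk; }
};

// Pattern used throughout SoundTriggerHalHidl after this change: a failed
// transaction is surfaced as FAILED_TRANSACTION, while an error returned by
// the HAL itself is forwarded as-is.
static int32_t toStatus(const MockReturn<int32_t>& hidlReturn) {
    if (!hidlReturn.isOk()) {
        return FAILED_TRANSACTION;
    }
    return hidlReturn.value;
}
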