Merge "Camera: Handle RESULT_ERROR followed by BUFFER_ERROR" into oc-mr1-dev
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index e763934..153fce3 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -46,6 +46,8 @@
     if (status != NO_ERROR) goto error;
     status = parcel->writeInt32((int32_t) getFormat());
     if (status != NO_ERROR) goto error;
+    status = parcel->writeInt32((int32_t) getDirection());
+    if (status != NO_ERROR) goto error;
     status = parcel->writeInt32(getBufferCapacity());
     if (status != NO_ERROR) goto error;
     return NO_ERROR;
@@ -73,9 +75,12 @@
     setFormat(value);
     status = parcel->readInt32(&value);
     if (status != NO_ERROR) goto error;
+    setDirection((aaudio_direction_t) value);
+    status = parcel->readInt32(&value);
+    if (status != NO_ERROR) goto error;
     setBufferCapacity(value);
     return NO_ERROR;
 error:
     ALOGE("AAudioStreamConfiguration.readFromParcel(): read failed = %d", status);
     return status;
-}
\ No newline at end of file
+}
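
The configuration parcel gains a direction field, and writeToParcel()/readFromParcel() have to stay field-for-field symmetric: a value written between format and buffer capacity must be read back in exactly the same slot, otherwise every later field is shifted. Below is a minimal sketch of that ordering invariant using a plain vector of int32_t as a stand-in for android::Parcel; the struct and field names are illustrative, not taken from the patch.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Illustrative stand-in for the parcelable configuration. The write and
    // read paths must touch the fields in the same order.
    struct StreamConfigSketch {
        int32_t format = 0;
        int32_t direction = 0;        // the newly serialized field
        int32_t bufferCapacity = 0;

        void write(std::vector<int32_t> &out) const {
            out.push_back(format);
            out.push_back(direction);     // inserted in the same slot on both sides
            out.push_back(bufferCapacity);
        }

        static StreamConfigSketch read(const std::vector<int32_t> &in) {
            StreamConfigSketch c;
            c.format         = in.at(0);
            c.direction      = in.at(1);  // read back in write order
            c.bufferCapacity = in.at(2);
            return c;
        }
    };

    int main() {
        StreamConfigSketch a;
        a.format = 2; a.direction = 1; a.bufferCapacity = 1920;
        std::vector<int32_t> blob;
        a.write(blob);
        StreamConfigSketch b = StreamConfigSketch::read(blob);
        assert(b.direction == a.direction && b.bufferCapacity == a.bufferCapacity);
        return 0;
    }
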
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
index abdcf5b..1200ab2 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -46,9 +46,6 @@
     status_t status = parcel->writeInt32((int32_t) mUserId);
     if (status != NO_ERROR) goto error;
 
-    status = parcel->writeInt32((int32_t) mDirection);
-    if (status != NO_ERROR) goto error;
-
     status = parcel->writeBool(mSharingModeMatchRequired);
     if (status != NO_ERROR) goto error;
 
@@ -71,10 +68,6 @@
     if (status != NO_ERROR) goto error;
     mUserId = (uid_t) temp;
 
-    status = parcel->readInt32(&temp);
-    if (status != NO_ERROR) goto error;
-    mDirection = (aaudio_direction_t) temp;
-
     status = parcel->readBool(&mSharingModeMatchRequired);
     if (status != NO_ERROR) goto error;
 
@@ -98,7 +91,6 @@
 void AAudioStreamRequest::dump() const {
     ALOGD("AAudioStreamRequest mUserId    = %d", mUserId);
     ALOGD("AAudioStreamRequest mProcessId = %d", mProcessId);
-    ALOGD("AAudioStreamRequest mDirection = %d", mDirection);
     ALOGD("AAudioStreamRequest mSharingModeMatchRequired = %d", mSharingModeMatchRequired);
     ALOGD("AAudioStreamRequest mInService = %d", mInService);
     mConfiguration.dump();
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.h b/media/libaaudio/src/binding/AAudioStreamRequest.h
index b0fa96a..492f69d 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.h
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.h
@@ -52,14 +52,6 @@
         mProcessId = processId;
     }
 
-    aaudio_direction_t getDirection() const {
-        return mDirection;
-    }
-
-    void setDirection(aaudio_direction_t direction) {
-        mDirection = direction;
-    }
-
     bool isSharingModeMatchRequired() const {
         return mSharingModeMatchRequired;
     }
@@ -94,9 +86,8 @@
 
 protected:
     AAudioStreamConfiguration  mConfiguration;
-    uid_t                      mUserId;
-    pid_t                      mProcessId;
-    aaudio_direction_t         mDirection;
+    uid_t                      mUserId = (uid_t) -1;
+    pid_t                      mProcessId = (pid_t) -1;
     bool                       mSharingModeMatchRequired = false;
     bool                       mInService = false; // Stream opened by AAudioservice
 };
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index bdebf8f..036d931 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -105,13 +105,13 @@
     // Build the request to send to the server.
     request.setUserId(getuid());
     request.setProcessId(getpid());
-    request.setDirection(getDirection());
     request.setSharingModeMatchRequired(isSharingModeMatchRequired());
     request.setInService(mInService);
 
     request.getConfiguration().setDeviceId(getDeviceId());
     request.getConfiguration().setSampleRate(getSampleRate());
     request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
+    request.getConfiguration().setDirection(getDirection());
     request.getConfiguration().setSharingMode(getSharingMode());
 
     request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
@@ -357,6 +357,7 @@
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
+
     return mServiceInterface.startClient(mServiceStreamHandle, client, clientHandle);
 }
 
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 13cf16c..3523294 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -93,6 +93,10 @@
 
     aaudio_result_t stopClient(audio_port_handle_t clientHandle);
 
+    aaudio_handle_t getServiceHandle() const {
+        return mServiceStreamHandle;
+    }
+
 protected:
 
     aaudio_result_t processData(void *buffer,
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index 65c2b46..82445e7 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -34,6 +34,16 @@
 AAudioStreamParameters::AAudioStreamParameters() {}
 AAudioStreamParameters::~AAudioStreamParameters() {}
 
+void AAudioStreamParameters::copyFrom(const AAudioStreamParameters &other) {
+    mSamplesPerFrame = other.mSamplesPerFrame;
+    mSampleRate      = other.mSampleRate;
+    mDeviceId        = other.mDeviceId;
+    mSharingMode     = other.mSharingMode;
+    mAudioFormat     = other.mAudioFormat;
+    mDirection       = other.mDirection;
+    mBufferCapacity  = other.mBufferCapacity;
+}
+
 aaudio_result_t AAudioStreamParameters::validate() const {
     if (mSamplesPerFrame != AAUDIO_UNSPECIFIED
         && (mSamplesPerFrame < SAMPLES_PER_FRAME_MIN || mSamplesPerFrame > SAMPLES_PER_FRAME_MAX)) {
@@ -78,6 +88,16 @@
         return AAUDIO_ERROR_OUT_OF_RANGE;
     }
 
+    switch (mDirection) {
+        case AAUDIO_DIRECTION_INPUT:
+        case AAUDIO_DIRECTION_OUTPUT:
+            break; // valid
+        default:
+            ALOGE("AAudioStreamParameters: direction not valid = %d", mDirection);
+            return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+            // break;
+    }
+
     return AAUDIO_OK;
 }
 
@@ -87,5 +107,7 @@
     ALOGD("AAudioStreamParameters mSamplesPerFrame = %d", mSamplesPerFrame);
     ALOGD("AAudioStreamParameters mSharingMode     = %d", (int)mSharingMode);
     ALOGD("AAudioStreamParameters mAudioFormat     = %d", (int)mAudioFormat);
+    ALOGD("AAudioStreamParameters mDirection       = %d", mDirection);
     ALOGD("AAudioStreamParameters mBufferCapacity  = %d", mBufferCapacity);
-}
\ No newline at end of file
+}
+
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.h b/media/libaaudio/src/core/AAudioStreamParameters.h
index 97379cc..5e67c93 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.h
+++ b/media/libaaudio/src/core/AAudioStreamParameters.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 
 #include <aaudio/AAudio.h>
+#include <utility/AAudioUtilities.h>
 
 namespace aaudio {
 
@@ -79,6 +80,24 @@
         mBufferCapacity = frames;
     }
 
+    aaudio_direction_t getDirection() const {
+        return mDirection;
+    }
+
+    void setDirection(aaudio_direction_t direction) {
+        mDirection = direction;
+    }
+
+    int32_t calculateBytesPerFrame() const {
+        return getSamplesPerFrame() * AAudioConvert_formatToSizeInBytes(getFormat());
+    }
+
+    /**
+     * Copy the variables defined in another AAudioStreamParameters instance into this one.
+     * @param other
+     */
+    void copyFrom(const AAudioStreamParameters &other);
+
     virtual aaudio_result_t validate() const;
 
     void dump() const;
@@ -89,9 +108,10 @@
     int32_t                    mDeviceId        = AAUDIO_UNSPECIFIED;
     aaudio_sharing_mode_t      mSharingMode     = AAUDIO_SHARING_MODE_SHARED;
     aaudio_format_t            mAudioFormat     = AAUDIO_FORMAT_UNSPECIFIED;
+    aaudio_direction_t         mDirection       = AAUDIO_DIRECTION_OUTPUT;
     int32_t                    mBufferCapacity  = AAUDIO_UNSPECIFIED;
 };
 
 } /* namespace aaudio */
 
-#endif //AAUDIO_STREAM_PARAMETERS_H
\ No newline at end of file
+#endif //AAUDIO_STREAM_PARAMETERS_H
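
calculateBytesPerFrame() is simple arithmetic: the channel count multiplied by the per-sample size of the format, so a stereo 16-bit PCM stream occupies 2 * 2 = 4 bytes per frame. Here is a hedged one-function sketch of the same calculation; the format codes and sizes below are the usual PCM widths used purely for illustration, not the AAudio constants.

    #include <cstdint>

    // Illustrative per-sample sizes for common PCM formats.
    int32_t bytesPerSample(int32_t format) {
        switch (format) {
            case 1:  return 2;   // 16-bit integer PCM
            case 2:  return 4;   // 32-bit float PCM
            default: return 0;   // unspecified or invalid
        }
    }

    int32_t bytesPerFrame(int32_t samplesPerFrame, int32_t format) {
        // e.g. stereo int16: 2 samples/frame * 2 bytes/sample = 4 bytes/frame
        return samplesPerFrame * bytesPerSample(format);
    }
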
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 43a1ef1..09ebb3e 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -184,16 +184,6 @@
         return result;
     }
 
-    switch (mDirection) {
-        case AAUDIO_DIRECTION_INPUT:
-        case AAUDIO_DIRECTION_OUTPUT:
-            break; // valid
-        default:
-            ALOGE("AudioStreamBuilder: direction not valid = %d", mDirection);
-            return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
-            // break;
-    }
-
     switch (mPerformanceMode) {
         case AAUDIO_PERFORMANCE_MODE_NONE:
         case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index 6e548b1..a43cfa8 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -35,15 +35,6 @@
 
     ~AudioStreamBuilder();
 
-    aaudio_direction_t getDirection() const {
-        return mDirection;
-    }
-
-    AudioStreamBuilder* setDirection(aaudio_direction_t direction) {
-        mDirection = direction;
-        return this;
-    }
-
     bool isSharingModeMatchRequired() const {
         return mSharingModeMatchRequired;
     }
@@ -113,7 +104,6 @@
 
 private:
     bool                       mSharingModeMatchRequired = false; // must match sharing mode requested
-    aaudio_direction_t         mDirection = AAUDIO_DIRECTION_OUTPUT;
     aaudio_performance_mode_t  mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
 
     AAudioStream_dataCallback  mDataCallbackProc = nullptr;  // external callback functions
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index 1f188f3..2a74512 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -105,11 +105,17 @@
     ProfileLevel profileLevel;
     profileLevel.mProfile = profile;
     profileLevel.mLevel = level;
-    mProfileLevels.push_back(profileLevel);
+    if (mProfileLevelsSorted.indexOf(profileLevel) < 0) {
+        mProfileLevels.push_back(profileLevel);
+        mProfileLevelsSorted.add(profileLevel);
+    }
 }
 
 void MediaCodecInfo::CapabilitiesBuilder::addColorFormat(uint32_t format) {
-    mColorFormats.push(format);
+    if (mColorFormatsSorted.indexOf(format) < 0) {
+        mColorFormats.push(format);
+        mColorFormatsSorted.add(format);
+    }
 }
 
 void MediaCodecInfo::CapabilitiesBuilder::addFlags(uint32_t flags) {
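
The builder now keeps a sorted shadow container beside each ordered vector: the sorted copy gives a fast membership test so duplicates reported by a component are dropped, while the vector preserves the order in which profiles and color formats were reported. The operator< added to ProfileLevel in MediaCodecInfo.h below provides the strict weak ordering the sorted container needs, comparing the profile first and the level as a tie-breaker. Here is a self-contained sketch of the same pattern with the standard library, using std::set in place of SortedVector; the names are illustrative.

    #include <cstdint>
    #include <set>
    #include <vector>

    struct ProfileLevelSketch {
        uint32_t profile;
        uint32_t level;
        // Lexicographic order: profile first, then level as the tie-breaker.
        bool operator<(const ProfileLevelSketch &o) const {
            return profile < o.profile || (profile == o.profile && level < o.level);
        }
    };

    class CapsBuilderSketch {
    public:
        void addProfileLevel(uint32_t profile, uint32_t level) {
            ProfileLevelSketch pl{profile, level};
            // Append only if the sorted shadow copy has not seen this entry yet.
            if (mSeen.insert(pl).second) {
                mOrdered.push_back(pl);
            }
        }
        const std::vector<ProfileLevelSketch> &profileLevels() const { return mOrdered; }

    private:
        std::vector<ProfileLevelSketch> mOrdered;  // preserves reporting order
        std::set<ProfileLevelSketch>    mSeen;     // O(log n) duplicate check
    };
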
diff --git a/media/libmedia/include/media/IOMX.h b/media/libmedia/include/media/IOMX.h
index d868860..e69c02d 100644
--- a/media/libmedia/include/media/IOMX.h
+++ b/media/libmedia/include/media/IOMX.h
@@ -31,6 +31,7 @@
 
 #include <media/openmax/OMX_Core.h>
 #include <media/openmax/OMX_Video.h>
+#include <media/openmax/OMX_VideoExt.h>
 
 namespace android {
 
diff --git a/media/libmedia/include/media/MediaCodecInfo.h b/media/libmedia/include/media/MediaCodecInfo.h
index 6b50f22..ef641d2 100644
--- a/media/libmedia/include/media/MediaCodecInfo.h
+++ b/media/libmedia/include/media/MediaCodecInfo.h
@@ -40,6 +40,9 @@
     struct ProfileLevel {
         uint32_t mProfile;
         uint32_t mLevel;
+        bool operator <(const ProfileLevel &o) const {
+            return mProfile < o.mProfile || (mProfile == o.mProfile && mLevel < o.mLevel);
+        }
     };
 
     struct Capabilities : public RefBase {
@@ -61,7 +64,9 @@
 
     protected:
         Vector<ProfileLevel> mProfileLevels;
+        SortedVector<ProfileLevel> mProfileLevelsSorted;
         Vector<uint32_t> mColorFormats;
+        SortedVector<uint32_t> mColorFormatsSorted;
         uint32_t mFlags;
         sp<AMessage> mDetails;
 
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 63ad0e0..0e60b2e 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -4173,11 +4173,12 @@
 // static
 int /* OMX_VIDEO_AVCLEVELTYPE */ ACodec::getAVCLevelFor(
         int width, int height, int rate, int bitrate,
-        OMX_VIDEO_AVCPROFILETYPE profile) {
+        OMX_VIDEO_AVCPROFILEEXTTYPE profile) {
     // convert bitrate to main/baseline profile kbps equivalent
-    switch (profile) {
+    switch ((uint32_t)profile) {
         case OMX_VIDEO_AVCProfileHigh10:
             bitrate = divUp(bitrate, 3000); break;
+        case OMX_VIDEO_AVCProfileConstrainedHigh:
         case OMX_VIDEO_AVCProfileHigh:
             bitrate = divUp(bitrate, 1250); break;
         default:
@@ -8262,6 +8263,17 @@
             }
             builder->addProfileLevel(param.eProfile, param.eLevel);
 
+            // AVC components may not list the constrained profiles explicitly, but
+            // decoders that support a profile also support its constrained version.
+            // Encoders must explicitly support constrained profiles.
+            if (!isEncoder && mime.equalsIgnoreCase(MEDIA_MIMETYPE_VIDEO_AVC)) {
+                if (param.eProfile == OMX_VIDEO_AVCProfileHigh) {
+                    builder->addProfileLevel(OMX_VIDEO_AVCProfileConstrainedHigh, param.eLevel);
+                } else if (param.eProfile == OMX_VIDEO_AVCProfileBaseline) {
+                    builder->addProfileLevel(OMX_VIDEO_AVCProfileConstrainedBaseline, param.eLevel);
+                }
+            }
+
             if (index == kMaxIndicesToCheck) {
                 ALOGW("[%s] stopping checking profiles after %u: %x/%x",
                         name.c_str(), index,
@@ -8275,7 +8287,6 @@
         OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
         InitOMXParams(&portFormat);
         portFormat.nPortIndex = isEncoder ? kPortIndexInput : kPortIndexOutput;
-        Vector<uint32_t> supportedColors; // shadow copy to check for duplicates
         for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
             portFormat.nIndex = index;
             status_t err = omxNode->getParameter(
@@ -8289,19 +8300,8 @@
             if (IsFlexibleColorFormat(
                     omxNode, portFormat.eColorFormat, false /* usingNativeWindow */,
                     &flexibleEquivalent)) {
-                bool marked = false;
-                for (size_t i = 0; i < supportedColors.size(); ++i) {
-                    if (supportedColors[i] == flexibleEquivalent) {
-                        marked = true;
-                        break;
-                    }
-                }
-                if (!marked) {
-                    supportedColors.push(flexibleEquivalent);
-                    builder->addColorFormat(flexibleEquivalent);
-                }
+                builder->addColorFormat(flexibleEquivalent);
             }
-            supportedColors.push(portFormat.eColorFormat);
             builder->addColorFormat(portFormat.eColorFormat);
 
             if (index == kMaxIndicesToCheck) {
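
The ACodec change leans on the H.264 profile hierarchy: a Constrained Baseline or Constrained High bitstream is, by definition, also a valid Baseline or High bitstream, so a decoder that reports the full profile can safely be advertised for the constrained one at the same level, while an encoder is only credited with a constrained profile if it lists it itself. A hedged sketch of that expansion step for a decoder's capability list follows; the enum values are placeholders rather than the OMX constants.

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Placeholder profile IDs; the real code uses OMX_VIDEO_AVCPROFILE(EXT)TYPE values.
    enum AvcProfileSketch : uint32_t {
        kBaseline = 1,
        kHigh = 8,
        kConstrainedBaseline = 0x10000,
        kConstrainedHigh = 0x80000,
    };

    // For decoders, each reported full profile implies support for its
    // constrained subset at the same level; encoders get no implied entries.
    std::vector<std::pair<uint32_t, uint32_t>> impliedAvcProfiles(
            bool isEncoder, uint32_t profile, uint32_t level) {
        std::vector<std::pair<uint32_t, uint32_t>> extra;
        if (isEncoder) {
            return extra;
        }
        if (profile == kHigh) {
            extra.emplace_back(kConstrainedHigh, level);
        } else if (profile == kBaseline) {
            extra.emplace_back(kConstrainedBaseline, level);
        }
        return extra;
    }
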
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index a3bda5d..3ef8f2a 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -237,6 +237,11 @@
     OMX_VIDEO_AVCPROFILETYPE codecProfile;
     OMX_VIDEO_AVCLEVELTYPE codecLevel;
     if (profiles.map(profile, &codecProfile)) {
+        if (profile == 66 && (constraints & 0x40)) {
+            codecProfile = (OMX_VIDEO_AVCPROFILETYPE)OMX_VIDEO_AVCProfileConstrainedBaseline;
+        } else if (profile == 100 && (constraints & 0x0C) == 0x0C) {
+            codecProfile = (OMX_VIDEO_AVCPROFILETYPE)OMX_VIDEO_AVCProfileConstrainedHigh;
+        }
         format->setInt32("profile", codecProfile);
         if (levels.map(level, &codecLevel)) {
             // for 9 && 11 decide level based on profile and constraint_set3 flag
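
The Utils.cpp hunk reads the SPS constraint flags to upgrade the reported profile: profile_idc 66 is Baseline, and bit 0x40 of the constraint byte is constraint_set1_flag, which marks Constrained Baseline; profile_idc 100 is High, and 0x0C covers constraint_set4_flag and constraint_set5_flag, which together mark Constrained High. A compact sketch of that mapping follows; the returned enum is illustrative, not the OMX profile type.

    #include <cstdint>

    enum class MappedAvcProfile { Baseline, ConstrainedBaseline, High, ConstrainedHigh, Other };

    // profileIdc and constraintFlags come straight from the SPS
    // (profile_idc and the constraint_set byte).
    MappedAvcProfile mapAvcProfile(uint8_t profileIdc, uint8_t constraintFlags) {
        if (profileIdc == 66) {
            // constraint_set1_flag => Constrained Baseline
            return (constraintFlags & 0x40) ? MappedAvcProfile::ConstrainedBaseline
                                            : MappedAvcProfile::Baseline;
        }
        if (profileIdc == 100) {
            // constraint_set4_flag and constraint_set5_flag => Constrained High
            return ((constraintFlags & 0x0C) == 0x0C) ? MappedAvcProfile::ConstrainedHigh
                                                      : MappedAvcProfile::High;
        }
        return MappedAvcProfile::Other;
    }
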
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
index 248ab6d..c342b6c 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
@@ -48,10 +48,14 @@
         (IVD_CONTROL_API_COMMAND_TYPE_T)IH264D_CMD_CTL_SET_NUM_CORES
 
 static const CodecProfileLevel kProfileLevels[] = {
+    { OMX_VIDEO_AVCProfileConstrainedBaseline, OMX_VIDEO_AVCLevel52 },
+
     { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel52 },
 
     { OMX_VIDEO_AVCProfileMain,     OMX_VIDEO_AVCLevel52 },
 
+    { OMX_VIDEO_AVCProfileConstrainedHigh,     OMX_VIDEO_AVCLevel52 },
+
     { OMX_VIDEO_AVCProfileHigh,     OMX_VIDEO_AVCLevel52 },
 };
 
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index 326207b..ce7efba 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -74,33 +74,11 @@
 };
 
 static const CodecProfileLevel kProfileLevels[] = {
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel1  },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel1b },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel11 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel12 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel13 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel2  },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel21 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel22 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel3  },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel31 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel32 },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel4  },
-    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel41 },
-    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel1  },
-    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel1b },
-    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel11 },
-    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel12 },
-    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel13 },
-    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel2  },
-    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel21 },
-    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel22 },
-    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel3  },
-    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel31 },
-    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel32 },
-    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel4  },
-    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel41 },
+    { OMX_VIDEO_AVCProfileConstrainedBaseline, OMX_VIDEO_AVCLevel41 },
 
+    { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel41 },
+
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel41 },
 };
 
 static size_t GetCPUCoreCount() {
@@ -962,7 +940,8 @@
                 return OMX_ErrorUndefined;
             }
 
-            avcParams->eProfile = OMX_VIDEO_AVCProfileBaseline;
+            // TODO: maintain profile
+            avcParams->eProfile = (OMX_VIDEO_AVCPROFILETYPE)OMX_VIDEO_AVCProfileConstrainedBaseline;
             avcParams->eLevel = omxLevel;
             avcParams->nRefFrames = 1;
             avcParams->bUseHadamard = OMX_TRUE;
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
index 411a251..39b67ab 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
@@ -31,20 +31,12 @@
 namespace android {
 
 static const CodecProfileLevel kM4VProfileLevels[] = {
-    { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level0 },
-    { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level0b },
-    { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level1 },
-    { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level2 },
     { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level3 },
 };
 
 static const CodecProfileLevel kH263ProfileLevels[] = {
-    { OMX_VIDEO_H263ProfileBaseline, OMX_VIDEO_H263Level10 },
-    { OMX_VIDEO_H263ProfileBaseline, OMX_VIDEO_H263Level20 },
     { OMX_VIDEO_H263ProfileBaseline, OMX_VIDEO_H263Level30 },
     { OMX_VIDEO_H263ProfileBaseline, OMX_VIDEO_H263Level45 },
-    { OMX_VIDEO_H263ProfileISWV2,    OMX_VIDEO_H263Level10 },
-    { OMX_VIDEO_H263ProfileISWV2,    OMX_VIDEO_H263Level20 },
     { OMX_VIDEO_H263ProfileISWV2,    OMX_VIDEO_H263Level30 },
     { OMX_VIDEO_H263ProfileISWV2,    OMX_VIDEO_H263Level45 },
 };
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index d049df5..b9f48c4 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -94,7 +94,8 @@
     // some OMX components as auto level, and by others as invalid level.
     static int /* OMX_VIDEO_AVCLEVELTYPE */ getAVCLevelFor(
             int width, int height, int rate, int bitrate,
-            OMX_VIDEO_AVCPROFILETYPE profile = OMX_VIDEO_AVCProfileBaseline);
+            OMX_VIDEO_AVCPROFILEEXTTYPE profile =
+                (OMX_VIDEO_AVCPROFILEEXTTYPE)OMX_VIDEO_AVCProfileBaseline);
 
     // Quirk still supported, even though deprecated
     enum Quirks {
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index ec2f5b9..f41219e 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -26,6 +26,10 @@
 #include <utility/AAudioUtilities.h>
 
 #include "AAudioEndpointManager.h"
+#include "AAudioServiceEndpointShared.h"
+#include "AAudioServiceEndpointMMAP.h"
+#include "AAudioServiceEndpointCapture.h"
+#include "AAudioServiceEndpointPlay.h"
 
 using namespace android;
 using namespace aaudio;
@@ -34,159 +38,241 @@
 
 AAudioEndpointManager::AAudioEndpointManager()
         : Singleton<AAudioEndpointManager>()
-        , mInputs()
-        , mOutputs() {
+        , mSharedStreams()
+        , mExclusiveStreams() {
 }
 
 std::string AAudioEndpointManager::dump() const {
     std::stringstream result;
-    const bool isLocked = AAudio_tryUntilTrue(
-            [this]()->bool { return mLock.try_lock(); } /* f */,
-            50 /* times */,
-            20 /* sleepMs */);
-    if (!isLocked) {
-        result << "EndpointManager may be deadlocked\n";
-    }
+    int index = 0;
 
     result << "AAudioEndpointManager:" << "\n";
-    size_t inputs = mInputs.size();
-    result << "Input Endpoints: " << inputs << "\n";
-    for (const auto &input : mInputs) {
-        result << "  Input: " << input->dump() << "\n";
+
+    const bool isSharedLocked = AAudio_tryUntilTrue(
+            [this]()->bool { return mSharedLock.try_lock(); } /* f */,
+            50 /* times */,
+            20 /* sleepMs */);
+    if (!isSharedLocked) {
+        result << "AAudioEndpointManager Shared may be deadlocked\n";
     }
 
-    size_t outputs = mOutputs.size();
-    result << "Output Endpoints: " << outputs << "\n";
-    for (const auto &output : mOutputs) {
-        result << "  Output: " << output->dump() << "\n";
+    {
+        const bool isExclusiveLocked = AAudio_tryUntilTrue(
+                [this]() -> bool { return mExclusiveLock.try_lock(); } /* f */,
+                50 /* times */,
+                20 /* sleepMs */);
+        if (!isExclusiveLocked) {
+            result << "AAudioEndpointManager Exclusive may be deadlocked\n";
+        }
+
+        result << "Exclusive MMAP Endpoints: " << mExclusiveStreams.size() << "\n";
+        index = 0;
+        for (const auto &output : mExclusiveStreams) {
+            result << "  #" << index++ << ":";
+            result << output->dump() << "\n";
+        }
+
+        if (isExclusiveLocked) {
+            mExclusiveLock.unlock();
+        }
     }
 
-    if (isLocked) {
-        mLock.unlock();
+    result << "Shared Endpoints: " << mSharedStreams.size() << "\n";
+    index = 0;
+    for (const auto &input : mSharedStreams) {
+        result << "  #" << index++ << ":";
+        result << input->dump() << "\n";
+    }
+
+    if (isSharedLocked) {
+        mSharedLock.unlock();
     }
     return result.str();
 }
 
-AAudioServiceEndpoint *AAudioEndpointManager::openEndpoint(AAudioService &audioService,
-        const AAudioStreamConfiguration& configuration, aaudio_direction_t direction) {
-    AAudioServiceEndpoint *endpoint = nullptr;
-    AAudioServiceEndpointCapture *capture = nullptr;
-    AAudioServiceEndpointPlay *player = nullptr;
-    std::lock_guard<std::mutex> lock(mLock);
+
+// Try to find an existing endpoint.
+sp<AAudioServiceEndpoint> AAudioEndpointManager::findExclusiveEndpoint_l(
+        const AAudioStreamConfiguration &configuration) {
+    sp<AAudioServiceEndpoint> endpoint;
+    for (const auto ep : mExclusiveStreams) {
+        if (ep->matches(configuration)) {
+            endpoint = ep;
+            break;
+        }
+    }
+
+    ALOGD("AAudioEndpointManager.findExclusiveEndpoint_l(), found %p for device = %d",
+          endpoint.get(), configuration.getDeviceId());
+    return endpoint;
+}
+
+// Try to find an existing endpoint.
+sp<AAudioServiceEndpointShared> AAudioEndpointManager::findSharedEndpoint_l(
+        const AAudioStreamConfiguration &configuration) {
+    sp<AAudioServiceEndpointShared> endpoint;
+    for (const auto ep  : mSharedStreams) {
+        if (ep->matches(configuration)) {
+            endpoint = ep;
+            break;
+        }
+    }
+
+    ALOGD("AAudioEndpointManager.findSharedEndpoint_l(), found %p for device = %d",
+          endpoint.get(), configuration.getDeviceId());
+    return endpoint;
+}
+
+sp<AAudioServiceEndpoint> AAudioEndpointManager::openEndpoint(AAudioService &audioService,
+                                        const aaudio::AAudioStreamRequest &request,
+                                        aaudio_sharing_mode_t sharingMode) {
+    if (sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE) {
+        return openExclusiveEndpoint(audioService, request);
+    } else {
+        return openSharedEndpoint(audioService, request);
+    }
+}
+
+sp<AAudioServiceEndpoint> AAudioEndpointManager::openExclusiveEndpoint(
+        AAudioService &aaudioService __unused,
+        const aaudio::AAudioStreamRequest &request) {
+
+    std::lock_guard<std::mutex> lock(mExclusiveLock);
+
+    const AAudioStreamConfiguration &configuration = request.getConstantConfiguration();
 
     // Try to find an existing endpoint.
+    sp<AAudioServiceEndpoint> endpoint = findExclusiveEndpoint_l(configuration);
 
+    // If we find an existing one then this one cannot be exclusive.
+    if (endpoint.get() != nullptr) {
+        ALOGE("AAudioEndpointManager.openExclusiveEndpoint() already in use");
+        // Already open so do not allow a second stream.
+        return nullptr;
+    } else {
+        sp<AAudioServiceEndpointMMAP> endpointMMap = new AAudioServiceEndpointMMAP();
+        ALOGE("AAudioEndpointManager.openExclusiveEndpoint(), created MMAP %p", endpointMMap.get());
+        endpoint = endpointMMap;
 
-
-    switch (direction) {
-        case AAUDIO_DIRECTION_INPUT:
-            for (AAudioServiceEndpoint *ep : mInputs) {
-                if (ep->matches(configuration)) {
-                    endpoint = ep;
-                    break;
-                }
-            }
-            break;
-        case AAUDIO_DIRECTION_OUTPUT:
-            for (AAudioServiceEndpoint *ep : mOutputs) {
-                if (ep->matches(configuration)) {
-                    endpoint = ep;
-                    break;
-                }
-            }
-            break;
-        default:
-            assert(false); // There are only two possible directions.
-            break;
-    }
-    ALOGD("AAudioEndpointManager::openEndpoint(), found %p for device = %d, dir = %d",
-          endpoint, configuration.getDeviceId(), (int)direction);
-
-    // If we can't find an existing one then open a new one.
-    if (endpoint == nullptr) {
-        // we must call openStream with audioserver identity
-        int64_t token = IPCThreadState::self()->clearCallingIdentity();
-        switch(direction) {
-            case AAUDIO_DIRECTION_INPUT:
-                capture = new AAudioServiceEndpointCapture(audioService);
-                endpoint = capture;
-                break;
-            case AAUDIO_DIRECTION_OUTPUT:
-                player = new AAudioServiceEndpointPlay(audioService);
-                endpoint = player;
-                break;
-            default:
-                break;
+        aaudio_result_t result = endpoint->open(request);
+        if (result != AAUDIO_OK) {
+            ALOGE("AAudioEndpointManager.openEndpoint(), open failed");
+            endpoint.clear();
+        } else {
+            mExclusiveStreams.push_back(endpointMMap);
         }
 
-        if (endpoint != nullptr) {
-            aaudio_result_t result = endpoint->open(configuration);
-            if (result != AAUDIO_OK) {
-                ALOGE("AAudioEndpointManager::findEndpoint(), open failed");
-                delete endpoint;
-                endpoint = nullptr;
-            } else {
-                switch(direction) {
-                    case AAUDIO_DIRECTION_INPUT:
-                        mInputs.push_back(capture);
-                        break;
-                    case AAUDIO_DIRECTION_OUTPUT:
-                        mOutputs.push_back(player);
-                        break;
-                    default:
-                        break;
-                }
-            }
-        }
-        ALOGD("AAudioEndpointManager::openEndpoint(), created %p for device = %d, dir = %d",
-              endpoint, configuration.getDeviceId(), (int)direction);
-        IPCThreadState::self()->restoreCallingIdentity(token);
+        ALOGD("AAudioEndpointManager.openEndpoint(), created %p for device = %d",
+              endpoint.get(), configuration.getDeviceId());
     }
 
-    if (endpoint != nullptr) {
-        ALOGD("AAudioEndpointManager::openEndpoint(), sampleRate = %d, framesPerBurst = %d",
-              endpoint->getSampleRate(), endpoint->getFramesPerBurst());
+    if (endpoint.get() != nullptr) {
         // Increment the reference count under this lock.
-        endpoint->setReferenceCount(endpoint->getReferenceCount() + 1);
+        endpoint->setOpenCount(endpoint->getOpenCount() + 1);
     }
     return endpoint;
 }
 
-void AAudioEndpointManager::closeEndpoint(AAudioServiceEndpoint *serviceEndpoint) {
-    std::lock_guard<std::mutex> lock(mLock);
-    if (serviceEndpoint == nullptr) {
-        return;
-    }
+sp<AAudioServiceEndpoint> AAudioEndpointManager::openSharedEndpoint(
+        AAudioService &aaudioService,
+        const aaudio::AAudioStreamRequest &request) {
 
-    // Decrement the reference count under this lock.
-    int32_t newRefCount = serviceEndpoint->getReferenceCount() - 1;
-    serviceEndpoint->setReferenceCount(newRefCount);
-    ALOGD("AAudioEndpointManager::closeEndpoint(%p) newRefCount = %d",
-          serviceEndpoint, newRefCount);
+    std::lock_guard<std::mutex> lock(mSharedLock);
 
-    // If no longer in use then close and delete it.
-    if (newRefCount <= 0) {
-        aaudio_direction_t direction = serviceEndpoint->getDirection();
-        // Track endpoints based on requested deviceId because UNSPECIFIED
-        // can change to a specific device after opening.
-        int32_t deviceId = serviceEndpoint->getRequestedDeviceId();
+    const AAudioStreamConfiguration &configuration = request.getConstantConfiguration();
+    aaudio_direction_t direction = configuration.getDirection();
 
+    // Try to find an existing endpoint.
+    sp<AAudioServiceEndpointShared> endpoint = findSharedEndpoint_l(configuration);
+
+    // If we can't find an existing one then open a new one.
+    if (endpoint.get() == nullptr) {
+        // we must call openStream with audioserver identity
+        int64_t token = IPCThreadState::self()->clearCallingIdentity();
         switch (direction) {
             case AAUDIO_DIRECTION_INPUT:
-                mInputs.erase(
-                  std::remove(mInputs.begin(), mInputs.end(), serviceEndpoint), mInputs.end());
+                endpoint = new AAudioServiceEndpointCapture(aaudioService);
                 break;
             case AAUDIO_DIRECTION_OUTPUT:
-                mOutputs.erase(
-                  std::remove(mOutputs.begin(), mOutputs.end(), serviceEndpoint), mOutputs.end());
+                endpoint = new AAudioServiceEndpointPlay(aaudioService);
                 break;
             default:
                 break;
         }
 
+        if (endpoint.get() != nullptr) {
+            aaudio_result_t result = endpoint->open(request);
+            if (result != AAUDIO_OK) {
+                ALOGE("AAudioEndpointManager.openEndpoint(), open failed");
+                endpoint.clear();
+            } else {
+                mSharedStreams.push_back(endpoint);
+            }
+        }
+        ALOGD("AAudioEndpointManager.openSharedEndpoint(), created %p for device = %d, dir = %d",
+              endpoint.get(), configuration.getDeviceId(), (int)direction);
+        IPCThreadState::self()->restoreCallingIdentity(token);
+    }
+
+    if (endpoint.get() != nullptr) {
+        // Increment the reference count under this lock.
+        endpoint->setOpenCount(endpoint->getOpenCount() + 1);
+    }
+    return endpoint;
+}
+
+void AAudioEndpointManager::closeEndpoint(sp<AAudioServiceEndpoint>serviceEndpoint) {
+    if (serviceEndpoint->getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE) {
+        return closeExclusiveEndpoint(serviceEndpoint);
+    } else {
+        return closeSharedEndpoint(serviceEndpoint);
+    }
+}
+
+void AAudioEndpointManager::closeExclusiveEndpoint(sp<AAudioServiceEndpoint> serviceEndpoint) {
+    if (serviceEndpoint.get() == nullptr) {
+        return;
+    }
+
+    // Decrement the reference count under this lock.
+    std::lock_guard<std::mutex> lock(mExclusiveLock);
+    int32_t newRefCount = serviceEndpoint->getOpenCount() - 1;
+    serviceEndpoint->setOpenCount(newRefCount);
+    ALOGD("AAudioEndpointManager::closeExclusiveEndpoint(%p) newRefCount = %d",
+          serviceEndpoint.get(), newRefCount);
+
+    // If no longer in use then close and delete it.
+    if (newRefCount <= 0) {
+        mExclusiveStreams.erase(
+                std::remove(mExclusiveStreams.begin(), mExclusiveStreams.end(), serviceEndpoint),
+                mExclusiveStreams.end());
+
         serviceEndpoint->close();
-        ALOGD("AAudioEndpointManager::closeEndpoint() delete %p for device %d, dir = %d",
-              serviceEndpoint, deviceId, (int)direction);
-        delete serviceEndpoint;
+        ALOGD("AAudioEndpointManager::closeExclusiveEndpoint() %p for device %d",
+              serviceEndpoint.get(), serviceEndpoint->getDeviceId());
+    }
+}
+
+void AAudioEndpointManager::closeSharedEndpoint(sp<AAudioServiceEndpoint> serviceEndpoint) {
+    if (serviceEndpoint.get() == nullptr) {
+        return;
+    }
+
+    // Decrement the reference count under this lock.
+    std::lock_guard<std::mutex> lock(mSharedLock);
+    int32_t newRefCount = serviceEndpoint->getOpenCount() - 1;
+    serviceEndpoint->setOpenCount(newRefCount);
+    ALOGD("AAudioEndpointManager::closeSharedEndpoint(%p) newRefCount = %d",
+          serviceEndpoint.get(), newRefCount);
+
+    // If no longer in use then close and delete it.
+    if (newRefCount <= 0) {
+        mSharedStreams.erase(
+                std::remove(mSharedStreams.begin(), mSharedStreams.end(), serviceEndpoint),
+                mSharedStreams.end());
+
+        serviceEndpoint->close();
+        ALOGD("AAudioEndpointManager::closeSharedEndpoint() %p for device %d",
+              serviceEndpoint.get(), serviceEndpoint->getDeviceId());
     }
 }
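
The endpoint manager refactor replaces raw pointers and manual delete with strong pointers plus an open count that is only touched while the matching lock is held: open() reuses an endpoint that matches the request or creates one, and close() decrements the count, removes the endpoint from the list when it reaches zero, and lets the last strong reference destroy it. A condensed sketch of that find-or-create and open-count pattern follows, using std::shared_ptr and std::mutex in place of android::sp and the two per-mode locks; all names are illustrative.

    #include <algorithm>
    #include <memory>
    #include <mutex>
    #include <vector>

    struct EndpointSketch {
        int deviceId  = -1;
        int openCount = 0;              // only read or written with the manager lock held
        void close() { /* release the underlying device here */ }
    };

    class EndpointManagerSketch {
    public:
        std::shared_ptr<EndpointSketch> open(int deviceId) {
            std::lock_guard<std::mutex> lock(mLock);
            for (const auto &ep : mEndpoints) {           // reuse a matching endpoint
                if (ep->deviceId == deviceId) {
                    ep->openCount++;
                    return ep;
                }
            }
            auto ep = std::make_shared<EndpointSketch>(); // otherwise create one
            ep->deviceId = deviceId;
            ep->openCount = 1;
            mEndpoints.push_back(ep);
            return ep;
        }

        void close(const std::shared_ptr<EndpointSketch> &ep) {
            std::lock_guard<std::mutex> lock(mLock);
            if (--ep->openCount <= 0) {
                mEndpoints.erase(std::remove(mEndpoints.begin(), mEndpoints.end(), ep),
                                 mEndpoints.end());
                ep->close();   // the strong pointer keeps the object valid for other holders
            }
        }

    private:
        std::mutex mLock;
        std::vector<std::shared_ptr<EndpointSketch>> mEndpoints;
    };
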
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
index 2511b2f..32c8454 100644
--- a/services/oboeservice/AAudioEndpointManager.h
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -24,11 +24,12 @@
 #include "binding/AAudioServiceMessage.h"
 #include "AAudioServiceEndpoint.h"
 #include "AAudioServiceEndpointCapture.h"
+#include "AAudioServiceEndpointMMAP.h"
 #include "AAudioServiceEndpointPlay.h"
 
 namespace aaudio {
 
-class AAudioEndpointManager : public android::Singleton<AAudioEndpointManager>{
+class AAudioEndpointManager : public android::Singleton<AAudioEndpointManager> {
 public:
     AAudioEndpointManager();
     ~AAudioEndpointManager() = default;
@@ -49,22 +50,42 @@
      * Find a service endpoint for the given deviceId and direction.
      * If an endpoint does not already exist then try to create one.
      *
-     * @param deviceId
-     * @param direction
-     * @return endpoint or nullptr
+     * @param audioService
+     * @param request
+     * @param sharingMode
+     * @return endpoint or null
      */
-    AAudioServiceEndpoint *openEndpoint(android::AAudioService &audioService,
-                                        const AAudioStreamConfiguration& configuration,
-                                        aaudio_direction_t direction);
+    android::sp<AAudioServiceEndpoint> openEndpoint(android::AAudioService &audioService,
+                                        const aaudio::AAudioStreamRequest &request,
+                                        aaudio_sharing_mode_t sharingMode);
 
-    void closeEndpoint(AAudioServiceEndpoint *serviceEndpoint);
+    void closeEndpoint(android::sp<AAudioServiceEndpoint> serviceEndpoint);
 
 private:
+    android::sp<AAudioServiceEndpoint> openExclusiveEndpoint(android::AAudioService &aaudioService,
+                                                 const aaudio::AAudioStreamRequest &request);
 
-    mutable std::mutex mLock;
+    android::sp<AAudioServiceEndpoint> openSharedEndpoint(android::AAudioService &aaudioService,
+                                              const aaudio::AAudioStreamRequest &request);
 
-    std::vector<AAudioServiceEndpointCapture *> mInputs;
-    std::vector<AAudioServiceEndpointPlay *> mOutputs;
+    android::sp<AAudioServiceEndpoint> findExclusiveEndpoint_l(
+            const AAudioStreamConfiguration& configuration);
+
+    android::sp<AAudioServiceEndpointShared> findSharedEndpoint_l(
+            const AAudioStreamConfiguration& configuration);
+
+    void closeExclusiveEndpoint(android::sp<AAudioServiceEndpoint> serviceEndpoint);
+    void closeSharedEndpoint(android::sp<AAudioServiceEndpoint> serviceEndpoint);
+
+    // Use separate locks because opening a Shared endpoint requires opening an Exclusive one.
+    // That could cause a recursive lock.
+    // Lock mSharedLock before mExclusiveLock.
+    // It is OK to lock only mExclusiveLock.
+    mutable std::mutex                                     mSharedLock;
+    std::vector<android::sp<AAudioServiceEndpointShared>>  mSharedStreams;
+
+    mutable std::mutex                                     mExclusiveLock;
+    std::vector<android::sp<AAudioServiceEndpointMMAP>>    mExclusiveStreams;
 
 };
 
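
The comment above documents a fixed lock order: opening a shared endpoint can itself open an exclusive MMAP endpoint, so the shared path takes mSharedLock before mExclusiveLock, and the exclusive-only path may take mExclusiveLock alone. As long as no code path acquires them in the opposite order the two locks cannot deadlock. A tiny illustration of that ordering discipline follows; the class and method names are hypothetical, not the patch's code.

    #include <mutex>

    class TwoLockSketch {
    public:
        // Shared path: may also need the exclusive resource, so take the locks
        // in the documented order, shared first and exclusive second.
        void openShared() {
            std::lock_guard<std::mutex> shared(mSharedLock);
            std::lock_guard<std::mutex> exclusive(mExclusiveLock);
            // ... create or reuse the shared endpoint, possibly on top of an exclusive one
        }

        // Exclusive-only path: taking just the second lock is safe because no
        // path ever acquires mSharedLock while already holding mExclusiveLock.
        void openExclusive() {
            std::lock_guard<std::mutex> exclusive(mExclusiveLock);
            // ... create or reuse the exclusive endpoint
        }

    private:
        std::mutex mSharedLock;
        std::mutex mExclusiveLock;
    };
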
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 3992719..855ae69 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -114,14 +114,12 @@
                 mAudioClient.clientUid == IPCThreadState::self()->getCallingUid()) {
             inService = request.isInService();
         }
-        serviceStream = new AAudioServiceStreamMMAP(mAudioClient, inService);
-        result = serviceStream->open(request, configurationOutput);
+        serviceStream = new AAudioServiceStreamMMAP(*this, inService);
+        result = serviceStream->open(request);
         if (result != AAUDIO_OK) {
-            // fall back to using a shared stream
+            // Clear it so we can possibly fall back to using a shared stream.
             ALOGW("AAudioService::openStream(), could not open in EXCLUSIVE mode");
             serviceStream.clear();
-        } else {
-            configurationOutput.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
         }
     }
 
@@ -129,8 +127,7 @@
     if (sharingMode == AAUDIO_SHARING_MODE_SHARED
          || (serviceStream == nullptr && !sharingModeMatchRequired)) {
         serviceStream =  new AAudioServiceStreamShared(*this);
-        result = serviceStream->open(request, configurationOutput);
-        configurationOutput.setSharingMode(AAUDIO_SHARING_MODE_SHARED);
+        result = serviceStream->open(request);
     }
 
     if (result != AAUDIO_OK) {
@@ -149,6 +146,7 @@
             serviceStream->setHandle(handle);
             pid_t pid = request.getProcessId();
             AAudioClientTracker::getInstance().registerClientStream(pid, serviceStream);
+            configurationOutput.copyFrom(*serviceStream);
         }
         return handle;
     }
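
openStream() now tries the MMAP path first and, when the exclusive open fails and the client did not insist on an exact sharing-mode match, clears the stream and retries through the shared path; whichever stream actually opened becomes the single source of truth, so the output configuration is copied from it afterwards instead of being patched per branch. A condensed sketch of that fallback shape follows; the stream types and helpers are illustrative stand-ins.

    #include <memory>

    // Illustrative stand-ins for the two service stream flavors.
    struct StreamSketch {
        virtual ~StreamSketch() = default;
        virtual bool open() = 0;                 // true on success
    };
    struct MmapStreamSketch : StreamSketch {
        bool open() override { return false; }   // e.g. no MMAP support on this device
    };
    struct SharedStreamSketch : StreamSketch {
        bool open() override { return true; }
    };

    std::unique_ptr<StreamSketch> openStreamSketch(bool wantExclusive, bool matchRequired) {
        std::unique_ptr<StreamSketch> stream;
        if (wantExclusive) {
            stream = std::make_unique<MmapStreamSketch>();
            if (!stream->open()) {
                stream.reset();                  // clear so the shared fallback below can run
            }
        }
        if (!stream && (!wantExclusive || !matchRequired)) {
            stream = std::make_unique<SharedStreamSketch>();
            if (!stream->open()) {
                stream.reset();
            }
        }
        // The caller copies the output configuration from *stream, the stream that
        // really opened, rather than guessing the sharing mode per branch.
        return stream;
    }
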
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index 8421efc..ffaf538 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -28,7 +28,9 @@
 #include "binding/IAAudioService.h"
 #include "binding/AAudioServiceInterface.h"
 
-#include "AAudioServiceStreamBase.h"
+namespace aaudio {
+    class AAudioServiceStreamBase;
+};
 
 namespace android {
 
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 81f1d1b..cba5bc8 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -33,17 +33,14 @@
 #include "core/AudioStreamBuilder.h"
 #include "AAudioServiceEndpoint.h"
 #include "AAudioServiceStreamShared.h"
+#include "AAudioServiceEndpointShared.h"
 
 using namespace android;  // TODO just import names needed
 using namespace aaudio;   // TODO just import names needed
 
-#define MIN_TIMEOUT_NANOS        (1000 * AAUDIO_NANOS_PER_MILLISECOND)
-
-// Wait at least this many times longer than the operation should take.
-#define MIN_TIMEOUT_OPERATIONS    4
-
-// This is the maximum size in frames. The effective size can be tuned smaller at runtime.
-#define DEFAULT_BUFFER_CAPACITY   (48 * 8)
+AAudioServiceEndpoint::~AAudioServiceEndpoint() {
+    ALOGD("AAudioServiceEndpoint::~AAudioServiceEndpoint() destroying endpoint %p", this);
+}
 
 std::string AAudioServiceEndpoint::dump() const {
     std::stringstream result;
@@ -53,20 +50,20 @@
             50 /* times */,
             20 /* sleepMs */);
     if (!isLocked) {
-        result << "EndpointManager may be deadlocked\n";
+        result << "AAudioServiceEndpoint may be deadlocked\n";
     }
 
-    AudioStreamInternal     *stream = mStreamInternal;
-    if (stream == nullptr) {
-        result << "null stream!" << "\n";
-    } else {
-        result << "mmap stream: rate = " << stream->getSampleRate() << "\n";
-    }
-
-    result << "    Registered Streams:" << "\n";
+    result << "    Direction:            " << ((getDirection() == AAUDIO_DIRECTION_OUTPUT)
+                                   ? "OUTPUT" : "INPUT") << "\n";
+    result << "    Sample Rate:          " << getSampleRate() << "\n";
+    result << "    Frames Per Burst:     " << mFramesPerBurst << "\n";
+    result << "    Reference Count:      " << mOpenCount << "\n";
+    result << "    Requested Device Id:  " << mRequestedDeviceId << "\n";
+    result << "    Device Id:            " << getDeviceId() << "\n";
+    result << "    Registered Streams:  " << "\n";
     result << AAudioServiceStreamShared::dumpHeader() << "\n";
-    for (sp<AAudioServiceStreamShared> sharedStream : mRegisteredStreams) {
-        result << sharedStream->dump() << "\n";
+    for (const auto stream : mRegisteredStreams) {
+        result << stream->dump() << "\n";
     }
 
     if (isLocked) {
@@ -75,113 +72,44 @@
     return result.str();
 }
 
-// Set up an EXCLUSIVE MMAP stream that will be shared.
-aaudio_result_t AAudioServiceEndpoint::open(const AAudioStreamConfiguration& configuration) {
-    mRequestedDeviceId = configuration.getDeviceId();
-    mStreamInternal = getStreamInternal();
-
-    AudioStreamBuilder builder;
-    builder.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
-    // Don't fall back to SHARED because that would cause recursion.
-    builder.setSharingModeMatchRequired(true);
-    builder.setDeviceId(mRequestedDeviceId);
-    builder.setFormat(configuration.getFormat());
-    builder.setSampleRate(configuration.getSampleRate());
-    builder.setSamplesPerFrame(configuration.getSamplesPerFrame());
-    builder.setDirection(getDirection());
-    builder.setBufferCapacity(DEFAULT_BUFFER_CAPACITY);
-
-    return getStreamInternal()->open(builder);
-}
-
-aaudio_result_t AAudioServiceEndpoint::close() {
-     return getStreamInternal()->close();
-}
-
-// TODO, maybe use an interface to reduce exposure
-aaudio_result_t AAudioServiceEndpoint::registerStream(sp<AAudioServiceStreamShared>sharedStream) {
-    std::lock_guard<std::mutex> lock(mLockStreams);
-    mRegisteredStreams.push_back(sharedStream);
-    return AAUDIO_OK;
-}
-
-aaudio_result_t AAudioServiceEndpoint::unregisterStream(sp<AAudioServiceStreamShared>sharedStream) {
-    std::lock_guard<std::mutex> lock(mLockStreams);
-    mRegisteredStreams.erase(std::remove(mRegisteredStreams.begin(), mRegisteredStreams.end(), sharedStream),
-              mRegisteredStreams.end());
-    return AAUDIO_OK;
-}
-
-aaudio_result_t AAudioServiceEndpoint::startStream(sp<AAudioServiceStreamShared> sharedStream) {
-    aaudio_result_t result = AAUDIO_OK;
-    if (++mRunningStreams == 1) {
-        // TODO use real-time technique to avoid mutex, eg. atomic command FIFO
-        std::lock_guard<std::mutex> lock(mLockStreams);
-        result = getStreamInternal()->requestStart();
-        startSharingThread_l();
-    }
-    return result;
-}
-
-aaudio_result_t AAudioServiceEndpoint::stopStream(sp<AAudioServiceStreamShared> sharedStream) {
-    // Don't lock here because the disconnectRegisteredStreams also uses the lock.
-    if (--mRunningStreams == 0) { // atomic
-        stopSharingThread();
-        getStreamInternal()->requestStop();
-    }
-    return AAUDIO_OK;
-}
-
-static void *aaudio_endpoint_thread_proc(void *context) {
-    AAudioServiceEndpoint *endpoint = (AAudioServiceEndpoint *) context;
-    if (endpoint != NULL) {
-        return endpoint->callbackLoop();
-    } else {
-        return NULL;
-    }
-}
-
-aaudio_result_t AAudioServiceEndpoint::startSharingThread_l() {
-    // Launch the callback loop thread.
-    int64_t periodNanos = getStreamInternal()->getFramesPerBurst()
-                          * AAUDIO_NANOS_PER_SECOND
-                          / getSampleRate();
-    mCallbackEnabled.store(true);
-    return getStreamInternal()->createThread(periodNanos, aaudio_endpoint_thread_proc, this);
-}
-
-aaudio_result_t AAudioServiceEndpoint::stopSharingThread() {
-    mCallbackEnabled.store(false);
-    aaudio_result_t result = getStreamInternal()->joinThread(NULL);
-    return result;
-}
-
 void AAudioServiceEndpoint::disconnectRegisteredStreams() {
     std::lock_guard<std::mutex> lock(mLockStreams);
-    for(auto sharedStream : mRegisteredStreams) {
-        sharedStream->stop();
-        sharedStream->disconnect();
+    for (const auto stream : mRegisteredStreams) {
+        stream->stop();
+        stream->disconnect();
     }
     mRegisteredStreams.clear();
 }
 
+aaudio_result_t AAudioServiceEndpoint::registerStream(sp<AAudioServiceStreamBase>stream) {
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    mRegisteredStreams.push_back(stream);
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceEndpoint::unregisterStream(sp<AAudioServiceStreamBase>stream) {
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    mRegisteredStreams.erase(std::remove(
+            mRegisteredStreams.begin(), mRegisteredStreams.end(), stream),
+                             mRegisteredStreams.end());
+    return AAUDIO_OK;
+}
+
 bool AAudioServiceEndpoint::matches(const AAudioStreamConfiguration& configuration) {
+    if (configuration.getDirection() != getDirection()) {
+        return false;
+    }
     if (configuration.getDeviceId() != AAUDIO_UNSPECIFIED &&
-            configuration.getDeviceId() != mStreamInternal->getDeviceId()) {
+        configuration.getDeviceId() != getDeviceId()) {
         return false;
     }
     if (configuration.getSampleRate() != AAUDIO_UNSPECIFIED &&
-            configuration.getSampleRate() != mStreamInternal->getSampleRate()) {
+        configuration.getSampleRate() != getSampleRate()) {
         return false;
     }
     if (configuration.getSamplesPerFrame() != AAUDIO_UNSPECIFIED &&
-            configuration.getSamplesPerFrame() != mStreamInternal->getSamplesPerFrame()) {
+        configuration.getSamplesPerFrame() != getSamplesPerFrame()) {
         return false;
     }
     return true;
 }
-
-
-aaudio_result_t AAudioServiceEndpoint::getTimestamp(int64_t *positionFrames, int64_t *timeNanos) {
-    return mStreamInternal->getTimestamp(CLOCK_MONOTONIC, positionFrames, timeNanos);
-}
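
matches() treats AAUDIO_UNSPECIFIED as a wildcard: the direction always has to agree, while device id, sample rate and channel count only have to agree when the request pinned them to a concrete value. A minimal stand-alone version of that matching rule follows; the struct and the kUnspecified constant are illustrative (AAUDIO_UNSPECIFIED is modeled here as 0).

    #include <cstdint>

    constexpr int32_t kUnspecified = 0;   // stand-in for AAUDIO_UNSPECIFIED

    struct EndpointParamsSketch {
        int32_t direction;        // must always match
        int32_t deviceId;
        int32_t sampleRate;
        int32_t samplesPerFrame;
    };

    // A requested value of kUnspecified acts as a wildcard; any other value
    // must equal the endpoint's actual value.
    bool endpointMatches(const EndpointParamsSketch &endpoint,
                         const EndpointParamsSketch &request) {
        if (request.direction != endpoint.direction) return false;
        if (request.deviceId != kUnspecified &&
            request.deviceId != endpoint.deviceId) return false;
        if (request.sampleRate != kUnspecified &&
            request.sampleRate != endpoint.sampleRate) return false;
        if (request.samplesPerFrame != kUnspecified &&
            request.samplesPerFrame != endpoint.samplesPerFrame) return false;
        return true;
    }
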
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 603d497..2ef6234 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -24,72 +24,93 @@
 
 #include "client/AudioStreamInternal.h"
 #include "client/AudioStreamInternalPlay.h"
+#include "core/AAudioStreamParameters.h"
 #include "binding/AAudioServiceMessage.h"
-#include "AAudioServiceStreamShared.h"
-#include "AAudioServiceStreamMMAP.h"
-#include "AAudioMixer.h"
-#include "AAudioService.h"
+#include "binding/AAudioStreamConfiguration.h"
+
+#include "AAudioServiceStreamBase.h"
 
 namespace aaudio {
 
-class AAudioServiceEndpoint {
+/**
+ * AAudioServiceEndpoint is used by a subclass of AAudioServiceStreamBase
+ * to communicate with the underlying audio device or port.
+ */
+class AAudioServiceEndpoint
+        : public virtual android::RefBase
+        , public AAudioStreamParameters {
 public:
-    virtual ~AAudioServiceEndpoint() = default;
 
-    std::string dump() const;
+    virtual ~AAudioServiceEndpoint();
 
-    virtual aaudio_result_t open(const AAudioStreamConfiguration& configuration);
+    virtual std::string dump() const;
 
-    int32_t getSampleRate() const { return mStreamInternal->getSampleRate(); }
-    int32_t getSamplesPerFrame() const { return mStreamInternal->getSamplesPerFrame();  }
-    int32_t getFramesPerBurst() const { return mStreamInternal->getFramesPerBurst();  }
+    virtual aaudio_result_t open(const aaudio::AAudioStreamRequest &request) = 0;
 
-    aaudio_result_t registerStream(android::sp<AAudioServiceStreamShared> sharedStream);
-    aaudio_result_t unregisterStream(android::sp<AAudioServiceStreamShared> sharedStream);
-    aaudio_result_t startStream(android::sp<AAudioServiceStreamShared> sharedStream);
-    aaudio_result_t stopStream(android::sp<AAudioServiceStreamShared> sharedStream);
-    aaudio_result_t close();
+    virtual aaudio_result_t close() = 0;
+
+    virtual aaudio_result_t registerStream(android::sp<AAudioServiceStreamBase> stream);
+
+    virtual aaudio_result_t unregisterStream(android::sp<AAudioServiceStreamBase> stream);
+
+    virtual aaudio_result_t startStream(android::sp<AAudioServiceStreamBase> stream,
+                                        audio_port_handle_t *clientHandle) = 0;
+
+    virtual aaudio_result_t stopStream(android::sp<AAudioServiceStreamBase> stream,
+                                       audio_port_handle_t clientHandle) = 0;
+
+    virtual aaudio_result_t startClient(const android::AudioClient& client,
+                                        audio_port_handle_t *clientHandle) {
+        ALOGD("AAudioServiceEndpoint::startClient(%p, ...) AAUDIO_ERROR_UNAVAILABLE", &client);
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+
+    virtual aaudio_result_t stopClient(audio_port_handle_t clientHandle) {
+        ALOGD("AAudioServiceEndpoint::stopClient(...) AAUDIO_ERROR_UNAVAILABLE");
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+
+    /**
+     * @param positionFrames
+     * @param timeNanos
+     * @return AAUDIO_OK or AAUDIO_ERROR_UNAVAILABLE or other negative error
+     */
+    virtual aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) = 0;
+
+    virtual aaudio_result_t getTimestamp(int64_t *positionFrames, int64_t *timeNanos) = 0;
+
+    int32_t getFramesPerBurst() const {
+        return mFramesPerBurst;
+    }
 
     int32_t getRequestedDeviceId() const { return mRequestedDeviceId; }
-    int32_t getDeviceId() const { return mStreamInternal->getDeviceId(); }
-
-    aaudio_direction_t getDirection() const { return mStreamInternal->getDirection(); }
-
-    void disconnectRegisteredStreams();
-
-    virtual void *callbackLoop() = 0;
-
-    // This should only be called from the AAudioEndpointManager under a mutex.
-    int32_t getReferenceCount() const {
-        return mReferenceCount;
-    }
-
-    // This should only be called from the AAudioEndpointManager under a mutex.
-    void setReferenceCount(int32_t count) {
-        mReferenceCount = count;
-    }
-
-    aaudio_result_t getTimestamp(int64_t *positionFrames, int64_t *timeNanos);
 
     bool matches(const AAudioStreamConfiguration& configuration);
 
-    virtual AudioStreamInternal *getStreamInternal() = 0;
+    // This should only be called from the AAudioEndpointManager under a mutex.
+    int32_t getOpenCount() const {
+        return mOpenCount;
+    }
 
-    std::atomic<bool>        mCallbackEnabled{false};
+    // This should only be called from the AAudioEndpointManager under a mutex.
+    void setOpenCount(int32_t count) {
+        mOpenCount = count;
+    }
+
+protected:
+    void                     disconnectRegisteredStreams();
 
     mutable std::mutex       mLockStreams;
+    std::vector<android::sp<AAudioServiceStreamBase>> mRegisteredStreams;
 
-    std::vector<android::sp<AAudioServiceStreamShared>> mRegisteredStreams;
+    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;
 
-    std::atomic<int>         mRunningStreams{0};
+    android::AudioClient     mMmapClient;   // set in open, used in open and startStream
 
-private:
-    aaudio_result_t startSharingThread_l();
-    aaudio_result_t stopSharingThread();
-
-    AudioStreamInternal     *mStreamInternal = nullptr;
-    int32_t                  mReferenceCount = 0;
+    int32_t                  mFramesPerBurst = 0;
+    int32_t                  mOpenCount = 0;
     int32_t                  mRequestedDeviceId = 0;
+
 };
 
 } /* namespace aaudio */
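
The base class turns the per-client hooks into virtuals whose default bodies simply report AAUDIO_ERROR_UNAVAILABLE, so only the MMAP endpoint has to override startClient()/stopClient() and the shared endpoints inherit a safe "not supported" answer. A compact sketch of that default-implementation pattern follows; the error values and class names are stand-ins, not the AAudio constants.

    #include <cstdint>

    constexpr int32_t kOk = 0;                  // stand-in for AAUDIO_OK
    constexpr int32_t kErrorUnavailable = -1;   // stand-in for AAUDIO_ERROR_UNAVAILABLE

    class EndpointBaseSketch {
    public:
        virtual ~EndpointBaseSketch() = default;
        // Optional capability: the base class answers "unavailable", so subclasses
        // that cannot route per-client streams need no override at all.
        virtual int32_t startClient(int32_t /*portHandle*/) { return kErrorUnavailable; }
        virtual int32_t stopClient(int32_t /*portHandle*/)  { return kErrorUnavailable; }
    };

    class MmapEndpointSketch : public EndpointBaseSketch {
    public:
        int32_t startClient(int32_t portHandle) override {
            // Only the MMAP endpoint actually forwards the request downstream.
            return doStartClient(portHandle);
        }
    private:
        int32_t doStartClient(int32_t /*portHandle*/) { return kOk; }
    };
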
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.cpp b/services/oboeservice/AAudioServiceEndpointCapture.cpp
index 6504cc1..97558ca 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.cpp
+++ b/services/oboeservice/AAudioServiceEndpointCapture.cpp
@@ -30,20 +30,22 @@
 #include "AAudioServiceEndpoint.h"
 #include "AAudioServiceStreamShared.h"
 #include "AAudioServiceEndpointCapture.h"
+#include "AAudioServiceEndpointShared.h"
 
 using namespace android;  // TODO just import names needed
 using namespace aaudio;   // TODO just import names needed
 
 AAudioServiceEndpointCapture::AAudioServiceEndpointCapture(AAudioService &audioService)
         : mStreamInternalCapture(audioService, true) {
+    mStreamInternal = &mStreamInternalCapture;
 }
 
 AAudioServiceEndpointCapture::~AAudioServiceEndpointCapture() {
     delete mDistributionBuffer;
 }
 
-aaudio_result_t AAudioServiceEndpointCapture::open(const AAudioStreamConfiguration& configuration) {
-    aaudio_result_t result = AAudioServiceEndpoint::open(configuration);
+aaudio_result_t AAudioServiceEndpointCapture::open(const aaudio::AAudioStreamRequest &request) {
+    aaudio_result_t result = AAudioServiceEndpointShared::open(request);
     if (result == AAUDIO_OK) {
         delete mDistributionBuffer;
         int distributionBufferSizeBytes = getStreamInternal()->getFramesPerBurst()
@@ -80,16 +82,19 @@
         { // brackets are for lock_guard
 
             std::lock_guard <std::mutex> lock(mLockStreams);
-            for (sp<AAudioServiceStreamShared> clientStream : mRegisteredStreams) {
+            for (const auto clientStream : mRegisteredStreams) {
                 if (clientStream->isRunning()) {
-                    FifoBuffer *fifo = clientStream->getDataFifoBuffer();
+                    AAudioServiceStreamShared *streamShared =
+                            static_cast<AAudioServiceStreamShared *>(clientStream.get());
+
+                    FifoBuffer *fifo = streamShared->getDataFifoBuffer();
 
                     // Determine offset between framePosition in client's stream vs the underlying
                     // MMAP stream.
                     int64_t clientFramesWritten = fifo->getWriteCounter();
                     // There are two indices that refer to the same frame.
                     int64_t positionOffset = mmapFramesRead - clientFramesWritten;
-                    clientStream->setTimestampPositionOffset(positionOffset);
+                    streamShared->setTimestampPositionOffset(positionOffset);
 
                     if (fifo->getFifoControllerBase()->getEmptyFramesAvailable() <
                         getFramesPerBurst()) {
@@ -102,7 +107,7 @@
                     // client buffer. It is sent to the client and used in the timing model
                     // to decide when data will be available to read.
                     Timestamp timestamp(fifo->getWriteCounter(), AudioClock::getNanoseconds());
-                    clientStream->markTransferTime(timestamp);
+                    streamShared->markTransferTime(timestamp);
                 }
             }
         }
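Note: the position-offset bookkeeping above is easy to check with concrete numbers. The sketch below is illustrative only (made-up frame counts, not part of the patch) and shows how the offset translates a later endpoint position into the client's frame of reference:

    #include <cstdint>
    #include <cstdio>

    // Toy illustration of the offset computed in the capture callback:
    // the endpoint's MMAP read position and the client FIFO's write position
    // refer to the same frame, so their difference maps one counter to the other.
    int main() {
        int64_t mmapFramesRead = 96000;       // frames the endpoint has read from the HAL
        int64_t clientFramesWritten = 95616;  // frames written into this client's FIFO
        int64_t positionOffset = mmapFramesRead - clientFramesWritten;

        int64_t laterEndpointPosition = 96480;   // some later endpoint position
        int64_t clientPosition = laterEndpointPosition - positionOffset;
        printf("offset = %lld, client position = %lld\n",
               (long long) positionOffset, (long long) clientPosition);
        return 0;
    }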
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.h b/services/oboeservice/AAudioServiceEndpointCapture.h
index 8a3d72f..971da9a 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.h
+++ b/services/oboeservice/AAudioServiceEndpointCapture.h
@@ -20,18 +20,18 @@
 #include "client/AudioStreamInternal.h"
 #include "client/AudioStreamInternalCapture.h"
 
+#include "AAudioServiceEndpointShared.h"
+#include "AAudioServiceStreamShared.h"
+
 namespace aaudio {
 
-class AAudioServiceEndpointCapture : public AAudioServiceEndpoint {
+class AAudioServiceEndpointCapture : public AAudioServiceEndpointShared {
 public:
     explicit AAudioServiceEndpointCapture(android::AAudioService &audioService);
     virtual ~AAudioServiceEndpointCapture();
 
-    aaudio_result_t open(const AAudioStreamConfiguration& configuration) override;
+    aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
 
-    AudioStreamInternal *getStreamInternal() override {
-        return &mStreamInternalCapture;
-    }
 
     void *callbackLoop() override;
 
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
new file mode 100644
index 0000000..58213f8
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioServiceEndpointMMAP"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <algorithm>
+#include <assert.h>
+#include <map>
+#include <mutex>
+#include <sstream>
+#include <utils/Singleton.h>
+#include <vector>
+
+
+#include "AAudioEndpointManager.h"
+#include "AAudioServiceEndpoint.h"
+
+#include "core/AudioStreamBuilder.h"
+#include "AAudioServiceEndpoint.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceEndpointPlay.h"
+#include "AAudioServiceEndpointMMAP.h"
+
+
+#define AAUDIO_BUFFER_CAPACITY_MIN    (4 * 512)
+#define AAUDIO_SAMPLE_RATE_DEFAULT    48000
+
+// This is an estimate of the time difference between the HW and the MMAP time.
+// TODO Get presentation timestamps from the HAL instead of using these estimates.
+#define OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS  (3 * AAUDIO_NANOS_PER_MILLISECOND)
+#define INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS   (-1 * AAUDIO_NANOS_PER_MILLISECOND)
+
+using namespace android;  // TODO just import names needed
+using namespace aaudio;   // TODO just import names needed
+
+AAudioServiceEndpointMMAP::AAudioServiceEndpointMMAP()
+        :  mMmapStream(nullptr) {}
+
+AAudioServiceEndpointMMAP::~AAudioServiceEndpointMMAP() {}
+
+std::string AAudioServiceEndpointMMAP::dump() const {
+    std::stringstream result;
+
+    result << "  MMAP: framesTransferred = " << mFramesTransferred.get();
+    result << ", HW nanos = " << mHardwareTimeOffsetNanos;
+    result << ", port handle = " << mPortHandle;
+    result << ", audio data FD = " << mAudioDataFileDescriptor;
+    result << "\n";
+
+    result << "    HW Offset Micros:     " <<
+                                      (getHardwareTimeOffsetNanos()
+                                       / AAUDIO_NANOS_PER_MICROSECOND) << "\n";
+
+    result << AAudioServiceEndpoint::dump();
+    return result.str();
+}
+
+aaudio_result_t AAudioServiceEndpointMMAP::open(const aaudio::AAudioStreamRequest &request) {
+    aaudio_result_t result = AAUDIO_OK;
+    const audio_attributes_t attributes = {
+            .content_type = AUDIO_CONTENT_TYPE_MUSIC,
+            .usage = AUDIO_USAGE_MEDIA,
+            .source = AUDIO_SOURCE_VOICE_RECOGNITION,
+            .flags = AUDIO_FLAG_LOW_LATENCY,
+            .tags = ""
+    };
+    audio_config_base_t config;
+    audio_port_handle_t deviceId;
+
+    int32_t burstMinMicros = AAudioProperty_getHardwareBurstMinMicros();
+    int32_t burstMicros = 0;
+
+    copyFrom(request.getConstantConfiguration());
+
+    mMmapClient.clientUid = request.getUserId();
+    mMmapClient.clientPid = request.getProcessId();
+    mMmapClient.packageName.setTo(String16(""));
+
+    mRequestedDeviceId = deviceId = getDeviceId();
+
+    // Fill in config
+    aaudio_format_t aaudioFormat = getFormat();
+    if (aaudioFormat == AAUDIO_UNSPECIFIED || aaudioFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+        aaudioFormat = AAUDIO_FORMAT_PCM_I16;
+    }
+    config.format = AAudioConvert_aaudioToAndroidDataFormat(aaudioFormat);
+
+    int32_t aaudioSampleRate = getSampleRate();
+    if (aaudioSampleRate == AAUDIO_UNSPECIFIED) {
+        aaudioSampleRate = AAUDIO_SAMPLE_RATE_DEFAULT;
+    }
+    config.sample_rate = aaudioSampleRate;
+
+    int32_t aaudioSamplesPerFrame = getSamplesPerFrame();
+
+    aaudio_direction_t direction = getDirection();
+    if (direction == AAUDIO_DIRECTION_OUTPUT) {
+        config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
+                              ? AUDIO_CHANNEL_OUT_STEREO
+                              : audio_channel_out_mask_from_count(aaudioSamplesPerFrame);
+        mHardwareTimeOffsetNanos = OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at DAC later
+
+    } else if (direction == AAUDIO_DIRECTION_INPUT) {
+        config.channel_mask =  (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
+                               ? AUDIO_CHANNEL_IN_STEREO
+                               : audio_channel_in_mask_from_count(aaudioSamplesPerFrame);
+        mHardwareTimeOffsetNanos = INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at ADC earlier
+
+    } else {
+        ALOGE("openMmapStream - invalid direction = %d", direction);
+        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    }
+
+    MmapStreamInterface::stream_direction_t streamDirection =
+            (direction == AAUDIO_DIRECTION_OUTPUT)
+            ? MmapStreamInterface::DIRECTION_OUTPUT
+            : MmapStreamInterface::DIRECTION_INPUT;
+
+    // Open HAL stream. Set mMmapStream
+    status_t status = MmapStreamInterface::openMmapStream(streamDirection,
+                                                          &attributes,
+                                                          &config,
+                                                          mMmapClient,
+                                                          &deviceId,
+                                                          this, // callback
+                                                          mMmapStream,
+                                                          &mPortHandle);
+    ALOGD("AAudioServiceEndpointMMAP::open() mMapClient.uid = %d, pid = %d => portHandle = %d\n",
+          mMmapClient.clientUid,  mMmapClient.clientPid, mPortHandle);
+    if (status != OK) {
+        ALOGE("openMmapStream returned status %d", status);
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+
+    if (deviceId == AAUDIO_UNSPECIFIED) {
+        ALOGW("AAudioServiceEndpointMMAP::open() - openMmapStream() failed to set deviceId");
+    }
+    setDeviceId(deviceId);
+
+    // Create MMAP/NOIRQ buffer.
+    int32_t minSizeFrames = getBufferCapacity();
+    if (minSizeFrames <= 0) { // zero will get rejected
+        minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
+    }
+    status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
+    if (status != OK) {
+        ALOGE("AAudioServiceEndpointMMAP::open() - createMmapBuffer() failed with status %d %s",
+              status, strerror(-status));
+        result = AAUDIO_ERROR_UNAVAILABLE;
+        goto error;
+    } else {
+        ALOGD("createMmapBuffer status = %d, buffer_size = %d, burst_size %d"
+                      ", Sharable FD: %s",
+              status,
+              abs(mMmapBufferinfo.buffer_size_frames),
+              mMmapBufferinfo.burst_size_frames,
+              mMmapBufferinfo.buffer_size_frames < 0 ? "Yes" : "No");
+    }
+
+    setBufferCapacity(mMmapBufferinfo.buffer_size_frames);
+    // The audio HAL indicates if the shared memory fd can be shared outside of audioserver
+    // by returning a negative buffer size
+    if (getBufferCapacity() < 0) {
+        // Exclusive mode can be used by client or service.
+        setBufferCapacity(-getBufferCapacity());
+    } else {
+        // Exclusive mode can only be used by the service because the FD cannot be shared.
+        uid_t audioServiceUid = getuid();
+        if ((mMmapClient.clientUid != audioServiceUid) &&
+            getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE) {
+            // Fallback is handled by caller but indicate what is possible in case
+            // this is used in the future
+            setSharingMode(AAUDIO_SHARING_MODE_SHARED);
+            ALOGW("AAudioServiceEndpointMMAP::open() - exclusive FD cannot be used by client");
+            result = AAUDIO_ERROR_UNAVAILABLE;
+            goto error;
+        }
+    }
+
+    // Get information about the stream and pass it back to the caller.
+    setSamplesPerFrame((direction == AAUDIO_DIRECTION_OUTPUT)
+                       ? audio_channel_count_from_out_mask(config.channel_mask)
+                       : audio_channel_count_from_in_mask(config.channel_mask));
+
+    // AAudio creates a copy of this FD and retains ownership of the copy.
+    // Assume that AudioFlinger will close the original shared_memory_fd.
+    mAudioDataFileDescriptor.reset(dup(mMmapBufferinfo.shared_memory_fd));
+    if (mAudioDataFileDescriptor.get() == -1) {
+        ALOGE("AAudioServiceEndpointMMAP::open() - could not dup shared_memory_fd");
+        result = AAUDIO_ERROR_INTERNAL;
+        goto error;
+    }
+    mFramesPerBurst = mMmapBufferinfo.burst_size_frames;
+    setFormat(AAudioConvert_androidToAAudioDataFormat(config.format));
+    setSampleRate(config.sample_rate);
+
+    // Scale up the burst size to meet the minimum equivalent in microseconds.
+    // This is to avoid waking the CPU too often when the HW burst is very small
+    // or at high sample rates.
+    do {
+        if (burstMicros > 0) {  // skip first loop
+            mFramesPerBurst *= 2;
+        }
+        burstMicros = mFramesPerBurst * static_cast<int64_t>(1000000) / getSampleRate();
+    } while (burstMicros < burstMinMicros);
+
+    ALOGD("AAudioServiceEndpointMMAP::open() original burst = %d, minMicros = %d, to burst = %d\n",
+          mMmapBufferinfo.burst_size_frames, burstMinMicros, mFramesPerBurst);
+
+    ALOGD("AAudioServiceEndpointMMAP::open() actual rate = %d, channels = %d"
+          ", deviceId = %d, capacity = %d\n",
+          getSampleRate(), getSamplesPerFrame(), deviceId, getBufferCapacity());
+
+    return result;
+
+error:
+    close();
+    return result;
+}
+
+aaudio_result_t AAudioServiceEndpointMMAP::close() {
+
+    if (mMmapStream != 0) {
+        ALOGD("AAudioServiceEndpointMMAP::close() clear() endpoint");
+        // Needs to be explicitly cleared or CTS will fail but it is not clear why.
+        mMmapStream.clear();
+        // Apparently the above close is asynchronous. An attempt to open a new device
+        // right after a close can fail. Also some callbacks may still be in flight!
+        // FIXME Make closing synchronous.
+        AudioClock::sleepForNanos(100 * AAUDIO_NANOS_PER_MILLISECOND);
+    }
+
+    return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceEndpointMMAP::startStream(sp<AAudioServiceStreamBase> stream,
+                                                   audio_port_handle_t *clientHandle) {
+    return startClient(mMmapClient, &mPortHandle);
+}
+
+aaudio_result_t AAudioServiceEndpointMMAP::stopStream(sp<AAudioServiceStreamBase> stream,
+                                                  audio_port_handle_t clientHandle) {
+    mFramesTransferred.reset32();
+    return stopClient(mPortHandle);
+}
+
+aaudio_result_t AAudioServiceEndpointMMAP::startClient(const android::AudioClient& client,
+                                                       audio_port_handle_t *clientHandle) {
+    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+    audio_port_handle_t originalHandle =  *clientHandle;
+    aaudio_result_t result = AAudioConvert_androidToAAudioResult(mMmapStream->start(client,
+                                                                                    clientHandle));
+    ALOGD("AAudioServiceEndpointMMAP::startClient(%p(uid=%d, pid=%d), %d => %d) returns %d",
+          &client, client.clientUid, client.clientPid,
+          originalHandle, *clientHandle, result);
+    return result;
+}
+
+aaudio_result_t AAudioServiceEndpointMMAP::stopClient(audio_port_handle_t clientHandle) {
+    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+    aaudio_result_t result = AAudioConvert_androidToAAudioResult(mMmapStream->stop(clientHandle));
+    ALOGD("AAudioServiceEndpointMMAP::stopClient(%d) returns %d", clientHandle, result);
+    return result;
+}
+
+// Get free-running DSP or DMA hardware position from the HAL.
+aaudio_result_t AAudioServiceEndpointMMAP::getFreeRunningPosition(int64_t *positionFrames,
+                                                                int64_t *timeNanos) {
+    struct audio_mmap_position position;
+    if (mMmapStream == nullptr) {
+        return AAUDIO_ERROR_NULL;
+    }
+    status_t status = mMmapStream->getMmapPosition(&position);
+    ALOGV("AAudioServiceEndpointMMAP::getFreeRunningPosition() status= %d, pos = %d, nanos = %lld\n",
+          status, position.position_frames, (long long) position.time_nanoseconds);
+    aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
+    if (result == AAUDIO_ERROR_UNAVAILABLE) {
+        ALOGW("sendCurrentTimestamp(): getMmapPosition() has no position data available");
+    } else if (result != AAUDIO_OK) {
+        ALOGE("sendCurrentTimestamp(): getMmapPosition() returned status %d", status);
+    } else {
+        // Convert 32-bit position to 64-bit position.
+        mFramesTransferred.update32(position.position_frames);
+        *positionFrames = mFramesTransferred.get();
+        *timeNanos = position.time_nanoseconds;
+    }
+    return result;
+}
+
+aaudio_result_t AAudioServiceEndpointMMAP::getTimestamp(int64_t *positionFrames,
+                                                    int64_t *timeNanos) {
+    return AAUDIO_OK; // TODO implement using HAL presentation timestamps
+}
+
+
+void AAudioServiceEndpointMMAP::onTearDown() {
+    ALOGD("AAudioServiceEndpointMMAP::onTearDown() called");
+    disconnectRegisteredStreams();
+}
+
+void AAudioServiceEndpointMMAP::onVolumeChanged(audio_channel_mask_t channels,
+                                              android::Vector<float> values) {
+    // TODO do we really need a different volume for each channel?
+    float volume = values[0];
+    ALOGD("AAudioServiceEndpointMMAP::onVolumeChanged() volume[0] = %f", volume);
+    std::lock_guard<std::mutex> lock(mLockStreams);
+    for (const auto& stream : mRegisteredStreams) {
+        stream->onVolumeChanged(volume);
+    }
+}
+
+void AAudioServiceEndpointMMAP::onRoutingChanged(audio_port_handle_t deviceId) {
+    ALOGD("AAudioServiceEndpointMMAP::onRoutingChanged() called with %d, old = %d",
+          deviceId, getDeviceId());
+    if (getDeviceId() != AUDIO_PORT_HANDLE_NONE  && getDeviceId() != deviceId) {
+        disconnectRegisteredStreams();
+    }
+    setDeviceId(deviceId);
+}
+
+/**
+ * Get an immutable description of the data queue from the HAL.
+ */
+aaudio_result_t AAudioServiceEndpointMMAP::getDownDataDescription(AudioEndpointParcelable &parcelable)
+{
+    // Gather information on the data queue based on HAL info.
+    int32_t bytesPerFrame = calculateBytesPerFrame();
+    int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame;
+    int fdIndex = parcelable.addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
+    parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
+    parcelable.mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
+    parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
+    parcelable.mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
+    return AAUDIO_OK;
+}
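Note: the burst-scaling loop in AAudioServiceEndpointMMAP::open() can be exercised in isolation. The following standalone sketch (hypothetical values; not part of the patch) reproduces the doubling logic so the rounding behaviour is easy to verify:

    #include <cstdint>
    #include <cstdio>

    // Keep doubling the HAL burst until one burst lasts at least burstMinMicros,
    // mirroring the do/while loop in AAudioServiceEndpointMMAP::open().
    static int32_t scaleBurst(int32_t halBurstFrames, int32_t sampleRate,
                              int32_t burstMinMicros) {
        int32_t framesPerBurst = halBurstFrames;
        int64_t burstMicros = 0;
        do {
            if (burstMicros > 0) {  // skip the first pass, as in the patch
                framesPerBurst *= 2;
            }
            burstMicros = framesPerBurst * static_cast<int64_t>(1000000) / sampleRate;
        } while (burstMicros < burstMinMicros);
        return framesPerBurst;
    }

    int main() {
        // A 48-frame HAL burst at 48 kHz with a 2000 us floor doubles once to 96 frames.
        printf("%d\n", scaleBurst(48, 48000, 2000));
        return 0;
    }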
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
new file mode 100644
index 0000000..16b6269
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SERVICE_ENDPOINT_MMAP_H
+#define AAUDIO_SERVICE_ENDPOINT_MMAP_H
+
+#include <atomic>
+#include <functional>
+#include <mutex>
+#include <vector>
+
+#include "client/AudioStreamInternal.h"
+#include "client/AudioStreamInternalPlay.h"
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceEndpointShared.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "AAudioMixer.h"
+#include "AAudioService.h"
+
+namespace aaudio {
+
+/**
+ * This is used by AAudioServiceStreamMMAP to access the MMAP devices
+ * through AudioFlinger.
+ */
+class AAudioServiceEndpointMMAP
+        : public AAudioServiceEndpoint
+        , public android::MmapStreamCallback {
+
+public:
+    AAudioServiceEndpointMMAP();
+
+    virtual ~AAudioServiceEndpointMMAP();
+
+    std::string dump() const override;
+
+    aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
+
+    aaudio_result_t close() override;
+
+    aaudio_result_t startStream(android::sp<AAudioServiceStreamBase> stream,
+                                audio_port_handle_t *clientHandle) override;
+
+    aaudio_result_t stopStream(android::sp<AAudioServiceStreamBase> stream,
+                               audio_port_handle_t clientHandle) override;
+
+    aaudio_result_t startClient(const android::AudioClient& client,
+                                        audio_port_handle_t *clientHandle)  override;
+
+    aaudio_result_t stopClient(audio_port_handle_t clientHandle)  override;
+
+    aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+
+    aaudio_result_t getTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
+
+    // -------------- Callback functions for MmapStreamCallback ---------------------
+    void onTearDown() override;
+
+    void onVolumeChanged(audio_channel_mask_t channels,
+                         android::Vector<float> values) override;
+
+    void onRoutingChanged(audio_port_handle_t deviceId) override;
+    // ------------------------------------------------------------------------------
+
+    aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable);
+
+    int64_t getHardwareTimeOffsetNanos() const {
+        return mHardwareTimeOffsetNanos;
+    }
+
+private:
+    MonotonicCounter                          mFramesTransferred;
+
+    // Interface to the AudioFlinger MMAP support.
+    android::sp<android::MmapStreamInterface> mMmapStream;
+    struct audio_mmap_buffer_info             mMmapBufferinfo;
+    audio_port_handle_t                       mPortHandle = AUDIO_PORT_HANDLE_NONE;
+
+    android::base::unique_fd                  mAudioDataFileDescriptor;
+
+    int64_t                                   mHardwareTimeOffsetNanos = 0; // TODO get from HAL
+
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_SERVICE_ENDPOINT_MMAP_H
+
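Note: getFreeRunningPosition() in the .cpp above relies on mFramesTransferred (a MonotonicCounter) to widen the wrapping 32-bit HAL position into a 64-bit frame count. The class below is only a rough stand-in for that idea, not the MonotonicCounter from the tree:

    #include <cstdint>
    #include <cstdio>

    // Extend a wrapping 32-bit position into a monotonically increasing 64-bit count.
    class Wide32 {
    public:
        int64_t update32(int32_t pos32) {
            // Unsigned subtraction handles 32-bit wrap-around without overflow UB.
            uint32_t delta = static_cast<uint32_t>(pos32) - static_cast<uint32_t>(mLast32);
            mLast32 = pos32;
            mTotal += static_cast<int32_t>(delta);  // reinterpret as a signed step
            return mTotal;
        }
        int64_t get() const { return mTotal; }
    private:
        int32_t mLast32 = 0;
        int64_t mTotal = 0;
    };

    int main() {
        Wide32 counter;
        counter.update32(2000000000);
        counter.update32(-2000000000);  // HAL position wrapped past INT32_MAX
        printf("%lld\n", (long long) counter.get());  // ~2.29 billion, still increasing
        return 0;
    }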
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index 8b1cc9f..c42a6e2 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "AAudioService"
+#define LOG_TAG "AAudioServiceEndpointPlay"
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
@@ -33,6 +33,7 @@
 #include "AAudioServiceEndpoint.h"
 #include "AAudioServiceStreamShared.h"
 #include "AAudioServiceEndpointPlay.h"
+#include "AAudioServiceEndpointShared.h"
 
 using namespace android;  // TODO just import names needed
 using namespace aaudio;   // TODO just import names needed
@@ -41,13 +42,14 @@
 
 AAudioServiceEndpointPlay::AAudioServiceEndpointPlay(AAudioService &audioService)
         : mStreamInternalPlay(audioService, true) {
+    mStreamInternal = &mStreamInternalPlay;
 }
 
 AAudioServiceEndpointPlay::~AAudioServiceEndpointPlay() {
 }
 
-aaudio_result_t AAudioServiceEndpointPlay::open(const AAudioStreamConfiguration& configuration) {
-    aaudio_result_t result = AAudioServiceEndpoint::open(configuration);
+aaudio_result_t AAudioServiceEndpointPlay::open(const aaudio::AAudioStreamRequest &request) {
+    aaudio_result_t result = AAudioServiceEndpointShared::open(request);
     if (result == AAUDIO_OK) {
         mMixer.allocate(getStreamInternal()->getSamplesPerFrame(),
                         getStreamInternal()->getFramesPerBurst());
@@ -72,35 +74,42 @@
     while (mCallbackEnabled.load() && getStreamInternal()->isActive() && (result >= 0)) {
         // Mix data from each active stream.
         mMixer.clear();
+
         { // brackets are for lock_guard
             int index = 0;
             int64_t mmapFramesWritten = getStreamInternal()->getFramesWritten();
 
             std::lock_guard <std::mutex> lock(mLockStreams);
-            for (sp<AAudioServiceStreamShared> clientStream : mRegisteredStreams) {
-                if (clientStream->isRunning()) {
-                    FifoBuffer *fifo = clientStream->getDataFifoBuffer();
-                    // Determine offset between framePosition in client's stream vs the underlying
-                    // MMAP stream.
-                    int64_t clientFramesRead = fifo->getReadCounter();
-                    // These two indices refer to the same frame.
-                    int64_t positionOffset = mmapFramesWritten - clientFramesRead;
-                    clientStream->setTimestampPositionOffset(positionOffset);
-
-                    float volume = 1.0; // to match legacy volume
-                    bool underflowed = mMixer.mix(index, fifo, volume);
-
-                    // This timestamp represents the completion of data being read out of the
-                    // client buffer. It is sent to the client and used in the timing model
-                    // to decide when the client has room to write more data.
-                    Timestamp timestamp(fifo->getReadCounter(), AudioClock::getNanoseconds());
-                    clientStream->markTransferTime(timestamp);
-
-                    if (underflowed) {
-                        clientStream->incrementXRunCount();
-                    }
+            for (const auto& clientStream : mRegisteredStreams) {
+                if (!clientStream->isRunning()) {
+                    continue;
                 }
-                index++;
+
+                AAudioServiceStreamShared *streamShared =
+                        static_cast<AAudioServiceStreamShared *>(clientStream.get());
+
+                FifoBuffer *fifo = streamShared->getDataFifoBuffer();
+                // Determine offset between framePosition in client's stream vs the underlying
+                // MMAP stream.
+                int64_t clientFramesRead = fifo->getReadCounter();
+                // These two indices refer to the same frame.
+                int64_t positionOffset = mmapFramesWritten - clientFramesRead;
+                streamShared->setTimestampPositionOffset(positionOffset);
+
+                float volume = 1.0; // to match legacy volume
+                bool underflowed = mMixer.mix(index, fifo, volume);
+
+                // This timestamp represents the completion of data being read out of the
+                // client buffer. It is sent to the client and used in the timing model
+                // to decide when the client has room to write more data.
+                Timestamp timestamp(fifo->getReadCounter(), AudioClock::getNanoseconds());
+                streamShared->markTransferTime(timestamp);
+
+                if (underflowed) {
+                    streamShared->incrementXRunCount();
+                }
+
+                index++; // just used for labelling tracks in systrace
             }
         }
 
@@ -108,7 +117,7 @@
         result = getStreamInternal()->write(mMixer.getOutputBuffer(),
                                             getFramesPerBurst(), timeoutNanos);
         if (result == AAUDIO_ERROR_DISCONNECTED) {
-            disconnectRegisteredStreams();
+            AAudioServiceEndpointShared::disconnectRegisteredStreams();
             break;
         } else if (result != getFramesPerBurst()) {
             ALOGW("AAudioServiceEndpoint(): callbackLoop() wrote %d / %d",
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.h b/services/oboeservice/AAudioServiceEndpointPlay.h
index c22f510..a0a383c 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.h
+++ b/services/oboeservice/AAudioServiceEndpointPlay.h
@@ -25,6 +25,7 @@
 #include "client/AudioStreamInternal.h"
 #include "client/AudioStreamInternalPlay.h"
 #include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceEndpointShared.h"
 #include "AAudioServiceStreamShared.h"
 #include "AAudioServiceStreamMMAP.h"
 #include "AAudioMixer.h"
@@ -35,16 +36,12 @@
 /**
  * Contains a mixer and a stream for writing the result of the mix.
  */
-class AAudioServiceEndpointPlay : public AAudioServiceEndpoint {
+class AAudioServiceEndpointPlay : public AAudioServiceEndpointShared {
 public:
     explicit AAudioServiceEndpointPlay(android::AAudioService &audioService);
     virtual ~AAudioServiceEndpointPlay();
 
-    aaudio_result_t open(const AAudioStreamConfiguration& configuration) override;
-
-    AudioStreamInternal *getStreamInternal() override {
-        return &mStreamInternalPlay;
-    }
+    aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
 
     void *callbackLoop() override;
 
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
new file mode 100644
index 0000000..43d73b7
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#define LOG_TAG "AAudioServiceEndpointShared"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <iomanip>
+#include <iostream>
+#include <sstream>
+
+#include "binding/AAudioServiceMessage.h"
+#include "client/AudioStreamInternal.h"
+#include "client/AudioStreamInternalPlay.h"
+#include "core/AudioStreamBuilder.h"
+
+#include "AAudioServiceEndpointShared.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "AAudioMixer.h"
+#include "AAudioService.h"
+
+using namespace android;
+using namespace aaudio;
+
+// This is the maximum size in frames. The effective size can be tuned smaller at runtime.
+#define DEFAULT_BUFFER_CAPACITY   (48 * 8)
+
+std::string AAudioServiceEndpointShared::dump() const {
+    std::stringstream result;
+
+    result << "  SHARED: sharing exclusive stream with handle = 0x"
+           << std::setfill('0') << std::setw(8)
+           << std::hex << mStreamInternal->getServiceHandle()
+           << std::dec << std::setfill(' ');
+    result << "\n";
+    result << "    Running Stream Count: " << mRunningStreamCount << "\n";
+
+    result << AAudioServiceEndpoint::dump();
+    return result.str();
+}
+
+// Share an AudioStreamInternal.
+aaudio_result_t AAudioServiceEndpointShared::open(const aaudio::AAudioStreamRequest &request) {
+    aaudio_result_t result = AAUDIO_OK;
+    const AAudioStreamConfiguration &configuration = request.getConstantConfiguration();
+
+    mRequestedDeviceId = configuration.getDeviceId();
+    setDirection(configuration.getDirection());
+
+    AudioStreamBuilder builder;
+    builder.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
+    // Don't fall back to SHARED because that would cause recursion.
+    builder.setSharingModeMatchRequired(true);
+    builder.setDeviceId(mRequestedDeviceId);
+    builder.setFormat(configuration.getFormat());
+    builder.setSampleRate(configuration.getSampleRate());
+    builder.setSamplesPerFrame(configuration.getSamplesPerFrame());
+    builder.setDirection(configuration.getDirection());
+    builder.setBufferCapacity(DEFAULT_BUFFER_CAPACITY);
+
+    result = mStreamInternal->open(builder);
+
+    setSampleRate(mStreamInternal->getSampleRate());
+    setSamplesPerFrame(mStreamInternal->getSamplesPerFrame());
+    setDeviceId(mStreamInternal->getDeviceId());
+    mFramesPerBurst = mStreamInternal->getFramesPerBurst();
+
+    return result;
+}
+
+aaudio_result_t AAudioServiceEndpointShared::close() {
+    return getStreamInternal()->close();
+}
+
+// Glue between C and C++ callbacks.
+static void *aaudio_endpoint_thread_proc(void *context) {
+    AAudioServiceEndpointShared *endpoint = static_cast<AAudioServiceEndpointShared *>(context);
+    if (endpoint != nullptr) {
+        return endpoint->callbackLoop();
+    } else {
+        return nullptr;
+    }
+    }
+}
+
+aaudio_result_t aaudio::AAudioServiceEndpointShared::startSharingThread_l() {
+    // Launch the callback loop thread.
+    int64_t periodNanos = getStreamInternal()->getFramesPerBurst()
+                          * AAUDIO_NANOS_PER_SECOND
+                          / getSampleRate();
+    mCallbackEnabled.store(true);
+    return getStreamInternal()->createThread(periodNanos, aaudio_endpoint_thread_proc, this);
+}
+
+aaudio_result_t aaudio::AAudioServiceEndpointShared::stopSharingThread() {
+    mCallbackEnabled.store(false);
+    aaudio_result_t result = getStreamInternal()->joinThread(NULL);
+    return result;
+}
+
+aaudio_result_t AAudioServiceEndpointShared::startStream(sp<AAudioServiceStreamBase> sharedStream,
+                                                         audio_port_handle_t *clientHandle) {
+    aaudio_result_t result = AAUDIO_OK;
+    if (++mRunningStreamCount == 1) {
+        // TODO use real-time technique to avoid mutex, eg. atomic command FIFO
+        std::lock_guard<std::mutex> lock(mLockStreams);
+        result = getStreamInternal()->requestStart();
+        startSharingThread_l();
+    }
+    if (result == AAUDIO_OK) {
+        ALOGD("AAudioServiceEndpointShared::startStream() use shared stream client.");
+        result = getStreamInternal()->startClient(sharedStream->getAudioClient(), clientHandle);
+    }
+    return result;
+}
+
+aaudio_result_t AAudioServiceEndpointShared::stopStream(sp<AAudioServiceStreamBase> sharedStream,
+                                                        audio_port_handle_t clientHandle) {
+    // Don't lock here because disconnectRegisteredStreams() also uses the lock.
+
+    // Ignore result.
+    (void) getStreamInternal()->stopClient(clientHandle);
+
+    if (--mRunningStreamCount == 0) { // atomic
+        stopSharingThread();
+        getStreamInternal()->requestStop();
+    }
+    return AAUDIO_OK;
+}
+
+
+// Get the timestamp that was written by the real-time service thread, e.g. the mixer.
+aaudio_result_t AAudioServiceEndpointShared::getFreeRunningPosition(int64_t *positionFrames,
+                                                                  int64_t *timeNanos) {
+    if (mAtomicTimestamp.isValid()) {
+        Timestamp timestamp = mAtomicTimestamp.read();
+        *positionFrames = timestamp.getPosition();
+        *timeNanos = timestamp.getNanoseconds();
+        return AAUDIO_OK;
+    } else {
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+}
+
+aaudio_result_t AAudioServiceEndpointShared::getTimestamp(int64_t *positionFrames,
+                                                          int64_t *timeNanos) {
+    return mStreamInternal->getTimestamp(CLOCK_MONOTONIC, positionFrames, timeNanos);
+}
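Note: getFreeRunningPosition() above reads mAtomicTimestamp, a SimpleDoubleBuffer that the real-time callback thread writes and the binder thread reads without blocking. The sketch below is only an approximation of that single-writer pattern; the class name and memory-order choices are assumptions, not the SimpleDoubleBuffer from the tree:

    #include <atomic>
    #include <cstdint>

    struct Timestamp {
        int64_t position;
        int64_t nanoseconds;
    };

    // Single-writer double buffer: the writer fills the inactive slot, then
    // publishes it by bumping the counter; readers pick the published slot.
    class DoubleBuffer {
    public:
        void write(const Timestamp &t) {
            uint32_t next = (mCounter.load(std::memory_order_relaxed) + 1) & 1;
            mSlots[next] = t;                                   // fill the inactive slot
            mCounter.fetch_add(1, std::memory_order_release);   // publish it
        }
        bool isValid() const { return mCounter.load(std::memory_order_acquire) > 0; }
        Timestamp read() const {
            return mSlots[mCounter.load(std::memory_order_acquire) & 1];
        }
    private:
        Timestamp mSlots[2] = {};
        std::atomic<uint32_t> mCounter{0};
    };

    int main() {
        DoubleBuffer buffer;
        buffer.write({480, 1000000});   // real-time thread publishes position + time
        if (!buffer.isValid()) return 1;
        Timestamp t = buffer.read();    // service thread reads it without locking
        return t.position == 480 ? 0 : 1;
    }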
diff --git a/services/oboeservice/AAudioServiceEndpointShared.h b/services/oboeservice/AAudioServiceEndpointShared.h
new file mode 100644
index 0000000..e3bd2c1
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpointShared.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SERVICE_ENDPOINT_SHARED_H
+#define AAUDIO_SERVICE_ENDPOINT_SHARED_H
+
+#include <atomic>
+#include <mutex>
+
+#include "AAudioServiceEndpoint.h"
+#include "client/AudioStreamInternal.h"
+#include "client/AudioStreamInternalPlay.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "AAudioService.h"
+
+namespace aaudio {
+
+/**
+ * This Service class corresponds to a Client stream that shares an MMAP device through a mixer
+ * or an input distributor.
+ */
+class AAudioServiceEndpointShared : public AAudioServiceEndpoint {
+
+public:
+
+    std::string dump() const override;
+
+    aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
+
+    aaudio_result_t close() override;
+
+    aaudio_result_t startStream(android::sp<AAudioServiceStreamBase> stream,
+                                audio_port_handle_t *clientHandle) override;
+
+    aaudio_result_t stopStream(android::sp<AAudioServiceStreamBase> stream,
+                               audio_port_handle_t clientHandle) override;
+
+    aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+
+    aaudio_result_t getTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
+
+    virtual void            *callbackLoop() = 0;
+
+    AudioStreamInternal *getStreamInternal() const {
+        return mStreamInternal;
+    }
+
+protected:
+
+    aaudio_result_t          startSharingThread_l();
+
+    aaudio_result_t          stopSharingThread();
+
+    // pointer to object statically allocated in subclasses
+    AudioStreamInternal     *mStreamInternal = nullptr;
+
+    std::atomic<bool>        mCallbackEnabled{false};
+
+    std::atomic<int>         mRunningStreamCount{0};
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_SERVICE_ENDPOINT_SHARED_H
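Note: AAudioServiceEndpointShared::startStream()/stopStream() use mRunningStreamCount so that only the first stream to start launches the shared callback thread and only the last stream to stop tears it down. A minimal sketch of that counting pattern (the SharedRunner class is hypothetical, not from the patch):

    #include <atomic>
    #include <cstdio>

    class SharedRunner {
    public:
        void start() {
            if (++mRunningStreamCount == 1) {
                printf("start shared thread\n");  // stands in for startSharingThread_l()
            }
        }
        void stop() {
            if (--mRunningStreamCount == 0) {
                printf("stop shared thread\n");   // stands in for stopSharingThread()
            }
        }
    private:
        std::atomic<int> mRunningStreamCount{0};
    };

    int main() {
        SharedRunner runner;
        runner.start();  // first stream -> shared thread starts
        runner.start();  // second stream -> no extra work
        runner.stop();   // one stream still running
        runner.stop();   // last stream -> shared thread stops
        return 0;
    }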
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index e5f916c..2dc62a0 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -26,6 +26,9 @@
 #include "binding/AAudioServiceMessage.h"
 #include "utility/AudioClock.h"
 
+#include "AAudioEndpointManager.h"
+#include "AAudioService.h"
+#include "AAudioServiceEndpoint.h"
 #include "AAudioServiceStreamBase.h"
 #include "TimestampScheduler.h"
 
@@ -37,10 +40,11 @@
  * @return
  */
 
-AAudioServiceStreamBase::AAudioServiceStreamBase()
+AAudioServiceStreamBase::AAudioServiceStreamBase(AAudioService &audioService)
         : mUpMessageQueue(nullptr)
         , mAAudioThread()
-        , mAtomicTimestamp() {
+        , mAtomicTimestamp()
+        , mAudioService(audioService) {
     mMmapClient.clientUid = -1;
     mMmapClient.clientPid = -1;
     mMmapClient.packageName = String16("");
@@ -68,49 +72,112 @@
     result << std::setw(6) << mMmapClient.clientUid;
     result << std::setw(4) << (isRunning() ? "yes" : " no");
     result << std::setw(6) << mState;
-    result << std::setw(7) << mAudioFormat;
+    result << std::setw(7) << getFormat();
     result << std::setw(6) << mFramesPerBurst;
-    result << std::setw(5) << mSamplesPerFrame;
-    result << std::setw(9) << mCapacityInFrames;
+    result << std::setw(5) << getSamplesPerFrame();
+    result << std::setw(9) << getBufferCapacity();
 
     return result.str();
 }
 
 aaudio_result_t AAudioServiceStreamBase::open(const aaudio::AAudioStreamRequest &request,
-                     aaudio::AAudioStreamConfiguration &configurationOutput) {
+                                              aaudio_sharing_mode_t sharingMode) {
+    AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
+    aaudio_result_t result = AAUDIO_OK;
 
     mMmapClient.clientUid = request.getUserId();
     mMmapClient.clientPid = request.getProcessId();
-    mMmapClient.packageName.setTo(String16("")); // FIXME what should we do here?
+    mMmapClient.packageName.setTo(String16("")); // TODO What should we do here?
 
-    std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
-    if (mUpMessageQueue != nullptr) {
-        return AAUDIO_ERROR_INVALID_STATE;
-    } else {
+    // Limit scope of lock to avoid recursive lock in close().
+    {
+        std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
+        if (mUpMessageQueue != nullptr) {
+            ALOGE("AAudioServiceStreamBase::open() called twice");
+            return AAUDIO_ERROR_INVALID_STATE;
+        }
+
         mUpMessageQueue = new SharedRingBuffer();
-        return mUpMessageQueue->allocate(sizeof(AAudioServiceMessage), QUEUE_UP_CAPACITY_COMMANDS);
+        result = mUpMessageQueue->allocate(sizeof(AAudioServiceMessage),
+                                           QUEUE_UP_CAPACITY_COMMANDS);
+        if (result != AAUDIO_OK) {
+            goto error;
+        }
+
+        mServiceEndpoint = endpointManager.openEndpoint(mAudioService,
+                                                         request,
+                                                         sharingMode);
+        if (mServiceEndpoint == nullptr) {
+            ALOGE("AAudioServiceStreamBase::open() openEndpoint() failed");
+            result = AAUDIO_ERROR_UNAVAILABLE;
+            goto error;
+        }
+        mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
+        copyFrom(*mServiceEndpoint);
     }
+    return result;
+
+error:
+    close();
+    return result;
 }
 
 aaudio_result_t AAudioServiceStreamBase::close() {
-    if (mState != AAUDIO_STREAM_STATE_CLOSED) {
+    aaudio_result_t result = AAUDIO_OK;
+    if (mState == AAUDIO_STREAM_STATE_CLOSED) {
+        return AAUDIO_OK;
+    }
+
+    stop();
+
+    if (mServiceEndpoint == nullptr) {
+        result = AAUDIO_ERROR_INVALID_STATE;
+    } else {
+        mServiceEndpoint->unregisterStream(this);
+        AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
+        endpointManager.closeEndpoint(mServiceEndpoint);
+        mServiceEndpoint.clear();
+    }
+
+    {
+        std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
         stopTimestampThread();
-        std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
         delete mUpMessageQueue;
         mUpMessageQueue = nullptr;
-        mState = AAUDIO_STREAM_STATE_CLOSED;
     }
-    return AAUDIO_OK;
+
+    mState = AAUDIO_STREAM_STATE_CLOSED;
+    return result;
 }
 
+/**
+ * Start the flow of audio data.
+ *
+ * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
+ */
 aaudio_result_t AAudioServiceStreamBase::start() {
     if (isRunning()) {
         return AAUDIO_OK;
     }
-    sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
-    mState = AAUDIO_STREAM_STATE_STARTED;
-    mThreadEnabled.store(true);
-    return mAAudioThread.start(this);
+
+    if (mServiceEndpoint == nullptr) {
+        ALOGE("AAudioServiceStreamBase::start() missing endpoint");
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    mClientHandle = AUDIO_PORT_HANDLE_NONE;
+    aaudio_result_t result = mServiceEndpoint->startStream(this, &mClientHandle);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamBase::start() mServiceEndpoint returned %d", result);
+        disconnect();
+    } else {
+        sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
+        mState = AAUDIO_STREAM_STATE_STARTED;
+        mThreadEnabled.store(true);
+        result = mAAudioThread.start(this);
+    }
+    return result;
 }
 
 aaudio_result_t AAudioServiceStreamBase::pause() {
@@ -118,6 +185,16 @@
     if (!isRunning()) {
         return result;
     }
+    if (mServiceEndpoint == nullptr) {
+        ALOGE("AAudioServiceStreamShared::pause() missing endpoint");
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    result = mServiceEndpoint->stopStream(this, mClientHandle);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
+        disconnect(); // TODO should we return or pause Base first?
+    }
+
     sendCurrentTimestamp();
     mThreadEnabled.store(false);
     result = mAAudioThread.stop();
@@ -135,13 +212,27 @@
     if (!isRunning()) {
         return result;
     }
-    // TODO wait for data to be played out
+
+    if (mServiceEndpoint == nullptr) {
+        ALOGE("AAudioServiceStreamShared::stop() missing endpoint");
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
     sendCurrentTimestamp(); // warning - this calls a virtual function
     result = stopTimestampThread();
     if (result != AAUDIO_OK) {
         disconnect();
         return result;
     }
+
+    // TODO wait for data to be played out
+    result = mServiceEndpoint->stopStream(this, mClientHandle);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamShared::stop() mServiceEndpoint returned %d", result);
+        disconnect();
+        // TODO what to do with result here?
+    }
+
     sendServiceEvent(AAUDIO_SERVICE_EVENT_STOPPED);
     mState = AAUDIO_STREAM_STATE_STOPPED;
     return result;
@@ -157,6 +248,12 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::flush() {
+    if (mState != AAUDIO_STREAM_STATE_PAUSED) {
+        ALOGE("AAudioServiceStreamBase::flush() stream not paused, state = %s",
+              AAudio_convertStreamStateToText(mState));
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    // Data will get flushed when the client receives the FLUSHED event.
     sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
     mState = AAUDIO_STREAM_STATE_FLUSHED;
     return AAUDIO_OK;
@@ -166,7 +263,7 @@
 void AAudioServiceStreamBase::run() {
     ALOGD("AAudioServiceStreamBase::run() entering ----------------");
     TimestampScheduler timestampScheduler;
-    timestampScheduler.setBurstPeriod(mFramesPerBurst, mSampleRate);
+    timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
     timestampScheduler.start(AudioClock::getNanoseconds());
     int64_t nextTime = timestampScheduler.nextAbsoluteTime();
     while(mThreadEnabled.load()) {
@@ -204,7 +301,7 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::writeUpMessageQueue(AAudioServiceMessage *command) {
-    std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
+    std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
     if (mUpMessageQueue == nullptr) {
         ALOGE("writeUpMessageQueue(): mUpMessageQueue null! - stream not open");
         return AAUDIO_ERROR_NULL;
@@ -254,3 +351,7 @@
                                     parcelable.mUpMessageQueueParcelable);
     return getDownDataDescription(parcelable);
 }
+
+void AAudioServiceStreamBase::onVolumeChanged(float volume) {
+    sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
+}
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 2f94614..301795d 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -32,22 +32,28 @@
 
 #include "SharedRingBuffer.h"
 #include "AAudioThread.h"
+#include "AAudioService.h"
 
 namespace aaudio {
 
+class AAudioServiceEndpoint;
+
 // We expect the queue to only have a few commands.
 // This should be way more than we need.
 #define QUEUE_UP_CAPACITY_COMMANDS (128)
 
 /**
- * Base class for a stream in the AAudio service.
+ * Each instance of AAudioServiceStreamBase corresponds to a client stream.
+ * It uses a subclass of AAudioServiceEndpoint to communicate with the underlying device or port.
  */
 class AAudioServiceStreamBase
     : public virtual android::RefBase
+    , public AAudioStreamParameters
     , public Runnable  {
 
 public:
-    AAudioServiceStreamBase();
+    explicit AAudioServiceStreamBase(android::AAudioService &aAudioService);
+
     virtual ~AAudioServiceStreamBase();
 
     enum {
@@ -63,39 +69,53 @@
     /**
      * Open the device.
      */
-    virtual aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
-                                 aaudio::AAudioStreamConfiguration &configurationOutput) = 0;
+    virtual aaudio_result_t open(const aaudio::AAudioStreamRequest &request) = 0;
 
     virtual aaudio_result_t close();
 
     /**
-     * Start the flow of data.
+     * Start the flow of audio data.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
      */
     virtual aaudio_result_t start();
 
     /**
-     * Stop the flow of data such that start() can resume with loss of data.
-     */
+     * Stop the flow of data so that start() can resume without loss of data.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
+     */
     virtual aaudio_result_t pause();
 
     /**
-     * Stop the flow of data after data in buffer has played.
+     * Stop the flow of data after the currently queued data has finished playing.
+     *
+     * This is not guaranteed to be synchronous but it currently is.
+     * An AAUDIO_SERVICE_EVENT_STOPPED will be sent to the client when complete.
+     *
      */
     virtual aaudio_result_t stop();
 
     aaudio_result_t stopTimestampThread();
 
     /**
-     *  Discard any data held by the underlying HAL or Service.
+     * Discard any data held by the underlying HAL or Service.
+     *
+     * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
      */
     virtual aaudio_result_t flush();
 
+
     virtual aaudio_result_t startClient(const android::AudioClient& client __unused,
                                         audio_port_handle_t *clientHandle __unused) {
+        ALOGD("AAudioServiceStreamBase::startClient(%p, ...) AAUDIO_ERROR_UNAVAILABLE", &client);
         return AAUDIO_ERROR_UNAVAILABLE;
     }
 
     virtual aaudio_result_t stopClient(audio_port_handle_t clientHandle __unused) {
+        ALOGD("AAudioServiceStreamBase::stopClient(%d) AAUDIO_ERROR_UNAVAILABLE", clientHandle);
         return AAUDIO_ERROR_UNAVAILABLE;
     }
 
@@ -130,14 +150,14 @@
         return mFramesPerBurst;
     }
 
-    int32_t calculateBytesPerFrame() const {
-        return mSamplesPerFrame * AAudioConvert_formatToSizeInBytes(mAudioFormat);
-    }
-
     void run() override; // to implement Runnable
 
     void disconnect();
 
+    const android::AudioClient &getAudioClient() {
+        return mMmapClient;
+    }
+
     uid_t getOwnerUserId() const {
         return mMmapClient.clientUid;
     }
@@ -157,8 +177,16 @@
         return mState;
     }
 
+    void onVolumeChanged(float volume);
+
 protected:
 
+    /**
+     * Open the device.
+     */
+    aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
+                         aaudio_sharing_mode_t sharingMode);
+
     void setState(aaudio_stream_state_t state) {
         mState = state;
     }
@@ -183,22 +211,21 @@
     pid_t                   mRegisteredClientThread = ILLEGAL_THREAD_ID;
 
     SharedRingBuffer*       mUpMessageQueue;
-    std::mutex              mLockUpMessageQueue;
+    std::mutex              mUpMessageQueueLock;
 
     AAudioThread            mAAudioThread;
     // This is used by one thread to tell another thread to exit. So it must be atomic.
-    std::atomic<bool>       mThreadEnabled;
+    std::atomic<bool>       mThreadEnabled{false};
 
-    aaudio_format_t         mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
     int32_t                 mFramesPerBurst = 0;
-    int32_t                 mSamplesPerFrame = AAUDIO_UNSPECIFIED;
-    int32_t                 mSampleRate = AAUDIO_UNSPECIFIED;
-    int32_t                 mCapacityInFrames = AAUDIO_UNSPECIFIED;
-    android::AudioClient    mMmapClient;
+    android::AudioClient    mMmapClient; // set in open, used in MMAP start()
     audio_port_handle_t     mClientHandle = AUDIO_PORT_HANDLE_NONE;
 
     SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;
 
+    android::AAudioService &mAudioService;
+    android::sp<AAudioServiceEndpoint> mServiceEndpoint;
+
 private:
     aaudio_handle_t         mHandle = -1;
 };
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 68dcaff..43595a4 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -19,41 +19,33 @@
 #include <utils/Log.h>
 
 #include <atomic>
+#include <iomanip>
+#include <iostream>
 #include <stdint.h>
 
 #include <utils/String16.h>
 #include <media/nbaio/AudioStreamOutSink.h>
 #include <media/MmapStreamInterface.h>
 
+#include "binding/AudioEndpointParcelable.h"
+#include "utility/AAudioUtilities.h"
+
+#include "AAudioServiceEndpointMMAP.h"
 #include "AAudioServiceStreamBase.h"
 #include "AAudioServiceStreamMMAP.h"
-#include "binding/AudioEndpointParcelable.h"
 #include "SharedMemoryProxy.h"
-#include "utility/AAudioUtilities.h"
 
 using android::base::unique_fd;
 using namespace android;
 using namespace aaudio;
 
-#define AAUDIO_BUFFER_CAPACITY_MIN    4 * 512
-#define AAUDIO_SAMPLE_RATE_DEFAULT    48000
-
-// This is an estimate of the time difference between the HW and the MMAP time.
-// TODO Get presentation timestamps from the HAL instead of using these estimates.
-#define OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS  (3 * AAUDIO_NANOS_PER_MILLISECOND)
-#define INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS   (-1 * AAUDIO_NANOS_PER_MILLISECOND)
-
 /**
  * Service Stream that uses an MMAP buffer.
  */
 
-AAudioServiceStreamMMAP::AAudioServiceStreamMMAP(const android::AudioClient& serviceClient,
+AAudioServiceStreamMMAP::AAudioServiceStreamMMAP(android::AAudioService &aAudioService,
                                                  bool inService)
-        : AAudioServiceStreamBase()
-        , mMmapStreamCallback(new MyMmapStreamCallback(*this))
-        , mPreviousFrameCounter(0)
-        , mMmapStream(nullptr)
-        , mServiceClient(serviceClient)
+        : AAudioServiceStreamBase(aAudioService)
         , mInService(inService) {
 }
 
@@ -61,170 +53,32 @@
     if (mState == AAUDIO_STREAM_STATE_CLOSED) {
         return AAUDIO_OK;
     }
+
     stop();
-    if (mMmapStream != 0) {
-        mMmapStream.clear(); // TODO review. Is that all we have to do?
-        // Apparently the above close is asynchronous. An attempt to open a new device
-        // right after a close can fail. Also some callbacks may still be in flight!
-        // FIXME Make closing synchronous.
-        AudioClock::sleepForNanos(100 * AAUDIO_NANOS_PER_MILLISECOND);
-    }
 
     return AAudioServiceStreamBase::close();
 }
 
 // Open stream on HAL and pass information about the shared memory buffer back to the client.
-aaudio_result_t AAudioServiceStreamMMAP::open(const aaudio::AAudioStreamRequest &request,
-                                       aaudio::AAudioStreamConfiguration &configurationOutput) {
-    const audio_attributes_t attributes = {
-        .content_type = AUDIO_CONTENT_TYPE_MUSIC,
-        .usage = AUDIO_USAGE_MEDIA,
-        .source = AUDIO_SOURCE_VOICE_RECOGNITION,
-        .flags = AUDIO_FLAG_LOW_LATENCY,
-        .tags = ""
-    };
-    audio_config_base_t config;
+aaudio_result_t AAudioServiceStreamMMAP::open(const aaudio::AAudioStreamRequest &request) {
 
-    aaudio_result_t result = AAudioServiceStreamBase::open(request, configurationOutput);
+    sp<AAudioServiceStreamMMAP> keep(this);
+
+    aaudio_result_t result = AAudioServiceStreamBase::open(request,
+                                                           AAUDIO_SHARING_MODE_EXCLUSIVE);
     if (result != AAUDIO_OK) {
         ALOGE("AAudioServiceStreamBase open returned %d", result);
         return result;
     }
 
-    const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
-    audio_port_handle_t deviceId = configurationInput.getDeviceId();
-    aaudio_direction_t direction = request.getDirection();
-
-    // Fill in config
-    aaudio_format_t aaudioFormat = configurationInput.getFormat();
-    if (aaudioFormat == AAUDIO_UNSPECIFIED || aaudioFormat == AAUDIO_FORMAT_PCM_FLOAT) {
-        aaudioFormat = AAUDIO_FORMAT_PCM_I16;
+    result = mServiceEndpoint->registerStream(keep);
+    if (result != AAUDIO_OK) {
+        goto error;
     }
-    config.format = AAudioConvert_aaudioToAndroidDataFormat(aaudioFormat);
-
-    int32_t aaudioSampleRate = configurationInput.getSampleRate();
-    if (aaudioSampleRate == AAUDIO_UNSPECIFIED) {
-        aaudioSampleRate = AAUDIO_SAMPLE_RATE_DEFAULT;
-    }
-    config.sample_rate = aaudioSampleRate;
-
-    int32_t aaudioSamplesPerFrame = configurationInput.getSamplesPerFrame();
-
-    if (direction == AAUDIO_DIRECTION_OUTPUT) {
-        config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
-                            ? AUDIO_CHANNEL_OUT_STEREO
-                            : audio_channel_out_mask_from_count(aaudioSamplesPerFrame);
-        mHardwareTimeOffsetNanos = OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at DAC later
-
-    } else if (direction == AAUDIO_DIRECTION_INPUT) {
-        config.channel_mask =  (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
-                            ? AUDIO_CHANNEL_IN_STEREO
-                            : audio_channel_in_mask_from_count(aaudioSamplesPerFrame);
-        mHardwareTimeOffsetNanos = INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at ADC earlier
-
-    } else {
-        ALOGE("openMmapStream - invalid direction = %d", direction);
-        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
-    }
-
-    MmapStreamInterface::stream_direction_t streamDirection = (direction == AAUDIO_DIRECTION_OUTPUT)
-        ? MmapStreamInterface::DIRECTION_OUTPUT : MmapStreamInterface::DIRECTION_INPUT;
-
-    // Open HAL stream.
-    status_t status = MmapStreamInterface::openMmapStream(streamDirection,
-                                                          &attributes,
-                                                          &config,
-                                                          mMmapClient,
-                                                          &deviceId,
-                                                          mMmapStreamCallback,
-                                                          mMmapStream,
-                                                          &mPortHandle);
-    if (status != OK) {
-        ALOGE("openMmapStream returned status %d", status);
-        return AAUDIO_ERROR_UNAVAILABLE;
-    }
-
-    if (deviceId == AAUDIO_UNSPECIFIED) {
-        ALOGW("AAudioServiceStreamMMAP::open() - openMmapStream() failed to set deviceId");
-    }
-
-    // Create MMAP/NOIRQ buffer.
-    int32_t minSizeFrames = configurationInput.getBufferCapacity();
-    if (minSizeFrames <= 0) { // zero will get rejected
-        minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
-    }
-    status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
-    if (status != OK) {
-        ALOGE("AAudioServiceStreamMMAP::open() - createMmapBuffer() returned status %d",
-              status);
-        return AAUDIO_ERROR_UNAVAILABLE;
-    } else {
-        ALOGD("createMmapBuffer status = %d, buffer_size = %d, burst_size %d"
-                ", Sharable FD: %s",
-              status,
-              abs(mMmapBufferinfo.buffer_size_frames),
-              mMmapBufferinfo.burst_size_frames,
-              mMmapBufferinfo.buffer_size_frames < 0 ? "Yes" : "No");
-    }
-
-    mCapacityInFrames = mMmapBufferinfo.buffer_size_frames;
-    // FIXME: the audio HAL indicates if the shared memory fd can be shared outside of audioserver
-    // by returning a negative buffer size
-    if (mCapacityInFrames < 0) {
-        // Exclusive mode is possible from any client
-        mCapacityInFrames = -mCapacityInFrames;
-    } else {
-        // exclusive mode is only possible if the final fd destination is inside audioserver
-        if ((mMmapClient.clientUid != mServiceClient.clientUid) &&
-                configurationInput.getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE) {
-            // Fallback is handled by caller but indicate what is possible in case
-            // this is used in the future
-            configurationOutput.setSharingMode(AAUDIO_SHARING_MODE_SHARED);
-            return AAUDIO_ERROR_UNAVAILABLE;
-        }
-    }
-
-    // Get information about the stream and pass it back to the caller.
-    mSamplesPerFrame = (direction == AAUDIO_DIRECTION_OUTPUT)
-                           ? audio_channel_count_from_out_mask(config.channel_mask)
-                           : audio_channel_count_from_in_mask(config.channel_mask);
-
-    // AAudio creates a copy of this FD and retains ownership of the copy.
-    // Assume that AudioFlinger will close the original shared_memory_fd.
-    mAudioDataFileDescriptor.reset(dup(mMmapBufferinfo.shared_memory_fd));
-    if (mAudioDataFileDescriptor.get() == -1) {
-        ALOGE("AAudioServiceStreamMMAP::open() - could not dup shared_memory_fd");
-        return AAUDIO_ERROR_INTERNAL; // TODO review
-    }
-    mFramesPerBurst = mMmapBufferinfo.burst_size_frames;
-    mAudioFormat = AAudioConvert_androidToAAudioDataFormat(config.format);
-    mSampleRate = config.sample_rate;
-
-    // Scale up the burst size to meet the minimum equivalent in microseconds.
-    // This is to avoid waking the CPU too often when the HW burst is very small
-    // or at high sample rates.
-    int32_t burstMinMicros = AAudioProperty_getHardwareBurstMinMicros();
-    int32_t burstMicros = 0;
-    do {
-        if (burstMicros > 0) {  // skip first loop
-            mFramesPerBurst *= 2;
-        }
-        burstMicros = mFramesPerBurst * static_cast<int64_t>(1000000) / mSampleRate;
-    } while (burstMicros < burstMinMicros);
-
-    ALOGD("AAudioServiceStreamMMAP::open() original burst = %d, minMicros = %d, final burst = %d\n",
-          mMmapBufferinfo.burst_size_frames, burstMinMicros, mFramesPerBurst);
-
-    ALOGD("AAudioServiceStreamMMAP::open() actual rate = %d, channels = %d, deviceId = %d\n",
-          mSampleRate, mSamplesPerFrame, deviceId);
-
-    // Fill in AAudioStreamConfiguration
-    configurationOutput.setSampleRate(mSampleRate);
-    configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
-    configurationOutput.setFormat(mAudioFormat);
-    configurationOutput.setDeviceId(deviceId);
 
     setState(AAUDIO_STREAM_STATE_OPEN);
     return AAUDIO_OK;
 }
 
@@ -235,18 +89,10 @@
     if (isRunning()) {
         return AAUDIO_OK;
     }
-    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
-    aaudio_result_t result;
-    status_t status = mMmapStream->start(mServiceClient, &mPortHandle);
-    if (status != OK) {
-        ALOGE("AAudioServiceStreamMMAP::start() mMmapStream->start() returned %d", status);
-        disconnect();
-        result = AAudioConvert_androidToAAudioResult(status);
-    } else {
-        result = AAudioServiceStreamBase::start();
-        if (!mInService && result == AAUDIO_OK) {
-            startClient(mMmapClient, &mClientHandle);
-        }
+
+    aaudio_result_t result = AAudioServiceStreamBase::start();
+    if (!mInService && result == AAUDIO_OK) {
+        startClient(mMmapClient, &mClientHandle);
     }
     return result;
 }
@@ -258,70 +104,50 @@
     if (!isRunning()) {
         return AAUDIO_OK;
     }
-    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
-    aaudio_result_t result1 = AAudioServiceStreamBase::pause();
+    aaudio_result_t result = AAudioServiceStreamBase::pause();
+    // TODO put before base::pause()?
     if (!mInService) {
         stopClient(mClientHandle);
     }
-    status_t status = mMmapStream->stop(mPortHandle);
-    mFramesRead.reset32();
-    return (result1 != AAUDIO_OK) ? result1 : AAudioConvert_androidToAAudioResult(status);
+    return result;
 }
 
 aaudio_result_t AAudioServiceStreamMMAP::stop() {
     if (!isRunning()) {
         return AAUDIO_OK;
     }
-    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
-    aaudio_result_t result1 = AAudioServiceStreamBase::stop();
+    aaudio_result_t result = AAudioServiceStreamBase::stop();
+    // TODO put before base::stop()?
     if (!mInService) {
         stopClient(mClientHandle);
     }
-    aaudio_result_t status = mMmapStream->stop(mPortHandle);
-    mFramesRead.reset32();
-    return (result1 != AAUDIO_OK) ? result1 :  AAudioConvert_androidToAAudioResult(status);
-}
-
-/**
- *  Discard any data held by the underlying HAL or Service.
- */
-aaudio_result_t AAudioServiceStreamMMAP::flush() {
-    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
-    // TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
-    return AAudioServiceStreamBase::flush();;
+    return result;
 }
 
 aaudio_result_t AAudioServiceStreamMMAP::startClient(const android::AudioClient& client,
-                                                     audio_port_handle_t *clientHandle) {
-    return AAudioConvert_androidToAAudioResult(mMmapStream->start(client, clientHandle));
+                                                       audio_port_handle_t *clientHandle) {
+    aaudio_result_t result = mServiceEndpoint->startClient(client, clientHandle);
+    return result;
 }
 
 aaudio_result_t AAudioServiceStreamMMAP::stopClient(audio_port_handle_t clientHandle) {
-    return AAudioConvert_androidToAAudioResult(mMmapStream->stop(clientHandle));
+    aaudio_result_t result = mServiceEndpoint->stopClient(clientHandle);
+    return result;
 }
 
 // Get free-running DSP or DMA hardware position from the HAL.
 aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition(int64_t *positionFrames,
-                                                                int64_t *timeNanos) {
-    struct audio_mmap_position position;
-    if (mMmapStream == nullptr) {
-        disconnect();
-        return AAUDIO_ERROR_NULL;
-    }
-    status_t status = mMmapStream->getMmapPosition(&position);
-    aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
-    if (result == AAUDIO_ERROR_UNAVAILABLE) {
-        ALOGW("sendCurrentTimestamp(): getMmapPosition() has no position data yet");
-    } else if (result != AAUDIO_OK) {
-        ALOGE("sendCurrentTimestamp(): getMmapPosition() returned status %d", status);
-        disconnect();
-    } else {
-        mFramesRead.update32(position.position_frames);
-
-        Timestamp timestamp(mFramesRead.get(), position.time_nanoseconds);
+                                                                  int64_t *timeNanos) {
+    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP{
+            static_cast<AAudioServiceEndpointMMAP *>(mServiceEndpoint.get())};
+    aaudio_result_t result = serviceEndpointMMAP->getFreeRunningPosition(positionFrames, timeNanos);
+    if (result == AAUDIO_OK) {
+        Timestamp timestamp(*positionFrames, *timeNanos);
         mAtomicTimestamp.write(timestamp);
         *positionFrames = timestamp.getPosition();
         *timeNanos = timestamp.getNanoseconds();
+    } else if (result != AAUDIO_ERROR_UNAVAILABLE) {
+        disconnect();
     }
     return result;
 }
@@ -329,51 +155,25 @@
 // Get timestamp that was written by getFreeRunningPosition()
 aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp(int64_t *positionFrames,
                                                                 int64_t *timeNanos) {
+    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP{
+            static_cast<AAudioServiceEndpointMMAP *>(mServiceEndpoint.get())};
     // TODO Get presentation timestamp from the HAL
     if (mAtomicTimestamp.isValid()) {
         Timestamp timestamp = mAtomicTimestamp.read();
         *positionFrames = timestamp.getPosition();
-        *timeNanos = timestamp.getNanoseconds() + mHardwareTimeOffsetNanos;
+        *timeNanos = timestamp.getNanoseconds() + serviceEndpointMMAP->getHardwareTimeOffsetNanos();
         return AAUDIO_OK;
     } else {
         return AAUDIO_ERROR_UNAVAILABLE;
     }
 }
 
-void AAudioServiceStreamMMAP::onTearDown() {
-    ALOGD("AAudioServiceStreamMMAP::onTearDown() called");
-    disconnect();
-};
-
-void AAudioServiceStreamMMAP::onVolumeChanged(audio_channel_mask_t channels,
-                     android::Vector<float> values) {
-    // TODO do we really need a different volume for each channel?
-    float volume = values[0];
-    ALOGD("AAudioServiceStreamMMAP::onVolumeChanged() volume[0] = %f", volume);
-    sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
-};
-
-void AAudioServiceStreamMMAP::onRoutingChanged(audio_port_handle_t deviceId) {
-    ALOGD("AAudioServiceStreamMMAP::onRoutingChanged() called with %d, old = %d",
-          deviceId, mDeviceId);
-    if (mDeviceId != AUDIO_PORT_HANDLE_NONE  && mDeviceId != deviceId) {
-        disconnect();
-    }
-    mDeviceId = deviceId;
-};
-
 /**
  * Get an immutable description of the data queue from the HAL.
  */
 aaudio_result_t AAudioServiceStreamMMAP::getDownDataDescription(AudioEndpointParcelable &parcelable)
 {
-    // Gather information on the data queue based on HAL info.
-    int32_t bytesPerFrame = calculateBytesPerFrame();
-    int32_t capacityInBytes = mCapacityInFrames * bytesPerFrame;
-    int fdIndex = parcelable.addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
-    parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
-    parcelable.mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
-    parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
-    parcelable.mDownDataQueueParcelable.setCapacityInFrames(mCapacityInFrames);
-    return AAUDIO_OK;
+    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP{
+            static_cast<AAudioServiceEndpointMMAP *>(mServiceEndpoint.get())};
+    return serviceEndpointMMAP->getDownDataDescription(parcelable);
 }
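
The three overrides above share one pattern: the base class stores only a generic endpoint pointer, so the MMAP stream downcasts it to AAudioServiceEndpointMMAP before calling the MMAP-only queries. The cast is safe because this stream type is only ever registered against an MMAP endpoint. Below is a minimal standalone sketch of that downcast-and-delegate idea, using std::shared_ptr in place of android::sp and hypothetical Endpoint/EndpointMMAP/StreamMMAP stand-ins rather than the real AOSP classes.

// Minimal sketch of the downcast-and-delegate pattern, with std::shared_ptr
// standing in for android::sp.  Endpoint, EndpointMMAP and StreamMMAP are
// illustrative placeholders, not AOSP APIs.
#include <cstdint>
#include <cstdio>
#include <memory>

struct Endpoint {                       // plays the role of AAudioServiceEndpoint
    virtual ~Endpoint() = default;
};

struct EndpointMMAP : Endpoint {        // plays the role of AAudioServiceEndpointMMAP
    // Pretend free-running position as a HAL might report it.
    int getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) {
        *positionFrames = 480;
        *timeNanos = 1000000;
        return 0;                       // 0 stands in for AAUDIO_OK
    }
};

struct StreamMMAP {                     // plays the role of AAudioServiceStreamMMAP
    std::shared_ptr<Endpoint> mServiceEndpoint = std::make_shared<EndpointMMAP>();

    int getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) {
        // The base class only stores an Endpoint pointer; the MMAP stream knows
        // it was opened against an MMAP endpoint, so it downcasts to reach the
        // MMAP-only query, much like the static_cast + sp<> in the code above.
        auto mmapEndpoint = std::static_pointer_cast<EndpointMMAP>(mServiceEndpoint);
        return mmapEndpoint->getFreeRunningPosition(positionFrames, timeNanos);
    }
};

int main() {
    StreamMMAP stream;
    int64_t frames = 0, nanos = 0;
    stream.getFreeRunningPosition(&frames, &nanos);
    std::printf("position = %lld frames at %lld ns\n",
                (long long) frames, (long long) nanos);
    return 0;
}
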
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
index e631fd3..bf0aab3 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.h
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -37,19 +37,20 @@
 
 namespace aaudio {
 
-    /**
-     * Manage one memory mapped buffer that originated from a HAL.
-     */
-class AAudioServiceStreamMMAP
-    : public AAudioServiceStreamBase
-    , public android::MmapStreamCallback {
+
+/**
+ * This corresponds to an EXCLUSIVE mode MMAP client stream.
+ * It has exclusive use of one AAudioServiceEndpointMMAP to communicate with the underlying
+ * device or port.
+ */
+class AAudioServiceStreamMMAP : public AAudioServiceStreamBase {
 
 public:
-    AAudioServiceStreamMMAP(const android::AudioClient& serviceClient, bool inService);
+    AAudioServiceStreamMMAP(android::AAudioService &aAudioService,
+                            bool inService);
     virtual ~AAudioServiceStreamMMAP() = default;
 
-    aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
-                                 aaudio::AAudioStreamConfiguration &configurationOutput) override;
+    aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
 
     /**
      * Start the flow of audio data.
@@ -69,83 +70,28 @@
 
     aaudio_result_t stop() override;
 
-    /**
-     *  Discard any data held by the underlying HAL or Service.
-     *
-     * This is not guaranteed to be synchronous but it currently is.
-     * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
-     */
-    aaudio_result_t flush() override;
+    aaudio_result_t startClient(const android::AudioClient& client,
+                                audio_port_handle_t *clientHandle) override;
+
+    aaudio_result_t stopClient(audio_port_handle_t clientHandle) override;
 
     aaudio_result_t close() override;
 
-    virtual aaudio_result_t startClient(const android::AudioClient& client,
-                                        audio_port_handle_t *clientHandle);
-
-    virtual aaudio_result_t stopClient(audio_port_handle_t clientHandle);
-
     /**
      * Send a MMAP/NOIRQ buffer timestamp to the client.
      */
-    aaudio_result_t sendCurrentTimestamp();
-
-    // -------------- Callback functions ---------------------
-    void onTearDown() override;
-
-    void onVolumeChanged(audio_channel_mask_t channels,
-                         android::Vector<float> values) override;
-
-    void onRoutingChanged(audio_port_handle_t deviceId) override;
 
 protected:
 
     aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) override;
 
     aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
-    virtual aaudio_result_t getHardwareTimestamp(int64_t *positionFrames,
-                                                 int64_t *timeNanos) override;
+
+    aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
 
 private:
-    // This proxy class was needed to prevent a crash in AudioFlinger
-    // when the stream was closed.
-    class MyMmapStreamCallback : public android::MmapStreamCallback {
-    public:
-        explicit MyMmapStreamCallback(android::MmapStreamCallback &serviceCallback)
-            : mServiceCallback(serviceCallback){}
-        virtual ~MyMmapStreamCallback() = default;
 
-        void onTearDown() override {
-            mServiceCallback.onTearDown();
-        };
-
-        void onVolumeChanged(audio_channel_mask_t channels, android::Vector<float> values) override
-        {
-            mServiceCallback.onVolumeChanged(channels, values);
-        };
-
-        void onRoutingChanged(audio_port_handle_t deviceId) override {
-            mServiceCallback.onRoutingChanged(deviceId);
-        };
-
-    private:
-        android::MmapStreamCallback &mServiceCallback;
-    };
-
-    android::sp<MyMmapStreamCallback>   mMmapStreamCallback;
-    MonotonicCounter                    mFramesWritten;
-    MonotonicCounter                    mFramesRead;
-    int32_t                             mPreviousFrameCounter = 0;   // from HAL
-    int64_t                             mHardwareTimeOffsetNanos = 0; // TODO get from HAL
-
-
-    // Interface to the AudioFlinger MMAP support.
-    android::sp<android::MmapStreamInterface> mMmapStream;
-    struct audio_mmap_buffer_info             mMmapBufferinfo;
-    audio_port_handle_t                       mPortHandle = AUDIO_PORT_HANDLE_NONE;
-    audio_port_handle_t                       mDeviceId = AUDIO_PORT_HANDLE_NONE;
-    android::AudioClient                      mServiceClient;
-    bool                                      mInService = false;
-    android::base::unique_fd                  mAudioDataFileDescriptor;
+    bool                     mInService = false;
 };
 
 } // namespace aaudio
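
With mHardwareTimeOffsetNanos gone from this class, getHardwareTimestamp() now adds the offset reported by the MMAP endpoint to the stored free-running timestamp; the offset accounts for output samples reaching the DAC slightly later, and input samples leaving the ADC slightly earlier, than the position counter indicates. The following small sketch shows that adjustment, where the offset constants and the Timestamp/EndpointMMAP types are illustrative placeholders rather than values or classes from the AAudio sources.

// Sketch of applying an estimated hardware time offset to a free-running
// timestamp.  Constants and types here are made up for illustration.
#include <cstdint>
#include <cstdio>

constexpr int64_t kNanosPerMillisecond = 1000000;

// Output samples hit the DAC a little after the position counter advances;
// input samples left the ADC a little before it advances.
constexpr int64_t kOutputHardwareOffsetNanos =  3 * kNanosPerMillisecond;
constexpr int64_t kInputHardwareOffsetNanos  = -1 * kNanosPerMillisecond;

struct Timestamp {
    int64_t positionFrames;
    int64_t timeNanos;
};

struct EndpointMMAP {                  // stands in for AAudioServiceEndpointMMAP
    bool isOutput;
    int64_t getHardwareTimeOffsetNanos() const {
        return isOutput ? kOutputHardwareOffsetNanos : kInputHardwareOffsetNanos;
    }
};

// Equivalent of getHardwareTimestamp(): take the last free-running timestamp
// and shift its time by the endpoint's hardware offset.
Timestamp getHardwareTimestamp(const Timestamp &freeRunning, const EndpointMMAP &endpoint) {
    return { freeRunning.positionFrames,
             freeRunning.timeNanos + endpoint.getHardwareTimeOffsetNanos() };
}

int main() {
    EndpointMMAP output{true};
    Timestamp t = getHardwareTimestamp({480, 10 * kNanosPerMillisecond}, output);
    std::printf("frames = %lld at %lld ns\n",
                (long long) t.positionFrames, (long long) t.timeNanos);
    return 0;
}
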
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index 57990ce..834f39f 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -42,10 +42,9 @@
 #define MAX_FRAMES_PER_BUFFER       (32 * 1024)
 
 AAudioServiceStreamShared::AAudioServiceStreamShared(AAudioService &audioService)
-    : mAudioService(audioService)
+    : AAudioServiceStreamBase(audioService)
     , mTimestampPositionOffset(0)
-    , mXRunCount(0)
-    {
+    , mXRunCount(0) {
 }
 
 std::string AAudioServiceStreamShared::dumpHeader() {
@@ -57,6 +56,7 @@
 
 std::string AAudioServiceStreamShared::dump() const {
     std::stringstream result;
+
     result << AAudioServiceStreamBase::dump();
 
     auto fifo = mAudioDataQueue->getFifoBuffer();
@@ -116,87 +116,69 @@
     return capacityInFrames;
 }
 
-aaudio_result_t AAudioServiceStreamShared::open(const aaudio::AAudioStreamRequest &request,
-                     aaudio::AAudioStreamConfiguration &configurationOutput)  {
+aaudio_result_t AAudioServiceStreamShared::open(const aaudio::AAudioStreamRequest &request)  {
 
     sp<AAudioServiceStreamShared> keep(this);
 
-    aaudio_result_t result = AAudioServiceStreamBase::open(request, configurationOutput);
+    aaudio_result_t result = AAudioServiceStreamBase::open(request, AAUDIO_SHARING_MODE_SHARED);
     if (result != AAUDIO_OK) {
         ALOGE("AAudioServiceStreamBase open() returned %d", result);
         return result;
     }
 
     const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
-    aaudio_direction_t direction = request.getDirection();
 
-    AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
-    mServiceEndpoint = mEndpointManager.openEndpoint(mAudioService, configurationInput, direction);
-    if (mServiceEndpoint == nullptr) {
-        ALOGE("AAudioServiceStreamShared::open() mServiceEndPoint = %p", mServiceEndpoint);
-        return AAUDIO_ERROR_UNAVAILABLE;
-    }
 
     // Is the request compatible with the shared endpoint?
-    mAudioFormat = configurationInput.getFormat();
-    if (mAudioFormat == AAUDIO_FORMAT_UNSPECIFIED) {
-        mAudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
-    } else if (mAudioFormat != AAUDIO_FORMAT_PCM_FLOAT) {
-        ALOGE("AAudioServiceStreamShared::open() mAudioFormat = %d, need FLOAT", mAudioFormat);
+    setFormat(configurationInput.getFormat());
+    if (getFormat() == AAUDIO_FORMAT_UNSPECIFIED) {
+        setFormat(AAUDIO_FORMAT_PCM_FLOAT);
+    } else if (getFormat() != AAUDIO_FORMAT_PCM_FLOAT) {
+        ALOGE("AAudioServiceStreamShared::open() mAudioFormat = %d, need FLOAT", getFormat());
         result = AAUDIO_ERROR_INVALID_FORMAT;
         goto error;
     }
 
-    mSampleRate = configurationInput.getSampleRate();
-    if (mSampleRate == AAUDIO_UNSPECIFIED) {
-        mSampleRate = mServiceEndpoint->getSampleRate();
-    } else if (mSampleRate != mServiceEndpoint->getSampleRate()) {
+    setSampleRate(configurationInput.getSampleRate());
+    if (getSampleRate() == AAUDIO_UNSPECIFIED) {
+        setSampleRate(mServiceEndpoint->getSampleRate());
+    } else if (getSampleRate() != mServiceEndpoint->getSampleRate()) {
         ALOGE("AAudioServiceStreamShared::open() mSampleRate = %d, need %d",
-              mSampleRate, mServiceEndpoint->getSampleRate());
+              getSampleRate(), mServiceEndpoint->getSampleRate());
         result = AAUDIO_ERROR_INVALID_RATE;
         goto error;
     }
 
-    mSamplesPerFrame = configurationInput.getSamplesPerFrame();
-    if (mSamplesPerFrame == AAUDIO_UNSPECIFIED) {
-        mSamplesPerFrame = mServiceEndpoint->getSamplesPerFrame();
-    } else if (mSamplesPerFrame != mServiceEndpoint->getSamplesPerFrame()) {
+    setSamplesPerFrame(configurationInput.getSamplesPerFrame());
+    if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
+        setSamplesPerFrame(mServiceEndpoint->getSamplesPerFrame());
+    } else if (getSamplesPerFrame() != mServiceEndpoint->getSamplesPerFrame()) {
         ALOGE("AAudioServiceStreamShared::open() mSamplesPerFrame = %d, need %d",
-              mSamplesPerFrame, mServiceEndpoint->getSamplesPerFrame());
+              getSamplesPerFrame(), mServiceEndpoint->getSamplesPerFrame());
         result = AAUDIO_ERROR_OUT_OF_RANGE;
         goto error;
     }
 
-    mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
-    ALOGD("AAudioServiceStreamShared::open() mSampleRate = %d, mFramesPerBurst = %d",
-          mSampleRate, mFramesPerBurst);
-
-    mCapacityInFrames = calculateBufferCapacity(configurationInput.getBufferCapacity(),
-                                     mFramesPerBurst);
-    if (mCapacityInFrames < 0) {
-        result = mCapacityInFrames; // negative error code
-        mCapacityInFrames = 0;
+    setBufferCapacity(calculateBufferCapacity(configurationInput.getBufferCapacity(),
+                                     mFramesPerBurst));
+    if (getBufferCapacity() < 0) {
+        result = getBufferCapacity(); // negative error code
+        setBufferCapacity(0);
         goto error;
     }
 
     // Create audio data shared memory buffer for client.
     mAudioDataQueue = new SharedRingBuffer();
-    result = mAudioDataQueue->allocate(calculateBytesPerFrame(), mCapacityInFrames);
+    result = mAudioDataQueue->allocate(calculateBytesPerFrame(), getBufferCapacity());
     if (result != AAUDIO_OK) {
         ALOGE("AAudioServiceStreamShared::open() could not allocate FIFO with %d frames",
-              mCapacityInFrames);
+              getBufferCapacity());
         result = AAUDIO_ERROR_NO_MEMORY;
         goto error;
     }
 
     ALOGD("AAudioServiceStreamShared::open() actual rate = %d, channels = %d, deviceId = %d",
-          mSampleRate, mSamplesPerFrame, mServiceEndpoint->getDeviceId());
-
-    // Fill in configuration for client.
-    configurationOutput.setSampleRate(mSampleRate);
-    configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
-    configurationOutput.setFormat(mAudioFormat);
-    configurationOutput.setDeviceId(mServiceEndpoint->getDeviceId());
+          getSampleRate(), getSamplesPerFrame(), mServiceEndpoint->getDeviceId());
 
     result = mServiceEndpoint->registerStream(keep);
     if (result != AAUDIO_OK) {
@@ -211,118 +193,14 @@
     return result;
 }
 
-/**
- * Start the flow of audio data.
- *
- * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
- */
-aaudio_result_t AAudioServiceStreamShared::start()  {
-    if (isRunning()) {
-        return AAUDIO_OK;
-    }
-    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
-    if (endpoint == nullptr) {
-        ALOGE("AAudioServiceStreamShared::start() missing endpoint");
-        return AAUDIO_ERROR_INVALID_STATE;
-    }
-    // For output streams, this will add the stream to the mixer.
-    aaudio_result_t result = endpoint->startStream(this);
-    if (result != AAUDIO_OK) {
-        ALOGE("AAudioServiceStreamShared::start() mServiceEndpoint returned %d", result);
-        disconnect();
-    } else {
-        result = endpoint->getStreamInternal()->startClient(mMmapClient, &mClientHandle);
-        if (result == AAUDIO_OK) {
-            result = AAudioServiceStreamBase::start();
-        }
-    }
-    return result;
-}
-
-/**
- * Stop the flow of data so that start() can resume without loss of data.
- *
- * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
-*/
-aaudio_result_t AAudioServiceStreamShared::pause()  {
-    if (!isRunning()) {
-        return AAUDIO_OK;
-    }
-    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
-    if (endpoint == nullptr) {
-        ALOGE("AAudioServiceStreamShared::pause() missing endpoint");
-        return AAUDIO_ERROR_INVALID_STATE;
-    }
-    endpoint->getStreamInternal()->stopClient(mClientHandle);
-    aaudio_result_t result = endpoint->stopStream(this);
-    if (result != AAUDIO_OK) {
-        ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
-        disconnect(); // TODO should we return or pause Base first?
-    }
-    return AAudioServiceStreamBase::pause();
-}
-
-aaudio_result_t AAudioServiceStreamShared::stop()  {
-    if (!isRunning()) {
-        return AAUDIO_OK;
-    }
-    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
-    if (endpoint == nullptr) {
-        ALOGE("AAudioServiceStreamShared::stop() missing endpoint");
-        return AAUDIO_ERROR_INVALID_STATE;
-    }
-    endpoint->getStreamInternal()->stopClient(mClientHandle);
-    aaudio_result_t result = endpoint->stopStream(this);
-    if (result != AAUDIO_OK) {
-        ALOGE("AAudioServiceStreamShared::stop() mServiceEndpoint returned %d", result);
-        disconnect();
-    }
-    return AAudioServiceStreamBase::stop();
-}
-
-/**
- *  Discard any data held by the underlying HAL or Service.
- *
- * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
- */
-aaudio_result_t AAudioServiceStreamShared::flush()  {
-    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
-    if (endpoint == nullptr) {
-        ALOGE("AAudioServiceStreamShared::flush() missing endpoint");
-        return AAUDIO_ERROR_INVALID_STATE;
-    }
-    if (mState != AAUDIO_STREAM_STATE_PAUSED) {
-         ALOGE("AAudioServiceStreamShared::flush() stream not paused, state = %s",
-            AAudio_convertStreamStateToText(mState));
-        return AAUDIO_ERROR_INVALID_STATE;
-    }
-    // Data will get flushed when the client receives the FLUSHED event.
-    return AAudioServiceStreamBase::flush();
-}
 
 aaudio_result_t AAudioServiceStreamShared::close()  {
-    if (mState == AAUDIO_STREAM_STATE_CLOSED) {
-        return AAUDIO_OK;
-    }
+    aaudio_result_t result = AAudioServiceStreamBase::close();
 
-    stop();
+    delete mAudioDataQueue;
+    mAudioDataQueue = nullptr;
 
-    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
-    if (endpoint == nullptr) {
-        return AAUDIO_ERROR_INVALID_STATE;
-    }
-
-    endpoint->unregisterStream(this);
-
-    AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
-    mEndpointManager.closeEndpoint(endpoint);
-    mServiceEndpoint = nullptr;
-
-    if (mAudioDataQueue != nullptr) {
-        delete mAudioDataQueue;
-        mAudioDataQueue = nullptr;
-    }
-    return AAudioServiceStreamBase::close();
+    return result;
 }
 
 /**
@@ -341,9 +219,10 @@
     mAtomicTimestamp.write(timestamp);
 }
 
-// Get timestamp that was written by the real-time service thread, eg. mixer.
+// Get timestamp that was written by mixer or distributor.
 aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
-                                                                int64_t *timeNanos) {
+                                                                  int64_t *timeNanos) {
+    // TODO Get presentation timestamp from the HAL
     if (mAtomicTimestamp.isValid()) {
         Timestamp timestamp = mAtomicTimestamp.read();
         *positionFrames = timestamp.getPosition();
@@ -356,7 +235,7 @@
 
 // Get timestamp from lower level service.
 aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp(int64_t *positionFrames,
-                                                              int64_t *timeNanos) {
+                                                                int64_t *timeNanos) {
 
     aaudio_result_t result = mServiceEndpoint->getTimestamp(positionFrames, timeNanos);
     if (result == AAUDIO_OK) {
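
Both open() implementations begin with an sp<> named keep that points at this, presumably so the stream object is guaranteed to stay alive while open() hands a strong reference to the endpoint via registerStream(). A rough standard-C++ analogue of that idiom follows, with std::enable_shared_from_this standing in for android::RefBase/sp<> and Stream/Endpoint as hypothetical stand-ins for the real classes.

// Rough analogue of the "sp<...> keep(this)" idiom used in open().
#include <cstdio>
#include <memory>
#include <vector>

struct Stream;

struct Endpoint {
    // The endpoint keeps strong references to every registered stream.
    std::vector<std::shared_ptr<Stream>> mRegisteredStreams;
    int registerStream(const std::shared_ptr<Stream> &stream) {
        mRegisteredStreams.push_back(stream);
        return 0;                                   // 0 stands in for AAUDIO_OK
    }
};

struct Stream : std::enable_shared_from_this<Stream> {
    Endpoint *mServiceEndpoint = nullptr;

    int open(Endpoint *endpoint) {
        // Hold a strong reference to ourselves for the duration of open(), so
        // the stream cannot be destroyed before the endpoint has taken its own
        // strong reference in registerStream().
        std::shared_ptr<Stream> keep = shared_from_this();
        mServiceEndpoint = endpoint;
        return mServiceEndpoint->registerStream(keep);
    }
};

int main() {
    Endpoint endpoint;
    auto stream = std::make_shared<Stream>();
    int result = stream->open(&endpoint);
    std::printf("open returned %d, registered streams = %zu\n",
                result, endpoint.mRegisteredStreams.size());
    return 0;
}
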
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index 36a56b8..bc86dcc 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -50,37 +50,7 @@
 
     std::string dump() const override;
 
-    aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
-                         aaudio::AAudioStreamConfiguration &configurationOutput) override;
-
-    /**
-     * Start the flow of audio data.
-     *
-     * This is not guaranteed to be synchronous but it currently is.
-     * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
-     */
-    aaudio_result_t start() override;
-
-    /**
-     * Stop the flow of data so that start() can resume without loss of data.
-     *
-     * This is not guaranteed to be synchronous but it currently is.
-     * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
-    */
-    aaudio_result_t pause() override;
-
-    /**
-     * Stop the flow of data after data in buffer has played.
-     */
-    aaudio_result_t stop() override;
-
-    /**
-     *  Discard any data held by the underlying HAL or Service.
-     *
-     * This is not guaranteed to be synchronous but it currently is.
-     * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
-     */
-    aaudio_result_t flush() override;
+    aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
 
     aaudio_result_t close() override;
 
@@ -109,8 +79,7 @@
 
     aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
 
-    virtual aaudio_result_t getHardwareTimestamp(int64_t *positionFrames,
-                                                 int64_t *timeNanos) override;
+    aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
 
     /**
      * @param requestedCapacityFrames
@@ -121,12 +90,11 @@
                                             int32_t framesPerBurst);
 
 private:
-    android::AAudioService  &mAudioService;
-    AAudioServiceEndpoint   *mServiceEndpoint = nullptr;
     SharedRingBuffer        *mAudioDataQueue = nullptr;
 
     std::atomic<int64_t>     mTimestampPositionOffset;
     std::atomic<int32_t>     mXRunCount;
+
 };
 
 } /* namespace aaudio */
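
calculateBufferCapacity() stays in this header, and the call site in open() shows it mapping a requested capacity (possibly AAUDIO_UNSPECIFIED) and the endpoint's burst size onto a FIFO size, returning a negative error code when the request cannot be honored. The sketch below shows one plausible shape of such a policy, rounding up to whole bursts with a two-burst minimum and the MAX_FRAMES_PER_BUFFER cap from the .cpp; only that cap comes from the sources above, everything else is an assumption and the real implementation may differ.

// Illustrative re-sketch of a calculateBufferCapacity()-style policy.  Only
// MAX_FRAMES_PER_BUFFER (32 * 1024) comes from the source above; the rest is
// assumed for the example.
#include <cstdio>

constexpr int32_t kUnspecified = 0;              // stands in for AAUDIO_UNSPECIFIED
constexpr int32_t kMaxFramesPerBuffer = 32 * 1024;
constexpr int32_t kErrorOutOfRange = -113;       // placeholder error code

int32_t calculateBufferCapacity(int32_t requestedCapacityFrames, int32_t framesPerBurst) {
    if (requestedCapacityFrames > kMaxFramesPerBuffer) {
        return kErrorOutOfRange;                 // negative result signals an error
    }
    // Default to a double buffer when the client does not care.
    int32_t capacity = (requestedCapacityFrames == kUnspecified)
                     ? 2 * framesPerBurst
                     : requestedCapacityFrames;
    // Round up to a whole number of bursts, with a minimum of two bursts.
    int32_t bursts = (capacity + framesPerBurst - 1) / framesPerBurst;
    if (bursts < 2) {
        bursts = 2;
    }
    capacity = bursts * framesPerBurst;
    if (capacity > kMaxFramesPerBuffer) {
        capacity = kMaxFramesPerBuffer;
    }
    return capacity;
}

int main() {
    std::printf("unspecified -> %d frames\n", calculateBufferCapacity(kUnspecified, 192));
    std::printf("1000 frames -> %d frames\n", calculateBufferCapacity(1000, 192));
    std::printf("too large   -> %d\n",        calculateBufferCapacity(64 * 1024, 192));
    return 0;
}
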
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
index a896a7a..1b74ad3 100644
--- a/services/oboeservice/Android.mk
+++ b/services/oboeservice/Android.mk
@@ -31,7 +31,9 @@
     AAudioService.cpp \
     AAudioServiceEndpoint.cpp \
     AAudioServiceEndpointCapture.cpp \
+    AAudioServiceEndpointMMAP.cpp \
     AAudioServiceEndpointPlay.cpp \
+    AAudioServiceEndpointShared.cpp \
     AAudioServiceStreamBase.cpp \
     AAudioServiceStreamMMAP.cpp \
     AAudioServiceStreamShared.cpp \