Merge "NuPlayerDriver: put player in paused state when reaching EOS." into lmp-dev
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 2442219..9cc208e 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -278,6 +278,7 @@
     bool                        mPrepareSync;
     status_t                    mPrepareStatus;
     audio_stream_type_t         mStreamType;
+    Parcel*                     mAudioAttributesParcel;
     bool                        mLoop;
     float                       mLeftVolume;
     float                       mRightVolume;
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 6cd377a..9611ac7 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -50,6 +50,7 @@
     mListener = NULL;
     mCookie = NULL;
     mStreamType = AUDIO_STREAM_MUSIC;
+    mAudioAttributesParcel = NULL;
     mCurrentPosition = -1;
     mSeekPosition = -1;
     mCurrentState = MEDIA_PLAYER_IDLE;
@@ -68,6 +69,10 @@
 MediaPlayer::~MediaPlayer()
 {
     ALOGV("destructor");
+    if (mAudioAttributesParcel != NULL) {
+        delete mAudioAttributesParcel;
+        mAudioAttributesParcel = NULL;
+    }
     AudioSystem::releaseAudioSessionId(mAudioSessionId, -1);
     disconnect();
     IPCThreadState::self()->flushCommands();
@@ -237,6 +242,9 @@
 {
     if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_INITIALIZED | MEDIA_PLAYER_STOPPED) ) ) {
         mPlayer->setAudioStreamType(mStreamType);
+        if (mAudioAttributesParcel != NULL) {
+            mPlayer->setParameter(KEY_PARAMETER_AUDIO_ATTRIBUTES, *mAudioAttributesParcel);
+        }
         mCurrentState = MEDIA_PLAYER_PREPARING;
         return mPlayer->prepareAsync();
     }
@@ -662,8 +670,17 @@
     if (mPlayer != NULL) {
         return  mPlayer->setParameter(key, request);
     }
-    ALOGV("setParameter: no active player");
-    return INVALID_OPERATION;
+    switch (key) {
+    case KEY_PARAMETER_AUDIO_ATTRIBUTES:
+        // no player, save the marshalled audio attributes
+        if (mAudioAttributesParcel != NULL) { delete mAudioAttributesParcel; }
+        mAudioAttributesParcel = new Parcel();
+        mAudioAttributesParcel->appendFrom(&request, 0, request.dataSize());
+        return OK;
+    default:
+        ALOGV("setParameter: no active player");
+        return INVALID_OPERATION;
+    }
 }
 
 status_t MediaPlayer::getParameter(int key, Parcel *reply)
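The mediaplayer.cpp hunks above cache the marshalled audio attributes whenever setParameter() arrives before a remote player exists, and replay them from prepareAsync_l(). Below is a minimal, framework-free sketch of that cache-and-replay pattern; Blob, kKeyAudioAttributes and the class/method names are stand-ins chosen for illustration, not the real MediaPlayer API.

```cpp
#include <cstdint>
#include <optional>
#include <vector>

// Blob plays the role of a marshalled Parcel; the key is an illustrative
// placeholder for KEY_PARAMETER_AUDIO_ATTRIBUTES.
using Blob = std::vector<uint8_t>;
constexpr int kKeyAudioAttributes = 1;

class PlayerProxy {
public:
    // Mirrors the patched setParameter(): with no remote player yet, cache the
    // marshalled audio attributes instead of failing with INVALID_OPERATION.
    int setParameter(int key, const Blob& request) {
        if (hasRemotePlayer_) {
            return sendToRemote(key, request);
        }
        if (key == kKeyAudioAttributes) {
            cachedAudioAttributes_ = request;   // copy, like Parcel::appendFrom()
            return 0;                           // OK
        }
        return -1;                              // INVALID_OPERATION
    }

    void attachRemotePlayer() { hasRemotePlayer_ = true; }

    // Mirrors prepareAsync_l(): replay the cached attributes once a player
    // exists, right before kicking off preparation.
    int prepareAsync() {
        if (!hasRemotePlayer_) {
            return -1;
        }
        if (cachedAudioAttributes_) {
            sendToRemote(kKeyAudioAttributes, *cachedAudioAttributes_);
        }
        return 0;
    }

private:
    int sendToRemote(int /*key*/, const Blob& /*request*/) { return 0; }

    bool hasRemotePlayer_ = false;
    std::optional<Blob> cachedAudioAttributes_;
};
```

Owning the cached blob by value (std::optional here) also removes the need for the explicit delete the patch adds to the destructor.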
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index dacb144..3e0fc0d 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -60,7 +60,7 @@
     return OK;
 }
 
-player_type MediaPlayerFactory::getDefaultPlayerType() {
+static player_type getDefaultPlayerType() {
     char value[PROPERTY_VALUE_MAX];
     if (property_get("media.stagefright.use-awesome", value, NULL)
             && (!strcmp("1", value) || !strcasecmp("true", value))) {
@@ -181,16 +181,19 @@
                                int64_t offset,
                                int64_t /*length*/,
                                float /*curScore*/) {
-        char buf[20];
-        lseek(fd, offset, SEEK_SET);
-        read(fd, buf, sizeof(buf));
-        lseek(fd, offset, SEEK_SET);
+        if (getDefaultPlayerType()
+                == STAGEFRIGHT_PLAYER) {
+            char buf[20];
+            lseek(fd, offset, SEEK_SET);
+            read(fd, buf, sizeof(buf));
+            lseek(fd, offset, SEEK_SET);
 
-        uint32_t ident = *((uint32_t*)buf);
+            uint32_t ident = *((uint32_t*)buf);
 
-        // Ogg vorbis?
-        if (ident == 0x5367674f) // 'OggS'
-            return 1.0;
+            // Ogg vorbis?
+            if (ident == 0x5367674f) // 'OggS'
+                return 1.0;
+        }
 
         return 0.0;
     }
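The factory change above only performs the file read when getDefaultPlayerType() resolves to the Stagefright player; the check itself is a four-byte magic-number comparison. A small self-contained sketch of that comparison, assuming the leading bytes are already buffered and that the host is little-endian (which is what the 0x5367674f constant in the patch implies):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Returns 1.0 for an Ogg stream, 0.0 otherwise. 'O','g','g','S' read as a
// little-endian uint32_t is 0x5367674f, the value the factory compares against.
static float sniffOggConfidence(const uint8_t* buf, size_t size) {
    if (size < 4) {
        return 0.0f;
    }
    uint32_t ident;
    std::memcpy(&ident, buf, sizeof(ident));  // avoids the unaligned cast in the patch
    return (ident == 0x5367674fu) ? 1.0f : 0.0f;
}

int main() {
    const uint8_t ogg[4] = { 'O', 'g', 'g', 'S' };
    std::printf("confidence: %.1f\n", sniffOggConfidence(ogg, sizeof(ogg)));
    return 0;
}
```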
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.h b/media/libmediaplayerservice/MediaPlayerFactory.h
index 5ddde19..55ff918 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.h
+++ b/media/libmediaplayerservice/MediaPlayerFactory.h
@@ -71,7 +71,6 @@
 
     static status_t registerFactory_l(IFactory* factory,
                                       player_type type);
-    static player_type getDefaultPlayerType();
 
     static Mutex       sLock;
     static tFactoryMap sFactoryMap;
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index f257ef3..8e1987a 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -32,6 +32,7 @@
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
 #include "../../libstagefright/include/DRMExtractor.h"
 #include "../../libstagefright/include/NuCachedSource2.h"
 #include "../../libstagefright/include/WVMExtractor.h"
@@ -318,7 +319,14 @@
     }
 
     if (mVideoTrack.mSource != NULL) {
-        notifyVideoSizeChanged(getFormat(false /* audio */));
+        sp<MetaData> meta = doGetFormatMeta(false /* audio */);
+        sp<AMessage> msg = new AMessage;
+        err = convertMetaDataToMessage(meta, &msg);
+        if (err != OK) {
+            notifyPreparedAndCleanup(err);
+            return;
+        }
+        notifyVideoSizeChanged(msg);
     }
 
     notifyFlagsChanged(
@@ -422,7 +430,7 @@
         mAudioTrack.mPackets =
             new AnotherPacketSource(mAudioTrack.mSource->getFormat());
 
-        readBuffer(MEDIA_TRACK_TYPE_AUDIO);
+        postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);
     }
 
     if (mVideoTrack.mSource != NULL) {
@@ -430,7 +438,7 @@
         mVideoTrack.mPackets =
             new AnotherPacketSource(mVideoTrack.mSource->getFormat());
 
-        readBuffer(MEDIA_TRACK_TYPE_VIDEO);
+        postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
     }
 
     setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
@@ -459,6 +467,8 @@
     if (mDecryptHandle != NULL) {
         mDrmManagerClient->setPlaybackStatus(mDecryptHandle, playbackStatus, position);
     }
+    mSubtitleTrack.mPackets = new AnotherPacketSource(NULL);
+    mTimedTextTrack.mPackets = new AnotherPacketSource(NULL);
 }
 
 status_t NuPlayer::GenericSource::feedMoreTSData() {
@@ -615,6 +625,37 @@
           }
           break;
       }
+
+      case kWhatGetFormat:
+      {
+          onGetFormatMeta(msg);
+          break;
+      }
+
+      case kWhatGetSelectedTrack:
+      {
+          onGetSelectedTrack(msg);
+          break;
+      }
+
+      case kWhatSelectTrack:
+      {
+          onSelectTrack(msg);
+          break;
+      }
+
+      case kWhatSeek:
+      {
+          onSeek(msg);
+          break;
+      }
+
+      case kWhatReadBuffer:
+      {
+          onReadBuffer(msg);
+          break;
+      }
+
       default:
           Source::onMessageReceived(msg);
           break;
@@ -690,6 +731,34 @@
 }
 
 sp<MetaData> NuPlayer::GenericSource::getFormatMeta(bool audio) {
+    sp<AMessage> msg = new AMessage(kWhatGetFormat, id());
+    msg->setInt32("audio", audio);
+
+    sp<AMessage> response;
+    void *format;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findPointer("format", &format));
+        return (MetaData *)format;
+    } else {
+        return NULL;
+    }
+}
+
+void NuPlayer::GenericSource::onGetFormatMeta(sp<AMessage> msg) const {
+    int32_t audio;
+    CHECK(msg->findInt32("audio", &audio));
+
+    sp<AMessage> response = new AMessage;
+    sp<MetaData> format = doGetFormatMeta(audio);
+    response->setPointer("format", format.get());
+
+    uint32_t replyID;
+    CHECK(msg->senderAwaitsResponse(&replyID));
+    response->postReply(replyID);
+}
+
+sp<MetaData> NuPlayer::GenericSource::doGetFormatMeta(bool audio) const {
     sp<MediaSource> source = audio ? mAudioTrack.mSource : mVideoTrack.mSource;
 
     if (source == NULL) {
@@ -709,7 +778,7 @@
 
     if (mIsWidevine && !audio) {
         // try to read a buffer as we may not have been able to the last time
-        readBuffer(MEDIA_TRACK_TYPE_VIDEO, -1ll);
+        postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
     }
 
     status_t finalResult;
@@ -720,18 +789,7 @@
     status_t result = track->mPackets->dequeueAccessUnit(accessUnit);
 
     if (!track->mPackets->hasBufferAvailable(&finalResult)) {
-        readBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO, -1ll);
-    }
-
-    if (mSubtitleTrack.mSource == NULL && mTimedTextTrack.mSource == NULL) {
-        return result;
-    }
-
-    if (mSubtitleTrack.mSource != NULL) {
-        CHECK(mSubtitleTrack.mPackets != NULL);
-    }
-    if (mTimedTextTrack.mSource != NULL) {
-        CHECK(mTimedTextTrack.mPackets != NULL);
+        postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
     }
 
     if (result != OK) {
@@ -825,6 +883,35 @@
 }
 
 ssize_t NuPlayer::GenericSource::getSelectedTrack(media_track_type type) const {
+    sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, id());
+    msg->setInt32("type", type);
+
+    sp<AMessage> response;
+    int32_t index;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("index", &index));
+        return index;
+    } else {
+        return -1;
+    }
+}
+
+void NuPlayer::GenericSource::onGetSelectedTrack(sp<AMessage> msg) const {
+    int32_t tmpType;
+    CHECK(msg->findInt32("type", &tmpType));
+    media_track_type type = (media_track_type)tmpType;
+
+    sp<AMessage> response = new AMessage;
+    ssize_t index = doGetSelectedTrack(type);
+    response->setInt32("index", index);
+
+    uint32_t replyID;
+    CHECK(msg->senderAwaitsResponse(&replyID));
+    response->postReply(replyID);
+}
+
+ssize_t NuPlayer::GenericSource::doGetSelectedTrack(media_track_type type) const {
     const Track *track = NULL;
     switch (type) {
     case MEDIA_TRACK_TYPE_VIDEO:
@@ -852,6 +939,34 @@
 
 status_t NuPlayer::GenericSource::selectTrack(size_t trackIndex, bool select) {
     ALOGV("%s track: %zu", select ? "select" : "deselect", trackIndex);
+    sp<AMessage> msg = new AMessage(kWhatSelectTrack, id());
+    msg->setInt32("trackIndex", trackIndex);
+    msg->setInt32("select", trackIndex);
+
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+
+    return err;
+}
+
+void NuPlayer::GenericSource::onSelectTrack(sp<AMessage> msg) {
+    int32_t trackIndex, select;
+    CHECK(msg->findInt32("trackIndex", &trackIndex));
+    CHECK(msg->findInt32("select", &select));
+
+    sp<AMessage> response = new AMessage;
+    status_t err = doSelectTrack(trackIndex, select);
+    response->setInt32("err", err);
+
+    uint32_t replyID;
+    CHECK(msg->senderAwaitsResponse(&replyID));
+    response->postReply(replyID);
+}
+
+status_t NuPlayer::GenericSource::doSelectTrack(size_t trackIndex, bool select) {
     if (trackIndex >= mSources.size()) {
         return BAD_INDEX;
     }
@@ -922,6 +1037,32 @@
 }
 
 status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs) {
+    sp<AMessage> msg = new AMessage(kWhatSeek, id());
+    msg->setInt64("seekTimeUs", seekTimeUs);
+
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+
+    return err;
+}
+
+void NuPlayer::GenericSource::onSeek(sp<AMessage> msg) {
+    int64_t seekTimeUs;
+    CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
+
+    sp<AMessage> response = new AMessage;
+    status_t err = doSeek(seekTimeUs);
+    response->setInt32("err", err);
+
+    uint32_t replyID;
+    CHECK(msg->senderAwaitsResponse(&replyID));
+    response->postReply(replyID);
+}
+
+status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs) {
     if (mVideoTrack.mSource != NULL) {
         int64_t actualTimeUs;
         readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, &actualTimeUs);
@@ -1006,6 +1147,19 @@
     return ab;
 }
 
+void NuPlayer::GenericSource::postReadBuffer(media_track_type trackType) {
+    sp<AMessage> msg = new AMessage(kWhatReadBuffer, id());
+    msg->setInt32("trackType", trackType);
+    msg->post();
+}
+
+void NuPlayer::GenericSource::onReadBuffer(sp<AMessage> msg) {
+    int32_t tmpType;
+    CHECK(msg->findInt32("trackType", &tmpType));
+    media_track_type trackType = (media_track_type)tmpType;
+    readBuffer(trackType);
+}
+
 void NuPlayer::GenericSource::readBuffer(
         media_track_type trackType, int64_t seekTimeUs, int64_t *actualTimeUs, bool formatChange) {
     Track *track;
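The bulk of the GenericSource.cpp change routes every public entry point (getFormatMeta, getSelectedTrack, selectTrack, seekTo, readBuffer) through an AMessage posted to the source's looper, so the real work runs on one thread and callers either block on postAndAwaitResponse() or, for readBuffer, return immediately. Here is a rough standard-C++ sketch of that post-and-await pattern built on a plain worker thread; the class and method names are illustrative, not NuPlayer's.

```cpp
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <future>
#include <mutex>
#include <queue>
#include <thread>

class SerializedSource {
public:
    SerializedSource() : worker_([this] { run(); }) {}
    ~SerializedSource() {
        post([this] { done_ = true; });   // drain up to the stop marker, then join
        worker_.join();
    }

    // Public API: packages the arguments, posts them to the worker and blocks
    // on the reply, like seekTo() -> kWhatSeek -> onSeek() -> doSeek() above.
    int seekTo(int64_t seekTimeUs) {
        std::promise<int> reply;                    // plays the role of the response AMessage
        auto answer = reply.get_future();
        post([this, seekTimeUs, &reply] { reply.set_value(doSeek(seekTimeUs)); });
        return answer.get();                        // postAndAwaitResponse()
    }

private:
    int doSeek(int64_t /*seekTimeUs*/) { return 0; }  // the real work, single-threaded

    void post(std::function<void()> task) {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            queue_.push(std::move(task));
        }
        cond_.notify_one();
    }

    void run() {
        while (!done_) {
            std::function<void()> task;
            {
                std::unique_lock<std::mutex> lock(mutex_);
                cond_.wait(lock, [this] { return !queue_.empty(); });
                task = std::move(queue_.front());
                queue_.pop();
            }
            task();
        }
    }

    std::mutex mutex_;
    std::condition_variable cond_;
    std::queue<std::function<void()>> queue_;
    bool done_ = false;
    std::thread worker_;   // declared last so it starts after the other members
};
```

Because everything funnels through one queue, doSeek(), doSelectTrack() and the buffered reads in this sketch never race with one another, which is the property the patch is after.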
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 1f13120..50ff98a 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -84,6 +84,11 @@
         kWhatSendTimedTextData,
         kWhatChangeAVSource,
         kWhatPollBuffering,
+        kWhatGetFormat,
+        kWhatGetSelectedTrack,
+        kWhatSelectTrack,
+        kWhatSeek,
+        kWhatReadBuffer,
     };
 
     Vector<sp<MediaSource> > mSources;
@@ -140,6 +145,18 @@
 
     void notifyPreparedAndCleanup(status_t err);
 
+    void onGetFormatMeta(sp<AMessage> msg) const;
+    sp<MetaData> doGetFormatMeta(bool audio) const;
+
+    void onGetSelectedTrack(sp<AMessage> msg) const;
+    ssize_t doGetSelectedTrack(media_track_type type) const;
+
+    void onSelectTrack(sp<AMessage> msg);
+    status_t doSelectTrack(size_t trackIndex, bool select);
+
+    void onSeek(sp<AMessage> msg);
+    status_t doSeek(int64_t seekTimeUs);
+
     void onPrepareAsync();
 
     void fetchTextData(
@@ -155,6 +172,8 @@
             media_track_type trackType,
             int64_t *actualTimeUs = NULL);
 
+    void postReadBuffer(media_track_type trackType);
+    void onReadBuffer(sp<AMessage> msg);
     void readBuffer(
             media_track_type trackType,
             int64_t seekTimeUs = -1ll, int64_t *actualTimeUs = NULL, bool formatChange = false);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 19a5908..9b03b71 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2313,7 +2313,6 @@
         return 0;
     }
     OMX_U32 ret = frameRate * iFramesInterval;
-    CHECK(ret > 1);
     return ret;
 }
 
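Both setPFramesSpacing() helpers in this patch (here and in OMXCodec.cpp below) drop a CHECK(ret > 1) that would abort the media server for perfectly legal low-rate configurations. A worked example of the arithmetic, with illustrative names:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// The spacing is simply frameRate * iFramesInterval (the OMXCodec.cpp variant
// subtracts 1 from the product).
static uint32_t pFramesSpacing(uint32_t frameRate, uint32_t iFramesIntervalSec) {
    return frameRate * iFramesIntervalSec;
}

int main() {
    // A 1 fps stream with a 1-second I-frame interval yields a spacing of 1,
    // which is valid but would have tripped CHECK(ret > 1) and aborted.
    uint32_t spacing = pFramesSpacing(/*frameRate=*/1, /*iFramesIntervalSec=*/1);
    std::printf("spacing = %u\n", spacing);
    assert(spacing == 1);
    return 0;
}
```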
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 0064293..1729f93 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -2810,7 +2810,6 @@
 
     {
         if (objectType == AOT_SBR || objectType == AOT_PS) {
-            const int32_t extensionSamplingFrequency = br.getBits(4);
             objectType = br.getBits(5);
 
             if (objectType == AOT_ESCAPE) {
@@ -2828,9 +2827,30 @@
                 const int32_t coreCoderDelay = br.getBits(14);
             }
 
-            const int32_t extensionFlag = br.getBits(1);
+            int32_t extensionFlag = -1;
+            if (br.numBitsLeft() > 0) {
+                extensionFlag = br.getBits(1);
+            } else {
+                switch (objectType) {
+                // 14496-3 4.5.1.1 extensionFlag
+                case AOT_AAC_LC:
+                    extensionFlag = 0;
+                    break;
+                case AOT_ER_AAC_LC:
+                case AOT_ER_AAC_SCAL:
+                case AOT_ER_BSAC:
+                case AOT_ER_AAC_LD:
+                    extensionFlag = 1;
+                    break;
+                default:
+                    TRESPASS();
+                    break;
+                }
+                ALOGW("csd missing extension flag; assuming %d for object type %u.",
+                        extensionFlag, objectType);
+            }
 
-            if (numChannels == 0 ) {
+            if (numChannels == 0) {
                 int32_t channelsEffectiveNum = 0;
                 int32_t channelsNum = 0;
                 const int32_t ElementInstanceTag = br.getBits(4);
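The MPEG4Extractor change above stops reading the extensionFlag bit once the AudioSpecificConfig runs out of bits and instead assumes the default that ISO/IEC 14496-3, 4.5.1.1 prescribes for the object type in question. A standalone sketch of that fallback table, using a hand-rolled enum instead of the framework's AOT_* constants:

```cpp
#include <cstdio>
#include <optional>

// Illustrative object-type enum; the extractor switches on AOT_* values instead.
enum class AacObjectType { AacLc, ErAacLc, ErAacScalable, ErBsac, ErAacLd, Other };

// Default extensionFlag when the csd ends before the field, following the table
// the patch encodes: 0 for plain AAC LC, 1 for the listed error-resilient types.
static std::optional<int> defaultExtensionFlag(AacObjectType type) {
    switch (type) {
        case AacObjectType::AacLc:
            return 0;
        case AacObjectType::ErAacLc:
        case AacObjectType::ErAacScalable:
        case AacObjectType::ErBsac:
        case AacObjectType::ErAacLd:
            return 1;
        default:
            return std::nullopt;   // unknown: the patch treats this as a hard error
    }
}

int main() {
    if (auto flag = defaultExtensionFlag(AacObjectType::AacLc)) {
        std::printf("assumed extensionFlag = %d\n", *flag);
    }
    return 0;
}
```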
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 78758da..a8806c8 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -994,7 +994,6 @@
         return 0;
     }
     OMX_U32 ret = frameRate * iFramesInterval - 1;
-    CHECK(ret > 1);
     return ret;
 }
 
diff --git a/media/libstagefright/TimedEventQueue.cpp b/media/libstagefright/TimedEventQueue.cpp
index da50c56..1fdb244 100644
--- a/media/libstagefright/TimedEventQueue.cpp
+++ b/media/libstagefright/TimedEventQueue.cpp
@@ -338,7 +338,7 @@
             status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
                                                              binder,
                                                              String16("TimedEventQueue"),
-                                                             String16("media"));
+                                                             String16("media"));    // not oneway
             IPCThreadState::self()->restoreCallingIdentity(token);
             if (status == NO_ERROR) {
                 mWakeLockToken = binder;
@@ -363,7 +363,7 @@
         CHECK(mWakeLockToken != 0);
         if (mPowerManager != 0) {
             int64_t token = IPCThreadState::self()->clearCallingIdentity();
-            mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+            mPowerManager->releaseWakeLock(mWakeLockToken, 0);  // not oneway
             IPCThreadState::self()->restoreCallingIdentity(token);
         }
         mWakeLockToken.clear();
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 942bff6..97b1753 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -662,12 +662,14 @@
                     binder,
                     getWakeLockTag(),
                     String16("media"),
-                    uid);
+                    uid,
+                    true /* FIXME force oneway contrary to .aidl */);
         } else {
             status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
                     binder,
                     getWakeLockTag(),
-                    String16("media"));
+                    String16("media"),
+                    true /* FIXME force oneway contrary to .aidl */);
         }
         if (status == NO_ERROR) {
             mWakeLockToken = binder;
@@ -687,7 +689,8 @@
     if (mWakeLockToken != 0) {
         ALOGV("releaseWakeLock_l() %s", mName);
         if (mPowerManager != 0) {
-            mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+            mPowerManager->releaseWakeLock(mWakeLockToken, 0,
+                    true /* FIXME force oneway contrary to .aidl */);
         }
         mWakeLockToken.clear();
     }
@@ -723,7 +726,8 @@
     if (mPowerManager != 0) {
         sp<IBinder> binder = new BBinder();
         status_t status;
-        status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array());
+        status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array(),
+                    true /* FIXME force oneway contrary to .aidl */);
         ALOGV("acquireWakeLock_l() %s status %d", mName, status);
     }
 }
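The TimedEventQueue and Threads.cpp hunks are both about the same pairing: take a partial wake lock while the thread has work pending and release it afterwards, with the AudioFlinger calls now forced one-way (the FIXME comments). The sketch below only illustrates that acquire/release pairing with an RAII guard; WakeLockService and its two methods are hypothetical stand-ins, not the IPowerManager interface the patch actually calls.

```cpp
#include <memory>
#include <string>
#include <utility>

// Hypothetical service interface; only the two calls the patch exercises are
// modelled, including the trailing "force oneway" flag the FIXMEs refer to.
struct WakeLockService {
    virtual ~WakeLockService() = default;
    virtual int acquireWakeLock(const std::string& tag, bool oneway) = 0;
    virtual int releaseWakeLock(const std::string& tag, bool oneway) = 0;
};

// RAII guard mirroring acquireWakeLock_l()/releaseWakeLock_l(): the lock is
// held for the guard's lifetime and always released, even on early exit.
class ScopedWakeLock {
public:
    ScopedWakeLock(std::shared_ptr<WakeLockService> service, std::string tag)
        : service_(std::move(service)), tag_(std::move(tag)) {
        if (service_) {
            held_ = service_->acquireWakeLock(tag_, /*oneway=*/true) == 0;
        }
    }
    ~ScopedWakeLock() {
        if (held_ && service_) {
            service_->releaseWakeLock(tag_, /*oneway=*/true);
        }
    }

private:
    std::shared_ptr<WakeLockService> service_;
    std::string tag_;
    bool held_ = false;
};
```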
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index a805923..6446a6e 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -4634,8 +4634,15 @@
         //  - one A2DP device + another device: happens with duplicated output. In this case
         // retain the device on the A2DP output as the other must not correspond to an active
         // selection if not the speaker.
+        //  - HDMI-CEC system audio mode only output: give priority to the first available device in the order below.
         if (device & AUDIO_DEVICE_OUT_SPEAKER) {
             device = AUDIO_DEVICE_OUT_SPEAKER;
+        } else if (device & AUDIO_DEVICE_OUT_HDMI_ARC) {
+            device = AUDIO_DEVICE_OUT_HDMI_ARC;
+        } else if (device & AUDIO_DEVICE_OUT_AUX_LINE) {
+            device = AUDIO_DEVICE_OUT_AUX_LINE;
+        } else if (device & AUDIO_DEVICE_OUT_SPDIF) {
+            device = AUDIO_DEVICE_OUT_SPDIF;
         } else {
             device = (audio_devices_t)(device & AUDIO_DEVICE_OUT_ALL_A2DP);
         }
@@ -6032,14 +6039,26 @@
         return 0;
     }
 
+    // For direct outputs, pick the minimum sampling rate: this helps ensure that the
+    // channel count / sampling rate combination chosen will be supported by the connected
+    // sink
+    if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
+        uint32_t samplingRate = UINT_MAX;
+        for (size_t i = 0; i < mSamplingRates.size(); i ++) {
+            if ((mSamplingRates[i] < samplingRate) && (mSamplingRates[i] > 0)) {
+                samplingRate = mSamplingRates[i];
+            }
+        }
+        return (samplingRate == UINT_MAX) ? 0 : samplingRate;
+    }
+
     uint32_t samplingRate = 0;
     uint32_t maxRate = MAX_MIXER_SAMPLING_RATE;
 
     // For mixed output and inputs, use max mixer sampling rates. Do not
     // limit sampling rate otherwise
-    if ((mType != AUDIO_PORT_TYPE_MIX) ||
-            ((mRole == AUDIO_PORT_ROLE_SOURCE) &&
-            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)))) {
+    if (mType != AUDIO_PORT_TYPE_MIX) {
         maxRate = UINT_MAX;
     }
     for (size_t i = 0; i < mSamplingRates.size(); i ++) {
@@ -6056,16 +6075,35 @@
     if (mChannelMasks.size() == 1 && mChannelMasks[0] == 0) {
         return AUDIO_CHANNEL_NONE;
     }
-
     audio_channel_mask_t channelMask = AUDIO_CHANNEL_NONE;
+
+    // For direct outputs, pick the minimum channel count: this helps ensure that the
+    // channel count / sampling rate combination chosen will be supported by the connected
+    // sink
+    if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
+        uint32_t channelCount = UINT_MAX;
+        for (size_t i = 0; i < mChannelMasks.size(); i ++) {
+            uint32_t cnlCount;
+            if (mUseInChannelMask) {
+                cnlCount = audio_channel_count_from_in_mask(mChannelMasks[i]);
+            } else {
+                cnlCount = audio_channel_count_from_out_mask(mChannelMasks[i]);
+            }
+            if ((cnlCount < channelCount) && (cnlCount > 0)) {
+                channelMask = mChannelMasks[i];
+                channelCount = cnlCount;
+            }
+        }
+        return channelMask;
+    }
+
     uint32_t channelCount = 0;
     uint32_t maxCount = MAX_MIXER_CHANNEL_COUNT;
 
     // For mixed output and inputs, use max mixer channel count. Do not
     // limit channel count otherwise
-    if ((mType != AUDIO_PORT_TYPE_MIX) ||
-            ((mRole == AUDIO_PORT_ROLE_SOURCE) &&
-            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)))) {
+    if (mType != AUDIO_PORT_TYPE_MIX) {
         maxCount = UINT_MAX;
     }
     for (size_t i = 0; i < mChannelMasks.size(); i ++) {
@@ -6077,6 +6115,7 @@
         }
         if ((cnlCount > channelCount) && (cnlCount <= maxCount)) {
             channelMask = mChannelMasks[i];
+            channelCount = cnlCount;
         }
     }
     return channelMask;
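The last two AudioPolicyManager hunks replace the previous "no limit" behaviour for direct and offload outputs with an explicit minimum: pick the smallest advertised sampling rate and the smallest channel count, on the theory that those are the values a connected sink is most likely to accept. A compact sketch of the rate half of that selection, using std::vector in place of the framework's collection types and an illustrative function name:

```cpp
#include <climits>
#include <cstdint>
#include <cstdio>
#include <vector>

// Pick the smallest usable value; 0 entries mean "unspecified" and are skipped,
// and 0 is returned when nothing usable is found, matching the patched helper.
static uint32_t pickMinSamplingRate(const std::vector<uint32_t>& rates) {
    uint32_t best = UINT_MAX;
    for (uint32_t rate : rates) {
        if (rate > 0 && rate < best) {
            best = rate;
        }
    }
    return (best == UINT_MAX) ? 0 : best;
}

int main() {
    // A direct output advertising 48 kHz, 96 kHz and 192 kHz is opened at
    // 48 kHz, the rate the attached sink is most likely to accept.
    std::printf("%u\n", static_cast<unsigned>(pickMinSamplingRate({96000, 48000, 192000})));
    return 0;
}
```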