Merge "release camera without holding CameraSource mutex" into lmp-dev
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index d77ddaf..fcccc6d 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -120,6 +120,7 @@
         kWhatSetParameters           = 'setP',
         kWhatSubmitOutputMetaDataBufferIfEOS = 'subm',
         kWhatOMXDied                 = 'OMXd',
+        kWhatReleaseCodecInstance    = 'relC',
     };
 
     enum {
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index bca78b9..54a4e8b 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -205,6 +205,7 @@
         kFlagIsEncoder                  = 256,
         kFlagGatherCodecSpecificData    = 512,
         kFlagIsAsync                    = 1024,
+        kFlagIsComponentAllocated       = 2048,
     };
 
     struct BufferInfo {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index e5c83dd..86ce385 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -66,8 +66,10 @@
       mAudioQueueGeneration(0),
       mVideoQueueGeneration(0),
       mAudioFirstAnchorTimeMediaUs(-1),
-      mVideoAnchorTimeMediaUs(-1),
-      mVideoAnchorTimeRealUs(-1),
+      mAnchorTimeMediaUs(-1),
+      mAnchorTimeRealUs(-1),
+      mAnchorNumFramesWritten(-1),
+      mAnchorMaxMediaUs(-1),
       mVideoLateByUs(0ll),
       mHasAudio(false),
       mHasVideo(false),
@@ -82,7 +84,9 @@
       mAudioRenderingStartGeneration(0),
       mAudioOffloadPauseTimeoutGeneration(0),
       mAudioOffloadTornDown(false),
-      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER) {
+      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
+      mTotalBuffersQueued(0),
+      mLastAudioBufferDrained(0) {
     readProperties();
 }
 
@@ -140,7 +144,7 @@
     // CHECK(mAudioQueue.empty());
     // CHECK(mVideoQueue.empty());
     setAudioFirstAnchorTime(-1);
-    setVideoAnchorTime(-1, -1);
+    setAnchorTime(-1, -1);
     setVideoLateByUs(0);
     mSyncQueues = false;
 }
@@ -171,28 +175,35 @@
     return getCurrentPosition(mediaUs, ALooper::GetNowUs());
 }
 
-status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs, int64_t nowUs) {
+status_t NuPlayer::Renderer::getCurrentPosition(
+        int64_t *mediaUs, int64_t nowUs, bool allowPastQueuedVideo) {
     Mutex::Autolock autoLock(mTimeLock);
     if (!mHasAudio && !mHasVideo) {
         return NO_INIT;
     }
 
-    int64_t positionUs = 0;
-    if (!mHasAudio) {
-        if (mVideoAnchorTimeMediaUs < 0) {
-            return NO_INIT;
-        }
-        positionUs = (nowUs - mVideoAnchorTimeRealUs) + mVideoAnchorTimeMediaUs;
-
-        if (mPauseStartedTimeRealUs != -1) {
-            positionUs -= (nowUs - mPauseStartedTimeRealUs);
-        }
-    } else {
-        if (mAudioFirstAnchorTimeMediaUs < 0) {
-            return NO_INIT;
-        }
-        positionUs = mAudioFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs);
+    if (mAnchorTimeMediaUs < 0) {
+        return NO_INIT;
     }
+
+    int64_t positionUs = (nowUs - mAnchorTimeRealUs) + mAnchorTimeMediaUs;
+
+    if (mPauseStartedTimeRealUs != -1) {
+        positionUs -= (nowUs - mPauseStartedTimeRealUs);
+    }
+
+    // limit position to the last queued media time (for video only stream
+    // position will be discrete as we don't know how long each frame lasts)
+    if (mAnchorMaxMediaUs >= 0 && !allowPastQueuedVideo) {
+        if (positionUs > mAnchorMaxMediaUs) {
+            positionUs = mAnchorMaxMediaUs;
+        }
+    }
+
+    if (positionUs < mAudioFirstAnchorTimeMediaUs) {
+        positionUs = mAudioFirstAnchorTimeMediaUs;
+    }
+
     *mediaUs = (positionUs <= 0) ? 0 : positionUs;
     return OK;
 }
@@ -218,10 +229,15 @@
     }
 }
 
-void NuPlayer::Renderer::setVideoAnchorTime(int64_t mediaUs, int64_t realUs) {
+void NuPlayer::Renderer::setAnchorTime(
+        int64_t mediaUs, int64_t realUs, int64_t numFramesWritten, bool resume) {
     Mutex::Autolock autoLock(mTimeLock);
-    mVideoAnchorTimeMediaUs = mediaUs;
-    mVideoAnchorTimeRealUs = realUs;
+    mAnchorTimeMediaUs = mediaUs;
+    mAnchorTimeRealUs = realUs;
+    mAnchorNumFramesWritten = numFramesWritten;
+    if (resume) {
+        mPauseStartedTimeRealUs = -1;
+    }
 }
 
 void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
@@ -361,6 +377,19 @@
             break;
         }
 
+        case kWhatPostDrainVideoQueue:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+            if (generation != mVideoQueueGeneration) {
+                break;
+            }
+
+            mDrainVideoQueuePending = false;
+            postDrainVideoQueue();
+            break;
+        }
+
         case kWhatQueueBuffer:
         {
             onQueueBuffer(msg);
@@ -549,6 +578,14 @@
         notifyIfMediaRenderingStarted();
     }
 
+    if (mAudioFirstAnchorTimeMediaUs >= 0) {
+        int64_t nowUs = ALooper::GetNowUs();
+        setAnchorTime(mAudioFirstAnchorTimeMediaUs, nowUs - getPlayedOutAudioDurationUs(nowUs));
+    }
+
+    // we don't know how much data we are queueing for offloaded tracks
+    mAnchorMaxMediaUs = -1;
+
     if (hasEOS) {
         (new AMessage(kWhatStopAudioSink, id()))->post();
     }
@@ -580,6 +617,8 @@
     while (numBytesAvailableToWrite > 0 && !mAudioQueue.empty()) {
         QueueEntry *entry = &*mAudioQueue.begin();
 
+        mLastAudioBufferDrained = entry->mBufferOrdinal;
+
         if (entry->mBuffer == NULL) {
             // EOS
             int64_t postEOSDelayUs = 0;
@@ -597,8 +636,7 @@
             int64_t mediaTimeUs;
             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
             ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
-
-            setAudioFirstAnchorTimeIfNeeded(mediaTimeUs);
+            onNewAudioMediaTime(mediaTimeUs);
         }
 
         size_t copy = entry->mBuffer->size() - entry->mOffset;
@@ -647,6 +685,11 @@
             break;
         }
     }
+    mAnchorMaxMediaUs =
+        mAnchorTimeMediaUs +
+                (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
+                        * 1000LL * mAudioSink->msecsPerFrame());
+
     return !mAudioQueue.empty();
 }
 
@@ -658,12 +701,26 @@
 
 int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
     int64_t currentPositionUs;
-    if (getCurrentPosition(&currentPositionUs, nowUs) != OK) {
-        currentPositionUs = 0;
+    if (getCurrentPosition(&currentPositionUs, nowUs, true /* allowPastQueuedVideo */) != OK) {
+        // If failed to get current position, e.g. due to audio clock is not ready, then just
+        // play out video immediately without delay.
+        return nowUs;
     }
     return (mediaTimeUs - currentPositionUs) + nowUs;
 }
 
+void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
+    // TRICKY: vorbis decoder generates multiple frames with the same
+    // timestamp, so only update on the first frame with a given timestamp
+    if (mediaTimeUs == mAnchorTimeMediaUs) {
+        return;
+    }
+    setAudioFirstAnchorTimeIfNeeded(mediaTimeUs);
+    int64_t nowUs = ALooper::GetNowUs();
+    setAnchorTime(
+            mediaTimeUs, nowUs + getPendingAudioPlayoutDurationUs(nowUs), mNumFramesWritten);
+}
+
 void NuPlayer::Renderer::postDrainVideoQueue() {
     if (mDrainVideoQueuePending
             || mSyncQueues
@@ -698,12 +755,34 @@
         int64_t mediaTimeUs;
         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
 
-        if (mVideoAnchorTimeMediaUs < 0) {
-            setVideoAnchorTime(mediaTimeUs, nowUs);
+        if (mAnchorTimeMediaUs < 0) {
+            setAnchorTime(mediaTimeUs, nowUs);
             realTimeUs = nowUs;
         } else {
             realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
         }
+        if (!mHasAudio) {
+            mAnchorMaxMediaUs = mediaTimeUs + 100000; // smooth out videos >= 10fps
+        }
+
+        // Heuristics to handle situation when media time changed without a
+        // discontinuity. If we have not drained an audio buffer that was
+        // received after this buffer, repost in 10 msec. Otherwise repost
+        // in 500 msec.
+        delayUs = realTimeUs - nowUs;
+        if (delayUs > 500000) {
+            int64_t postDelayUs = 500000;
+            if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
+                postDelayUs = 10000;
+            }
+            msg->setWhat(kWhatPostDrainVideoQueue);
+            msg->post(postDelayUs);
+            mVideoScheduler->restart();
+            ALOGI("possible video time jump of %dms, retrying in %dms",
+                    (int)(delayUs / 1000), (int)(postDelayUs / 1000));
+            mDrainVideoQueuePending = true;
+            return;
+        }
     }
 
     realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
@@ -769,14 +848,14 @@
         } else {
             ALOGV("rendering video at media time %.2f secs",
                     (mFlags & FLAG_REAL_TIME ? realTimeUs :
-                    (realTimeUs + mVideoAnchorTimeMediaUs - mVideoAnchorTimeRealUs)) / 1E6);
+                    (realTimeUs + mAnchorTimeMediaUs - mAnchorTimeRealUs)) / 1E6);
         }
     } else {
         setVideoLateByUs(0);
-        if (!mVideoSampleReceived) {
+        if (!mVideoSampleReceived && !mHasAudio) {
             // This will ensure that the first frame after a flush won't be used as anchor
             // when renderer is in paused state, because resume can happen any time after seek.
-            setVideoAnchorTime(-1, -1);
+            setAnchorTime(-1, -1);
         }
     }
 
@@ -843,6 +922,7 @@
     entry.mNotifyConsumed = notifyConsumed;
     entry.mOffset = 0;
     entry.mFinalResult = OK;
+    entry.mBufferOrdinal = ++mTotalBuffersQueued;
 
     if (audio) {
         Mutex::Autolock autoLock(mLock);
@@ -1054,6 +1134,7 @@
     }
     CHECK(!mDrainAudioQueuePending);
     mNumFramesWritten = 0;
+    mAnchorNumFramesWritten = -1;
     uint32_t written;
     if (mAudioSink->getFramesWritten(&written) == OK) {
         mNumFramesWritten = written;
@@ -1108,9 +1189,9 @@
     mPaused = false;
     if (mPauseStartedTimeRealUs != -1) {
         int64_t newAnchorRealUs =
-            mVideoAnchorTimeRealUs + ALooper::GetNowUs() - mPauseStartedTimeRealUs;
-        setVideoAnchorTime(mVideoAnchorTimeMediaUs, newAnchorRealUs);
-        setPauseStartedTimeRealUs(-1);
+            mAnchorTimeRealUs + ALooper::GetNowUs() - mPauseStartedTimeRealUs;
+        setAnchorTime(
+                mAnchorTimeMediaUs, newAnchorRealUs, mAnchorNumFramesWritten, true /* resume */);
     }
 
     if (!mAudioQueue.empty()) {
@@ -1175,7 +1256,7 @@
 
     // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
     //CHECK_EQ(numFramesPlayed & (1 << 31), 0);  // can't be negative until 12.4 hrs, test
-    int64_t durationUs = (int32_t)numFramesPlayed * 1000LL * mAudioSink->msecsPerFrame()
+    int64_t durationUs = (int64_t)((int32_t)numFramesPlayed * 1000LL * mAudioSink->msecsPerFrame())
             + nowUs - numFramesPlayedAt;
     if (durationUs < 0) {
         // Occurs when numFramesPlayed position is very small and the following:
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 3e30226..7b46a59 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -61,11 +61,13 @@
 
     // Following setters and getters are protected by mTimeLock.
     status_t getCurrentPosition(int64_t *mediaUs);
-    status_t getCurrentPosition(int64_t *mediaUs, int64_t nowUs);
+    status_t getCurrentPosition(
+            int64_t *mediaUs, int64_t nowUs, bool allowPastQueuedVideo = false);
     void setHasMedia(bool audio);
     void setAudioFirstAnchorTime(int64_t mediaUs);
     void setAudioFirstAnchorTimeIfNeeded(int64_t mediaUs);
-    void setVideoAnchorTime(int64_t mediaUs, int64_t realUs);
+    void setAnchorTime(
+            int64_t mediaUs, int64_t realUs, int64_t numFramesWritten = -1, bool resume = false);
     void setVideoLateByUs(int64_t lateUs);
     int64_t getVideoLateByUs();
     void setPauseStartedTimeRealUs(int64_t realUs);
@@ -101,6 +103,7 @@
     enum {
         kWhatDrainAudioQueue     = 'draA',
         kWhatDrainVideoQueue     = 'draV',
+        kWhatPostDrainVideoQueue = 'pDVQ',
         kWhatQueueBuffer         = 'queB',
         kWhatQueueEOS            = 'qEOS',
         kWhatFlush               = 'flus',
@@ -119,6 +122,7 @@
         sp<AMessage> mNotifyConsumed;
         size_t mOffset;
         status_t mFinalResult;
+        int32_t mBufferOrdinal;
     };
 
     static const int64_t kMinPositionUpdateDelayUs;
@@ -144,8 +148,10 @@
     // |mTimeLock|.
     // TODO: move those members to a seperated media clock class.
     int64_t mAudioFirstAnchorTimeMediaUs;
-    int64_t mVideoAnchorTimeMediaUs;
-    int64_t mVideoAnchorTimeRealUs;
+    int64_t mAnchorTimeMediaUs;
+    int64_t mAnchorTimeRealUs;
+    int64_t mAnchorNumFramesWritten;
+    int64_t mAnchorMaxMediaUs;
     int64_t mVideoLateByUs;
     bool mHasAudio;
     bool mHasVideo;
@@ -169,6 +175,10 @@
     bool mAudioOffloadTornDown;
     audio_offload_info_t mCurrentOffloadInfo;
 
+    int32_t mTotalBuffersQueued;
+    int32_t mLastAudioBufferDrained;
+
+
     size_t fillAudioBuffer(void *buffer, size_t size);
 
     bool onDrainAudioQueue();
@@ -176,6 +186,7 @@
     int64_t getPlayedOutAudioDurationUs(int64_t nowUs);
     void postDrainAudioQueue_l(int64_t delayUs = 0);
 
+    void onNewAudioMediaTime(int64_t mediaTimeUs);
     int64_t getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs);
 
     void onDrainVideoQueue();
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 2048808..2f2f9cf 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -498,6 +498,10 @@
     sp<AMessage> msg = new AMessage(kWhatShutdown, id());
     msg->setInt32("keepComponentAllocated", keepComponentAllocated);
     msg->post();
+    if (!keepComponentAllocated) {
+        // ensure shutdown completes in 3 seconds
+        (new AMessage(kWhatReleaseCodecInstance, id()))->post(3000000);
+    }
 }
 
 void ACodec::signalRequestIDRFrame() {
@@ -3797,6 +3801,19 @@
             break;
         }
 
+        case ACodec::kWhatReleaseCodecInstance:
+        {
+            ALOGI("[%s] forcing the release of codec",
+                    mCodec->mComponentName.c_str());
+            status_t err = mCodec->mOMX->freeNode(mCodec->mNode);
+            ALOGE_IF(err != OK, "[%s] failed to release codec instance: err=%d",
+                       mCodec->mComponentName.c_str(), err);
+            sp<AMessage> notify = mCodec->mNotify->dup();
+            notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
+            notify->post();
+            break;
+        }
+
         default:
             return false;
     }
@@ -4456,6 +4473,13 @@
             break;
         }
 
+        case ACodec::kWhatReleaseCodecInstance:
+        {
+            // nothing to do, as we have already signaled shutdown
+            handled = true;
+            break;
+        }
+
         default:
             return BaseState::onMessageReceived(msg);
     }
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 5f55484..df47bd5 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -738,6 +738,7 @@
                             err, actionCode, mState);
                     if (err == DEAD_OBJECT) {
                         mFlags |= kFlagSawMediaServerDie;
+                        mFlags &= ~kFlagIsComponentAllocated;
                     }
 
                     bool sendErrorResponse = true;
@@ -863,6 +864,7 @@
                 {
                     CHECK_EQ(mState, INITIALIZING);
                     setState(INITIALIZED);
+                    mFlags |= kFlagIsComponentAllocated;
 
                     CHECK(msg->findString("componentName", &mComponentName));
 
@@ -1136,6 +1138,7 @@
                         setState(UNINITIALIZED);
                         mComponentName.clear();
                     }
+                    mFlags &= ~kFlagIsComponentAllocated;
 
                     (new AMessage)->postReply(mReplyID);
                     break;
@@ -1336,9 +1339,10 @@
             uint32_t replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
-            if (mState != INITIALIZED
+            if (!(mFlags & kFlagIsComponentAllocated) && mState != INITIALIZED
                     && mState != CONFIGURED && !isExecuting()) {
-                // We may be in "UNINITIALIZED" state already without the
+                // We may be in "UNINITIALIZED" state already and
+                // also shutdown the encoder/decoder without the
                 // client being aware of this if media server died while
                 // we were being stopped. The client would assume that
                 // after stop() returned, it would be safe to call release()
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index d07ec14..f9c84e2 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -149,6 +149,11 @@
 status_t OMXNodeInstance::freeNode(OMXMaster *master) {
     static int32_t kMaxNumIterations = 10;
 
+    // exit if we have already freed the node
+    if (mHandle == NULL) {
+        return OK;
+    }
+
     // Transition the node from its current state all the way down
     // to "Loaded".
     // This ensures that all active buffers are properly freed even
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 44e34b7..e443476 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3672,6 +3672,11 @@
         mEffectBufferValid = true;
     }
 
+    if (mEffectBufferValid) {
+        // as long as there are effects we should clear the effects buffer, to avoid
+        // passing a non-clean buffer to the effect chain
+        memset(mEffectBuffer, 0, mEffectBufferSize);
+    }
     // sink or mix buffer must be cleared if all tracks are connected to an
     // effect chain as in this case the mixer will not write to the sink or mix buffer
     // and track effects will accumulate into it
@@ -3690,10 +3695,6 @@
             // must imply MIXER_TRACKS_READY.
             // Later, we may clear buffers regardless, and skip much of this logic.
         }
-        // TODO - either mEffectBuffer or mSinkBuffer needs to be cleared.
-        if (mEffectBufferValid) {
-            memset(mEffectBuffer, 0, mEffectBufferSize);
-        }
         // FIXME as a performance optimization, should remember previous zero status
         memset(mSinkBuffer, 0, mNormalFrameCount * mFrameSize);
     }
diff --git a/services/audiopolicy/AudioPolicyEffects.cpp b/services/audiopolicy/AudioPolicyEffects.cpp
index 3c1c042..e7e1b36 100644
--- a/services/audiopolicy/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/AudioPolicyEffects.cpp
@@ -105,26 +105,28 @@
     inputDesc->mRefCount++;
 
     ALOGV("addInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
-
-    Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
-    for (size_t i = 0; i < effects.size(); i++) {
-        EffectDesc *effect = effects[i];
-        sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input);
-        status_t status = fx->initCheck();
-        if (status != NO_ERROR && status != ALREADY_EXISTS) {
-            ALOGW("addInputEffects(): failed to create Fx %s on source %d",
+    if (inputDesc->mRefCount == 1) {
+        Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
+        for (size_t i = 0; i < effects.size(); i++) {
+            EffectDesc *effect = effects[i];
+            sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0,
+                                                 audioSession, input);
+            status_t status = fx->initCheck();
+            if (status != NO_ERROR && status != ALREADY_EXISTS) {
+                ALOGW("addInputEffects(): failed to create Fx %s on source %d",
+                      effect->mName, (int32_t)aliasSource);
+                // fx goes out of scope and strong ref on AudioEffect is released
+                continue;
+            }
+            for (size_t j = 0; j < effect->mParams.size(); j++) {
+                fx->setParameter(effect->mParams[j]);
+            }
+            ALOGV("addInputEffects(): added Fx %s on source: %d",
                   effect->mName, (int32_t)aliasSource);
-            // fx goes out of scope and strong ref on AudioEffect is released
-            continue;
+            inputDesc->mEffects.add(fx);
         }
-        for (size_t j = 0; j < effect->mParams.size(); j++) {
-            fx->setParameter(effect->mParams[j]);
-        }
-        ALOGV("addInputEffects(): added Fx %s on source: %d", effect->mName, (int32_t)aliasSource);
-        inputDesc->mEffects.add(fx);
+        inputDesc->setProcessorEnabled(true);
     }
-    inputDesc->setProcessorEnabled(true);
-
     return status;
 }
 
@@ -241,26 +243,28 @@
     }
     procDesc->mRefCount++;
 
-    ALOGV("addOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
-
-    Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
-    for (size_t i = 0; i < effects.size(); i++) {
-        EffectDesc *effect = effects[i];
-        sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, 0, 0, 0, audioSession, output);
-        status_t status = fx->initCheck();
-        if (status != NO_ERROR && status != ALREADY_EXISTS) {
-            ALOGE("addOutputSessionEffects(): failed to create Fx  %s on session %d",
-                  effect->mName, audioSession);
-            // fx goes out of scope and strong ref on AudioEffect is released
-            continue;
+    ALOGV("addOutputSessionEffects(): session: %d, refCount: %d",
+          audioSession, procDesc->mRefCount);
+    if (procDesc->mRefCount == 1) {
+        Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
+        for (size_t i = 0; i < effects.size(); i++) {
+            EffectDesc *effect = effects[i];
+            sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, 0, 0, 0,
+                                                 audioSession, output);
+            status_t status = fx->initCheck();
+            if (status != NO_ERROR && status != ALREADY_EXISTS) {
+                ALOGE("addOutputSessionEffects(): failed to create Fx  %s on session %d",
+                      effect->mName, audioSession);
+                // fx goes out of scope and strong ref on AudioEffect is released
+                continue;
+            }
+            ALOGV("addOutputSessionEffects(): added Fx %s on session: %d for stream: %d",
+                  effect->mName, audioSession, (int32_t)stream);
+            procDesc->mEffects.add(fx);
         }
-        ALOGV("addOutputSessionEffects(): added Fx %s on session: %d for stream: %d",
-              effect->mName, audioSession, (int32_t)stream);
-        procDesc->mEffects.add(fx);
+
+        procDesc->setProcessorEnabled(true);
     }
-
-    procDesc->setProcessorEnabled(true);
-
     return status;
 }
 
@@ -281,7 +285,8 @@
 
     EffectVector *procDesc = mOutputSessions.valueAt(index);
     procDesc->mRefCount--;
-    ALOGV("releaseOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
+    ALOGV("releaseOutputSessionEffects(): session: %d, refCount: %d",
+          audioSession, procDesc->mRefCount);
     if (procDesc->mRefCount == 0) {
         procDesc->setProcessorEnabled(false);
         procDesc->mEffects.clear();