Merge "RTSPSource: Do not update time when there are no tracks, i.e., when aborted." into mnc-dev
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 5e7b644..88a7745 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -1510,17 +1510,7 @@
                 mVideoTimeUs = timeUs;
             }
 
-            // formatChange && seeking: track whose source is changed during selection
-            // formatChange && !seeking: track whose source is not changed during selection
-            // !formatChange: normal seek
-            if ((seeking || formatChange)
-                    && (trackType == MEDIA_TRACK_TYPE_AUDIO
-                    || trackType == MEDIA_TRACK_TYPE_VIDEO)) {
-                ATSParser::DiscontinuityType type = (formatChange && seeking)
-                        ? ATSParser::DISCONTINUITY_FORMATCHANGE
-                        : ATSParser::DISCONTINUITY_NONE;
-                track->mPackets->queueDiscontinuity( type, NULL, true /* discard */);
-            }
+            queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
 
             sp<ABuffer> buffer = mediaBufferToABuffer(
                     mbuf, trackType, seekTimeUs, actualTimeUs);
@@ -1538,10 +1528,26 @@
                     false /* discard */);
 #endif
         } else {
+            queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
             track->mPackets->signalEOS(err);
             break;
         }
     }
 }
 
+void NuPlayer::GenericSource::queueDiscontinuityIfNeeded(
+        bool seeking, bool formatChange, media_track_type trackType, Track *track) {
+    // formatChange && seeking: track whose source is changed during selection
+    // formatChange && !seeking: track whose source is not changed during selection
+    // !formatChange: normal seek
+    if ((seeking || formatChange)
+            && (trackType == MEDIA_TRACK_TYPE_AUDIO
+            || trackType == MEDIA_TRACK_TYPE_VIDEO)) {
+        ATSParser::DiscontinuityType type = (formatChange && seeking)
+                ? ATSParser::DISCONTINUITY_FORMATCHANGE
+                : ATSParser::DISCONTINUITY_NONE;
+        track->mPackets->queueDiscontinuity(type, NULL /* extra */, true /* discard */);
+    }
+}
+
 }  // namespace android
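
Note on the GenericSource change: this is mostly a pure extraction, with one behavioral addition — the shared block now also runs on the EOS path, so a track that hits EOS during a seek or track selection still gets its packet queue flushed. A self-contained sketch of the decision table, using a stand-in enum rather than the real ATSParser types:

```cpp
#include <cassert>

// Stand-in for ATSParser::DiscontinuityType; NotQueued marks the no-op case.
enum class Discontinuity { None, FormatChange, NotQueued };

// Mirrors queueDiscontinuityIfNeeded():
//   seeking && formatChange      -> FORMATCHANGE (source swapped during selection)
//   exactly one of the two true  -> NONE (flush only)
//   neither, or not an A/V track -> nothing queued
Discontinuity discontinuityFor(bool seeking, bool formatChange, bool isAudioOrVideo) {
    if (!(seeking || formatChange) || !isAudioOrVideo) {
        return Discontinuity::NotQueued;
    }
    return (seeking && formatChange) ? Discontinuity::FormatChange
                                     : Discontinuity::None;
}

int main() {
    assert(discontinuityFor(true,  true,  true)  == Discontinuity::FormatChange);
    assert(discontinuityFor(true,  false, true)  == Discontinuity::None);      // normal seek
    assert(discontinuityFor(false, true,  true)  == Discontinuity::None);      // unchanged track
    assert(discontinuityFor(false, false, true)  == Discontinuity::NotQueued);
    assert(discontinuityFor(true,  true,  false) == Discontinuity::NotQueued); // e.g. subtitles
    return 0;
}
```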
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 7fab051..0a75e4c 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -200,6 +200,9 @@
             media_track_type trackType,
             int64_t seekTimeUs = -1ll, int64_t *actualTimeUs = NULL, bool formatChange = false);
 
+    void queueDiscontinuityIfNeeded(
+            bool seeking, bool formatChange, media_track_type trackType, Track *track);
+
     void schedulePollBuffering();
     void cancelPollBuffering();
     void restartPollBuffering();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 376c93a..d169964 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -414,6 +414,11 @@
     sp<ABuffer> buffer;
     mCodec->getInputBuffer(index, &buffer);
 
+    if (buffer == NULL) {
+        handleError(UNKNOWN_ERROR);
+        return false;
+    }
+
     if (index >= mInputBuffers.size()) {
         for (size_t i = mInputBuffers.size(); i <= index; ++i) {
             mInputBuffers.add();
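
Note on the NuPlayerDecoder change: the fetch can legitimately come back with a NULL buffer (for instance if the codec errored or was torn down between the input-available callback and this call), and the code below the new check dereferences the buffer. The generic shape of the guard, with hypothetical names rather than the MediaCodec API:

```cpp
#include <iostream>
#include <memory>

struct Buffer { size_t capacity = 0; };

// Hypothetical stand-in for the codec's getInputBuffer(): may yield null,
// e.g. if the codec was released between the callback and this call.
std::shared_ptr<Buffer> getInputBuffer(size_t index) {
    (void)index;
    return nullptr;
}

bool onInputBufferFetched(size_t index) {
    std::shared_ptr<Buffer> buffer = getInputBuffer(index);
    if (!buffer) {
        std::cerr << "input buffer " << index << " unavailable, reporting error\n";
        return false;  // caller maps this to handleError(UNKNOWN_ERROR)
    }
    // ... only here is it safe to fill *buffer ...
    return true;
}

int main() { return onInputBufferFetched(0) ? 1 : 0; }
```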
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index 1c53b40..f82636b 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -583,6 +583,13 @@
 
     Mutex::Autolock autoLock(mLock);
 
+    // If we're disconnecting, return EOS and don't write through |data|:
+    // it could point into the stack frame of the caller of
+    // NuCachedSource2::readAt(), which may have returned already.
+    if (mDisconnecting) {
+        return ERROR_END_OF_STREAM;
+    }
+
     if (!mFetching) {
         mLastAccessPos = offset;
         restartPrefetcherIfNecessary_l(
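
Note on the NuCachedSource2 change: disconnect() unblocks a reader parked in readAt(), but the deferred fetch still runs on the looper thread afterwards; by then the caller-supplied buffer may belong to a dead stack frame. The fix is to re-check the flag under mLock before any write through the pointer. A condensed illustration of that guard (the error value is a stand-in, not MediaErrors.h):

```cpp
#include <mutex>

struct CachedSource {
    std::mutex mLock;
    bool mDisconnecting = false;

    enum : int { ERROR_END_OF_STREAM = -1011 };  // stand-in value

    // Runs on the worker/looper thread, servicing a deferred read.
    int onFetch(void* data, size_t size) {
        std::lock_guard<std::mutex> autoLock(mLock);
        if (mDisconnecting) {
            // The reader was already unblocked by disconnect(); `data` may point
            // into a stack frame that no longer exists, so do not touch it.
            return ERROR_END_OF_STREAM;
        }
        // ... safe to copy into `data` while holding the lock ...
        (void)data; (void)size;
        return 0;
    }

    void disconnect() {
        std::lock_guard<std::mutex> autoLock(mLock);
        mDisconnecting = true;
        // (the real code also signals any waiting reader here)
    }
};

int main() {
    CachedSource s;
    s.disconnect();
    char stackBuf[16];
    return s.onFetch(stackBuf, sizeof(stackBuf)) == CachedSource::ERROR_END_OF_STREAM
            ? 0 : 1;
}
```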
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 51b2610..d1e52d5 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -356,6 +356,15 @@
             // check that channelMask is the "canonical" one we expect for the channelCount.
             return channelMask == audio_channel_out_mask_from_count(channelCount);
             }
+        case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+            if (kEnableExtendedChannels) {
+                const uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
+                if (channelCount >= FCC_2 // mono is not supported at this time
+                        && channelCount <= AudioMixer::MAX_NUM_CHANNELS) {
+                    return true;
+                }
+            }
+            return false;
         default:
             return false;
         }
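
Note on the AudioFlinger.h change: an index channel mask names channels by bit position, so its channel count is simply the popcount of the bits field; the new case accepts 2..MAX_NUM_CHANNELS of them behind kEnableExtendedChannels. A sketch with plain bit ops instead of the system/audio.h helpers (kMaxNumChannels is a stand-in for AudioMixer::MAX_NUM_CHANNELS):

```cpp
#include <bitset>
#include <cassert>
#include <cstdint>

constexpr uint32_t FCC_2 = 2;            // as in the check above
constexpr uint32_t kMaxNumChannels = 8;  // stand-in for AudioMixer::MAX_NUM_CHANNELS

bool isValidIndexMaskBits(uint32_t bits) {
    // Each set bit is one channel in index representation.
    const uint32_t channelCount = std::bitset<32>(bits).count();
    // mono is not supported at this time, mirroring the diff
    return channelCount >= FCC_2 && channelCount <= kMaxNumChannels;
}

int main() {
    assert(isValidIndexMaskBits(0x3));     // channels 0 and 1 -> 2 channels, OK
    assert(!isValidIndexMaskBits(0x1));    // mono index mask -> rejected
    assert(!isValidIndexMaskBits(0x1FF));  // 9 channels > stand-in max -> rejected
    return 0;
}
```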
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 594ed05..93ba74d 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -697,49 +697,62 @@
 
 String8 channelMaskToString(audio_channel_mask_t mask, bool output) {
     String8 s;
-    if (output) {
-        if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
-        if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
-        if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
-        if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, ");
-        if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
-        if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
-        if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
-        if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, ");
-        if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, ");
-        if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, ");
-        if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, ");
-        if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center ,");
-        if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, ");
-        if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
-        if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
-        if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
-        if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " );
-        if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
-        if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown,  ");
-    } else {
-        if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
-        if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, ");
-        if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, ");
-        if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, ");
-        if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, ");
-        if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, ");
-        if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, ");
-        if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, ");
-        if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, ");
-        if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
-        if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
-        if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
-        if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
-        if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
-        if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown,  ");
+    const audio_channel_representation_t representation =
+            audio_channel_mask_get_representation(mask);
+
+    switch (representation) {
+    case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
+        if (output) {
+            if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
+            if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
+            if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, ");
+            if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
+            if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
+            if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, ");
+            if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, ");
+            if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, ");
+        } else {
+            if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
+            if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, ");
+            if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, ");
+            if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, ");
+            if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, ");
+            if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, ");
+            if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, ");
+            if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, ");
+            if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, ");
+            if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
+            if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
+            if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
+            if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
+            if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
+            if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, ");
+        }
+        const int len = s.length();
+        if (len > 2) {
+            (void) s.lockBuffer(len);      // needed?
+            s.unlockBuffer(len - 2);       // remove trailing ", "
+        }
+        return s;
     }
-    int len = s.length();
-    if (s.length() > 2) {
-        char *str = s.lockBuffer(len);
-        s.unlockBuffer(len - 2);
+    case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+        s.appendFormat("index mask, bits:%#x", audio_channel_mask_get_bits(mask));
+        return s;
+    default:
+        s.appendFormat("unknown mask, representation:%d  bits:%#x",
+                representation, audio_channel_mask_get_bits(mask));
+        return s;
     }
-    return s;
 }
 
 void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __unused)
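
Aside on channelMaskToString(): the lockBuffer()/unlockBuffer(len - 2) pair at the end of the position case is just a truncate-by-two that drops the trailing ", " separator. An equivalent std::string sketch:

```cpp
#include <cassert>
#include <string>

std::string joinFlags(bool a, bool b) {
    std::string s;
    if (a) s += "front-left, ";
    if (b) s += "front-right, ";
    if (s.size() > 2) s.resize(s.size() - 2);  // drop trailing ", "
    return s;
}

int main() {
    assert(joinFlags(true, true) == "front-left, front-right");
    assert(joinFlags(true, false) == "front-left");
    assert(joinFlags(false, false).empty());
    return 0;
}
```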
@@ -1572,10 +1585,12 @@
             ) &&
             // PCM data
             audio_is_linear_pcm(format) &&
-            // identical channel mask to sink, or mono in and stereo sink
+            // TODO: extract as a data library function that checks that a computationally
+            // expensive downmixer is not required: isFastOutputChannelConversion()
             (channelMask == mChannelMask ||
-                    (channelMask == AUDIO_CHANNEL_OUT_MONO &&
-                            mChannelMask == AUDIO_CHANNEL_OUT_STEREO)) &&
+                    mChannelMask != AUDIO_CHANNEL_OUT_STEREO ||
+                    (channelMask == AUDIO_CHANNEL_OUT_MONO
+                            /* && mChannelMask == AUDIO_CHANNEL_OUT_STEREO */)) &&
             // hardware sample rate
             (sampleRate == mSampleRate) &&
             // normal mixer has an associated fast mixer
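
Note on the fast-track condition: the TODO above names the helper it wants extracted. A minimal sketch of what isFastOutputChannelConversion() could look like under the same semantics — the function name comes from the TODO, everything else here is my assumption, not AOSP code:

```cpp
#include <cstdint>

typedef uint32_t audio_channel_mask_t;
constexpr audio_channel_mask_t AUDIO_CHANNEL_OUT_MONO   = 0x1;
constexpr audio_channel_mask_t AUDIO_CHANNEL_OUT_STEREO = 0x3;

// True when no computationally expensive downmixer is needed: the masks match,
// the sink is not stereo, or the track is mono (per the commented-out guard in
// the diff, the mono case is meant for stereo sinks).
bool isFastOutputChannelConversion(audio_channel_mask_t trackMask,
                                   audio_channel_mask_t sinkMask) {
    return trackMask == sinkMask
            || sinkMask != AUDIO_CHANNEL_OUT_STEREO
            || trackMask == AUDIO_CHANNEL_OUT_MONO;
}

int main() {
    const bool monoUp = isFastOutputChannelConversion(
            AUDIO_CHANNEL_OUT_MONO, AUDIO_CHANNEL_OUT_STEREO);     // allowed
    const bool surroundDown = isFastOutputChannelConversion(
            0x3F /* 5.1 */, AUDIO_CHANNEL_OUT_STEREO);             // needs downmixer
    return (monoUp && !surroundDown) ? 0 : 1;
}
```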
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index fc9a332..a971928 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -1975,7 +1975,7 @@
         auto conflicting = i->getConflicting();
         auto clientSp = i->getValue();
         String8 packageName;
-        userid_t clientUserId;
+        userid_t clientUserId = 0;
         if (clientSp.get() != nullptr) {
             packageName = String8{clientSp->getPackageName()};
             uid_t clientUid = clientSp->getClientUid();
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 05ede92..f2d6ab2 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1881,6 +1881,16 @@
     mCaptureSequencer->notifyAutoExposure(newState, triggerId);
 }
 
+void Camera2Client::notifyShutter(const CaptureResultExtras& resultExtras,
+                                  nsecs_t timestamp) {
+    ALOGV("%s: Shutter notification for request id %" PRId32 " at time %" PRId64,
+            __FUNCTION__, resultExtras.requestId, timestamp);
+    mCaptureSequencer->notifyShutter(resultExtras, timestamp);
+}
+
 camera2::SharedParameters& Camera2Client::getParameters() {
     return mParameters;
 }
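
Note on the Camera2Client change: the client adds no logic of its own — it only logs and forwards the HAL's shutter notification to the CaptureSequencer, which does the filtering. The delivery path as a minimal sketch (stand-in types, not the real camera service classes):

```cpp
#include <cstdint>
#include <iostream>

struct CaptureResultExtras { int32_t requestId; };
typedef int64_t nsecs_t;

struct SequencerSketch {
    void notifyShutter(const CaptureResultExtras& extras, nsecs_t ts) {
        std::cout << "sequencer: shutter for request " << extras.requestId
                  << " at " << ts << " ns\n";
    }
};

struct ClientSketch {
    SequencerSketch* mCaptureSequencer;
    // No filtering at this layer; just forward to the sequencer.
    void notifyShutter(const CaptureResultExtras& extras, nsecs_t ts) {
        mCaptureSequencer->notifyShutter(extras, ts);
    }
};

int main() {
    SequencerSketch seq;
    ClientSketch client{&seq};
    client.notifyShutter(CaptureResultExtras{7}, 123456789);
    return 0;
}
```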
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index a988037..3784aab 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -106,6 +106,8 @@
 
     virtual void notifyAutoFocus(uint8_t newState, int triggerId);
     virtual void notifyAutoExposure(uint8_t newState, int triggerId);
+    virtual void notifyShutter(const CaptureResultExtras& resultExtras,
+                               nsecs_t timestamp);
 
     /**
      * Interface used by independent components of Camera2Client.
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 9849f4d..d847e0f 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -43,6 +43,8 @@
         mNewFrameReceived(false),
         mNewCaptureReceived(false),
         mShutterNotified(false),
+        mHalNotifiedShutter(false),
+        mShutterCaptureId(-1),
         mClient(client),
         mCaptureState(IDLE),
         mStateTransitionCount(0),
@@ -106,6 +108,16 @@
     }
 }
 
+void CaptureSequencer::notifyShutter(const CaptureResultExtras& resultExtras,
+                                     nsecs_t timestamp) {
+    ATRACE_CALL();
+    (void) timestamp;  // the shutter timestamp itself is not used here
+    Mutex::Autolock l(mInputMutex);
+    if (!mHalNotifiedShutter && resultExtras.requestId == mShutterCaptureId) {
+        mHalNotifiedShutter = true;
+        mShutterNotifySignal.signal();
+    }
+}
+
 void CaptureSequencer::onResultAvailable(const CaptureResult &result) {
     ATRACE_CALL();
     ALOGV("%s: New result available.", __FUNCTION__);
@@ -335,6 +347,11 @@
     } else {
         nextState = STANDARD_START;
     }
+    {
+        Mutex::Autolock l(mInputMutex);
+        mShutterCaptureId = mCaptureId;
+        mHalNotifiedShutter = false;
+    }
     mShutterNotified = false;
 
     return nextState;
@@ -541,6 +558,7 @@
             return DONE;
         }
     }
+
     // TODO: Capture should be atomic with setStreamingRequest here
     res = client->getCameraDevice()->capture(captureCopy);
     if (res != OK) {
@@ -560,6 +578,31 @@
     ATRACE_CALL();
     Mutex::Autolock l(mInputMutex);
 
+    // Wait for shutter callback
+    while (!mHalNotifiedShutter) {
+        if (mTimeoutCount <= 0) {
+            break;
+        }
+        res = mShutterNotifySignal.waitRelative(mInputMutex, kWaitDuration);
+        if (res == TIMED_OUT) {
+            mTimeoutCount--;
+            return STANDARD_CAPTURE_WAIT;
+        }
+    }
+
+    if (mHalNotifiedShutter) {
+        if (!mShutterNotified) {
+            SharedParameters::Lock l(client->getParameters());
+            /* warning: this also locks a SharedCameraCallbacks */
+            shutterNotifyLocked(l.mParameters, client, mMsgType);
+            mShutterNotified = true;
+        }
+    } else if (mTimeoutCount <= 0) {
+        ALOGW("Timed out waiting for shutter notification");
+        return DONE;
+    }
+
     // Wait for new metadata result (mNewFrame)
     while (!mNewFrameReceived) {
         res = mNewFrameSignal.waitRelative(mInputMutex, kWaitDuration);
@@ -569,15 +612,6 @@
         }
     }
 
-    // Approximation of the shutter being closed
-    // - TODO: use the hal3 exposure callback in Camera3Device instead
-    if (mNewFrameReceived && !mShutterNotified) {
-        SharedParameters::Lock l(client->getParameters());
-        /* warning: this also locks a SharedCameraCallbacks */
-        shutterNotifyLocked(l.mParameters, client, mMsgType);
-        mShutterNotified = true;
-    }
-
     // Wait until jpeg was captured by JpegProcessor
     while (mNewFrameReceived && !mNewCaptureReceived) {
         res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
@@ -591,6 +625,7 @@
         return DONE;
     }
     if (mNewFrameReceived && mNewCaptureReceived) {
+
         if (mNewFrameId != mCaptureId) {
             ALOGW("Mismatched capture frame IDs: Expected %d, got %d",
                     mCaptureId, mNewFrameId);
@@ -667,7 +702,6 @@
         sp<Camera2Client> &/*client*/) {
     status_t res;
     ATRACE_CALL();
-
     while (!mNewCaptureReceived) {
         res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
         if (res == TIMED_OUT) {
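
Note on the CaptureSequencer change: the new wait replaces the old "first frame approximates the shutter" heuristic with a real HAL callback, using a condition wait against a shared timeout budget — each TIMED_OUT decrements the budget and re-enters STANDARD_CAPTURE_WAIT rather than blocking forever. The shape of that loop as a std::condition_variable sketch (names and the budget value are stand-ins):

```cpp
#include <chrono>
#include <condition_variable>
#include <mutex>

struct ShutterWait {
    std::mutex mInputMutex;
    std::condition_variable mShutterNotifySignal;
    bool mHalNotifiedShutter = false;
    int mTimeoutCount = 10;                                // shared budget
    const std::chrono::milliseconds kWaitDuration{100};

    enum State { STANDARD_CAPTURE_WAIT, PROCEED, DONE };

    State waitForShutter() {
        std::unique_lock<std::mutex> l(mInputMutex);
        while (!mHalNotifiedShutter) {
            if (mTimeoutCount <= 0) {
                break;  // budget exhausted; fall through and give up
            }
            if (mShutterNotifySignal.wait_for(l, kWaitDuration) ==
                    std::cv_status::timeout) {
                --mTimeoutCount;
                return STANDARD_CAPTURE_WAIT;  // re-enter state, stay responsive
            }
        }
        return mHalNotifiedShutter ? PROCEED : DONE;
    }

    void notifyShutter() {  // called from the HAL notification path
        std::lock_guard<std::mutex> l(mInputMutex);
        mHalNotifiedShutter = true;
        mShutterNotifySignal.notify_one();
    }
};

int main() {
    ShutterWait w;
    w.notifyShutter();
    return w.waitForShutter() == ShutterWait::PROCEED ? 0 : 1;
}
```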
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index d42ab13..10252fb 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -62,6 +62,10 @@
     // Notifications about AE state changes
     void notifyAutoExposure(uint8_t newState, int triggerId);
 
+    // Notifications about shutter (capture start)
+    void notifyShutter(const CaptureResultExtras& resultExtras,
+                       nsecs_t timestamp);
+
     // Notification from the frame processor
     virtual void onResultAvailable(const CaptureResult &result);
 
@@ -95,7 +99,10 @@
     sp<MemoryBase> mCaptureBuffer;
     Condition mNewCaptureSignal;
 
-    bool mShutterNotified;
+    bool mShutterNotified; // Has CaptureSequencer sent shutter to Client
+    bool mHalNotifiedShutter; // Has HAL sent shutter to CaptureSequencer
+    int32_t mShutterCaptureId; // The captureId which is waiting for shutter notification
+    Condition mShutterNotifySignal;
 
     /**
      * Internal to CaptureSequencer
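
Finally, mShutterCaptureId exists to filter stale notifications: a shutter for an earlier request id must not release the wait for the current capture, which is why the start-state handler re-arms it together with mHalNotifiedShutter under mInputMutex. A tiny single-threaded sketch of that filter:

```cpp
#include <cassert>
#include <cstdint>

struct Sequencer {
    bool mHalNotifiedShutter = false;
    int32_t mShutterCaptureId = -1;

    void armCapture(int32_t captureId) {     // start of a new capture
        mShutterCaptureId = captureId;
        mHalNotifiedShutter = false;
    }
    void notifyShutter(int32_t requestId) {  // from the HAL
        if (!mHalNotifiedShutter && requestId == mShutterCaptureId) {
            mHalNotifiedShutter = true;      // (the real code also signals here)
        }
    }
};

int main() {
    Sequencer s;
    s.armCapture(42);
    s.notifyShutter(41);            // stale id: ignored
    assert(!s.mHalNotifiedShutter);
    s.notifyShutter(42);            // current capture: accepted
    assert(s.mHalNotifiedShutter);
    return 0;
}
```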