Merge "Incr. MIN_UNDEQUEUED_BUFFERS in SurfaceMediaSource"
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index e0d7898..6a15f6e 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -185,6 +185,10 @@
static status_t unregisterEffect(int id);
static status_t setEffectEnabled(int id, bool enabled);
+ // clear stream to output mapping cache (gStreamOutputMap)
+ // and output configuration cache (gOutputs)
+ static void clearAudioConfigCache();
+
static const sp<IAudioPolicyService>& get_audio_policy_service();
// ----------------------------------------------------------------------------
@@ -236,7 +240,8 @@
// mapping between stream types and outputs
static DefaultKeyedVector<int, audio_io_handle_t> gStreamOutputMap;
- // list of output descritor containing cached parameters (sampling rate, framecount, channel count...)
+ // list of output descriptors containing cached parameters
+ // (sampling rate, framecount, channel count...)
static DefaultKeyedVector<audio_io_handle_t, OutputDescriptor *> gOutputs;
};
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index 97c69cb..d0940bb 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -347,6 +347,13 @@
// encoder
int mNumFramesEncoded;
+ // mFirstFrameTimestamp is the timestamp of the first received frame.
+ // It is used to offset the output timestamps so recording starts at time 0.
+ int64_t mFirstFrameTimestamp;
+ // mStartTimeNs is the start time passed into the source when start() is
+ // called; it is used to offset the output timestamps.
+ int64_t mStartTimeNs;
+
// mFrameAvailableCondition condition used to indicate whether there
// is a frame available for dequeuing
Condition mFrameAvailableCondition;
diff --git a/include/media/stagefright/openmax/OMX_IVCommon.h b/include/media/stagefright/openmax/OMX_IVCommon.h
index 65b6339..8bb4ded 100644
--- a/include/media/stagefright/openmax/OMX_IVCommon.h
+++ b/include/media/stagefright/openmax/OMX_IVCommon.h
@@ -156,7 +156,7 @@
* Android-specific OMX IL colorformats. Change this enum to
* an acceptable range once that is done.
* */
- OMX_COLOR_FormatAndroidOpaque = 0x7F000001,
+ OMX_COLOR_FormatAndroidOpaque = 0x7F000789,
OMX_TI_COLOR_FormatYUV420PackedSemiPlanar = 0x7F000100,
OMX_QCOM_COLOR_FormatYVU420SemiPlanar = 0x7FA30C00,
OMX_COLOR_FormatMax = 0x7FFFFFFF
diff --git a/include/private/surfaceflinger/LayerState.h b/include/private/surfaceflinger/LayerState.h
index d2fed41..3eb5c99 100644
--- a/include/private/surfaceflinger/LayerState.h
+++ b/include/private/surfaceflinger/LayerState.h
@@ -54,8 +54,8 @@
};
SurfaceID surface;
uint32_t what;
- int32_t x;
- int32_t y;
+ float x;
+ float y;
uint32_t z;
uint32_t w;
uint32_t h;
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index bb91fa9..853a5f6 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -727,6 +727,14 @@
}
+void AudioSystem::clearAudioConfigCache()
+{
+ Mutex::Autolock _l(gLock);
+ LOGV("clearAudioConfigCache()");
+ gStreamOutputMap.clear();
+ gOutputs.clear();
+}
+
// ---------------------------------------------------------------------------
void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who) {
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index cecedb5..3b6c64d 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -1164,6 +1164,10 @@
cblk->cv.broadcast();
cblk->lock.unlock();
+ // refresh the audio configuration cache in this process to make sure we get new
+ // output parameters in getOutput_l() and createTrack_l()
+ AudioSystem::clearAudioConfigCache();
+
// if the new IAudioTrack is created, createTrack_l() will modify the
// following member variables: mAudioTrack, mCblkMemory and mCblk.
// It will also delete the strong references on previous IAudioTrack and IMemory
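
The new AudioSystem::clearAudioConfigCache() works because both caches are process-wide statics guarded by gLock, and AudioTrack simply flushes them before re-querying output parameters after an output change. Below is a minimal standalone sketch of the same pattern, using std::map and std::mutex in place of the framework's DefaultKeyedVector and Mutex; the class and method names are illustrative, not part of the real AudioSystem API.

    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <mutex>

    // Hypothetical stand-in for AudioSystem's process-wide caches.
    struct OutputDesc { uint32_t samplingRate; size_t frameCount; };

    class AudioConfigCache {
    public:
        // Drop every cached entry; the next lookup re-queries the policy service.
        void clear() {
            std::lock_guard<std::mutex> lock(mLock);
            mStreamToOutput.clear();
            mOutputs.clear();
        }

        // Returns true and fills *desc if the stream's output is still cached.
        bool getOutput(int stream, OutputDesc *desc) const {
            std::lock_guard<std::mutex> lock(mLock);
            std::map<int, int>::const_iterator it = mStreamToOutput.find(stream);
            if (it == mStreamToOutput.end()) return false;
            std::map<int, OutputDesc>::const_iterator out = mOutputs.find(it->second);
            if (out == mOutputs.end()) return false;
            *desc = out->second;
            return true;
        }

    private:
        mutable std::mutex mLock;            // plays the role of gLock
        std::map<int, int> mStreamToOutput;  // analogue of gStreamOutputMap
        std::map<int, OutputDesc> mOutputs;  // analogue of gOutputs
    };

Once the cache is cleared, the next lookup misses and the caller falls back to querying the audio policy service for fresh output parameters, which is exactly what getOutput_l() and createTrack_l() need after the output has changed.
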
diff --git a/media/libmediaplayerservice/StagefrightPlayer.cpp b/media/libmediaplayerservice/StagefrightPlayer.cpp
index 40e055c..cd4b1ef 100644
--- a/media/libmediaplayerservice/StagefrightPlayer.cpp
+++ b/media/libmediaplayerservice/StagefrightPlayer.cpp
@@ -72,16 +72,14 @@
status_t StagefrightPlayer::setVideoSurface(const sp<Surface> &surface) {
LOGV("setVideoSurface");
- mPlayer->setSurface(surface);
- return OK;
+ return mPlayer->setSurface(surface);
}
status_t StagefrightPlayer::setVideoSurfaceTexture(
const sp<ISurfaceTexture> &surfaceTexture) {
LOGV("setVideoSurfaceTexture");
- mPlayer->setSurfaceTexture(surfaceTexture);
- return OK;
+ return mPlayer->setSurfaceTexture(surfaceTexture);
}
status_t StagefrightPlayer::prepare() {
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 0251baf..605d056 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -124,7 +124,14 @@
: ATSParser::DISCONTINUITY_FORMATCHANGE,
extra);
} else {
- mTSParser->feedTSPacket(buffer, sizeof(buffer));
+ status_t err = mTSParser->feedTSPacket(buffer, sizeof(buffer));
+
+ if (err != OK) {
+ LOGE("TS Parser returned error %d", err);
+ mTSParser->signalEOS(err);
+ mEOS = true;
+ break;
+ }
}
mOffset += n;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 7fb141a..ee77f47 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -316,9 +316,11 @@
&cropLeft, &cropTop, &cropRight, &cropBottom));
LOGV("Video output format changed to %d x %d "
- "(crop: %d, %d, %d, %d)",
+ "(crop: %d x %d @ (%d, %d))",
width, height,
- cropLeft, cropTop, cropRight, cropBottom);
+ (cropRight - cropLeft + 1),
+ (cropBottom - cropTop + 1),
+ cropLeft, cropTop);
notifyListener(
MEDIA_SET_VIDEO_SIZE,
@@ -661,6 +663,19 @@
sp<AMessage> reply;
CHECK(msg->findMessage("reply", &reply));
+ if (IsFlushingState(audio ? mFlushingAudio : mFlushingVideo)) {
+ // We're currently attempting to flush the decoder. To complete
+ // the flush, the decoder needs all of its buffers back, so we
+ // don't want any output buffers it sent us (from before we
+ // initiated the flush) to be stuck in the renderer's queue.
+
+ LOGV("we're still flushing the %s decoder, sending its output buffer"
+ " right back.", audio ? "audio" : "video");
+
+ reply->post();
+ return;
+ }
+
sp<RefBase> obj;
CHECK(msg->findObject("buffer", &obj));
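
The flush check added above keeps decoder output buffers out of the renderer while a flush is pending, by posting the reply straight back to the decoder. A compact, illustrative-only restatement of that gate in plain C++ follows; the callback parameters stand in for the real AMessage plumbing.

    #include <functional>

    struct Buffer {};  // placeholder for the real decoder output buffer

    // Illustrative-only: while a flush is pending, every decoder output buffer
    // is returned immediately instead of being queued for rendering.
    void handleDecoderOutput(bool flushing,
                             const Buffer &buffer,
                             const std::function<void()> &returnToDecoder,
                             const std::function<void(const Buffer &)> &sendToRenderer) {
        if (flushing) {
            // The decoder needs all of its buffers back to finish the flush,
            // so don't let this one sit in the renderer's queue.
            returnToDecoder();
            return;
        }
        sendToRenderer(buffer);
    }
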
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 35ed43f..8f213da 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -395,29 +395,40 @@
postDrainVideoQueue();
}
- if (mSyncQueues && !mAudioQueue.empty() && !mVideoQueue.empty()) {
- int64_t firstAudioTimeUs;
- int64_t firstVideoTimeUs;
- CHECK((*mAudioQueue.begin()).mBuffer->meta()
- ->findInt64("timeUs", &firstAudioTimeUs));
- CHECK((*mVideoQueue.begin()).mBuffer->meta()
- ->findInt64("timeUs", &firstVideoTimeUs));
-
- int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
-
- LOGV("queueDiff = %.2f secs", diff / 1E6);
-
- if (diff > 100000ll) {
- // Audio data starts More than 0.1 secs before video.
- // Drop some audio.
-
- (*mAudioQueue.begin()).mNotifyConsumed->post();
- mAudioQueue.erase(mAudioQueue.begin());
- return;
- }
-
- syncQueuesDone();
+ if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
+ return;
}
+
+ sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
+ sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
+
+ if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
+ // EOS signalled on either queue.
+ syncQueuesDone();
+ return;
+ }
+
+ int64_t firstAudioTimeUs;
+ int64_t firstVideoTimeUs;
+ CHECK(firstAudioBuffer->meta()
+ ->findInt64("timeUs", &firstAudioTimeUs));
+ CHECK(firstVideoBuffer->meta()
+ ->findInt64("timeUs", &firstVideoTimeUs));
+
+ int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
+
+ LOGV("queueDiff = %.2f secs", diff / 1E6);
+
+ if (diff > 100000ll) {
+ // Audio data starts more than 0.1 secs before video.
+ // Drop some audio.
+
+ (*mAudioQueue.begin()).mNotifyConsumed->post();
+ mAudioQueue.erase(mAudioQueue.begin());
+ return;
+ }
+
+ syncQueuesDone();
}
void NuPlayer::Renderer::syncQueuesDone() {
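
The restructured sync logic above treats a NULL buffer at the head of either queue as an EOS marker and otherwise compares the first timestamps, dropping leading audio that starts more than 0.1 seconds before the first video frame. Here is a standalone sketch of that decision, using simple Entry records rather than the real ABuffer/AMessage queue entries.

    #include <cstddef>
    #include <cstdint>
    #include <deque>

    // An entry with hasBuffer == false stands in for the NULL-buffer EOS marker
    // checked in the code above.
    struct Entry { bool hasBuffer; int64_t timeUs; };

    // Returns how many leading audio entries to drop on this pass before the
    // two queues can be considered in sync.
    size_t audioEntriesToDrop(const std::deque<Entry> &audio,
                              const std::deque<Entry> &video) {
        if (audio.empty() || video.empty()) return 0;
        if (!audio.front().hasBuffer || !video.front().hasBuffer) return 0;  // EOS: sync done

        int64_t diffUs = video.front().timeUs - audio.front().timeUs;

        // Audio leading video by more than 0.1 s: drop one audio entry and retry.
        return diffUs > 100000ll ? 1 : 0;
    }
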
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index a6a3a18..a741987 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -42,7 +42,7 @@
void NuPlayer::StreamingSource::start() {
mStreamListener = new NuPlayerStreamListener(mSource, 0);
- mTSParser = new ATSParser;
+ mTSParser = new ATSParser(ATSParser::TS_TIMESTAMPS_ARE_ABSOLUTE);
mStreamListener->start();
}
@@ -86,7 +86,15 @@
: ATSParser::DISCONTINUITY_FORMATCHANGE,
extra);
} else {
- mTSParser->feedTSPacket(buffer, sizeof(buffer));
+ status_t err = mTSParser->feedTSPacket(buffer, sizeof(buffer));
+
+ if (err != OK) {
+ LOGE("TS Parser returned error %d", err);
+
+ mTSParser->signalEOS(err);
+ mEOS = true;
+ break;
+ }
}
}
}
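
Both streaming sources now treat a transport-stream parser failure as a hard end of stream: the error is forwarded via signalEOS() and no further packets are pulled. A self-contained sketch of that pattern is below; the TSParser type and its dummy method bodies are stand-ins for the real ATSParser, and 188 is the standard MPEG-2 TS packet size.

    #include <cstddef>
    #include <cstdint>

    typedef int32_t status_t;   // minimal stand-ins so the sketch is self-contained
    enum { OK = 0 };

    struct TSParser {           // placeholder for ATSParser
        status_t feedTSPacket(const void *, size_t) { return OK; }  // dummy body
        void signalEOS(status_t) {}                                 // dummy body
    };

    static const size_t kTSPacketSize = 188;  // standard MPEG-2 TS packet size

    // Feed one packet; on a parser error, push EOS downstream and stop pulling.
    bool feedOnePacket(TSParser *parser, const uint8_t *packet, bool *eos) {
        status_t err = parser->feedTSPacket(packet, kTSPacketSize);
        if (err != OK) {
            parser->signalEOS(err);  // downstream sources see the error as EOS
            *eos = true;
            return false;            // caller breaks out of its read loop
        }
        return true;
    }
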
diff --git a/media/libstagefright/AACExtractor.cpp b/media/libstagefright/AACExtractor.cpp
index a5a6b64..52b1200 100644
--- a/media/libstagefright/AACExtractor.cpp
+++ b/media/libstagefright/AACExtractor.cpp
@@ -33,8 +33,6 @@
namespace android {
-#define ADTS_HEADER_LENGTH 7
-
class AACSource : public MediaSource {
public:
AACSource(const sp<DataSource> &source,
@@ -88,7 +86,16 @@
return 0;
}
-static size_t getFrameSize(const sp<DataSource> &source, off64_t offset) {
+// Returns the frame length in bytes as described in an ADTS header starting at the given offset,
+// or 0 if the size can't be read due to an error in the header or a read failure.
+ // The returned value is the AAC frame size including the ADTS header length (regardless of
+ // whether a CRC is present).
+// If headerSize is non-NULL, it will be used to return the size of the header of this ADTS frame.
+static size_t getAdtsFrameLength(const sp<DataSource> &source, off64_t offset, size_t* headerSize) {
+
+ const size_t kAdtsHeaderLengthNoCrc = 7;
+ const size_t kAdtsHeaderLengthWithCrc = 9;
+
size_t frameSize = 0;
uint8_t syncword[2];
@@ -111,7 +118,15 @@
}
frameSize = (header[0] & 0x3) << 11 | header[1] << 3 | header[2] >> 5;
- frameSize += ADTS_HEADER_LENGTH + protectionAbsent ? 0 : 2;
+
+ // protectionAbsent is 0 if there is CRC
+ size_t headSize = protectionAbsent ? kAdtsHeaderLengthNoCrc : kAdtsHeaderLengthWithCrc;
+ if (headSize > frameSize) {
+ return 0;
+ }
+ if (headerSize != NULL) {
+ *headerSize = headSize;
+ }
return frameSize;
}
@@ -148,7 +163,7 @@
if (mDataSource->getSize(&streamSize) == OK) {
while (offset < streamSize) {
- if ((frameSize = getFrameSize(source, offset)) == 0) {
+ if ((frameSize = getAdtsFrameLength(source, offset, NULL)) == 0) {
return;
}
@@ -268,8 +283,8 @@
}
}
- size_t frameSize, frameSizeWithoutHeader;
- if ((frameSize = getFrameSize(mDataSource, mOffset)) == 0) {
+ size_t frameSize, frameSizeWithoutHeader, headerSize;
+ if ((frameSize = getAdtsFrameLength(mDataSource, mOffset, &headerSize)) == 0) {
return ERROR_END_OF_STREAM;
}
@@ -279,8 +294,8 @@
return err;
}
- frameSizeWithoutHeader = frameSize - ADTS_HEADER_LENGTH;
- if (mDataSource->readAt(mOffset + ADTS_HEADER_LENGTH, buffer->data(),
+ frameSizeWithoutHeader = frameSize - headerSize;
+ if (mDataSource->readAt(mOffset + headerSize, buffer->data(),
frameSizeWithoutHeader) != (ssize_t)frameSizeWithoutHeader) {
buffer->release();
buffer = NULL;
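
getAdtsFrameLength() relies on the fact that the 13-bit aac_frame_length field in an ADTS header already includes the header itself, and that the header is 7 bytes without a CRC and 9 bytes with one. A standalone sketch of the same math follows, assuming the first 7 header bytes have already been read and the syncword validated; the function name is illustrative.

    #include <cstddef>
    #include <cstdint>

    // 'hdr' holds at least the first 7 bytes of an ADTS frame whose syncword
    // has already been validated. Returns 0 on a malformed header.
    static size_t adtsFrameLength(const uint8_t hdr[7], size_t *headerSize) {
        const size_t kHeaderNoCrc = 7;
        const size_t kHeaderWithCrc = 9;

        // protection_absent is the low bit of byte 1; 0 means a 16-bit CRC follows.
        bool protectionAbsent = (hdr[1] & 0x01) != 0;

        // aac_frame_length: 13 bits spanning bytes 3..5, header bytes included.
        size_t frameLength = ((hdr[3] & 0x03) << 11) | (hdr[4] << 3) | (hdr[5] >> 5);

        size_t headSize = protectionAbsent ? kHeaderNoCrc : kHeaderWithCrc;
        if (headSize > frameLength) {
            return 0;
        }
        if (headerSize != NULL) {
            *headerSize = headSize;
        }
        return frameLength;
    }

The payload size is then frameLength - headSize, which is what the read loop above uses as frameSizeWithoutHeader.
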
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 5d91f6a..e9dc61c 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1738,7 +1738,17 @@
void ACodec::LoadedToIdleState::stateEntered() {
LOGV("[%s] Now Loaded->Idle", mCodec->mComponentName.c_str());
- CHECK_EQ(allocateBuffers(), (status_t)OK);
+ status_t err;
+ if ((err = allocateBuffers()) != OK) {
+ LOGE("Failed to allocate buffers after transitioning to IDLE state "
+ "(error 0x%08x)",
+ err);
+
+ sp<AMessage> notify = mCodec->mNotify->dup();
+ notify->setInt32("what", ACodec::kWhatError);
+ notify->setInt32("omx-error", OMX_ErrorUndefined);
+ notify->post();
+ }
}
status_t ACodec::LoadedToIdleState::allocateBuffers() {
@@ -2046,8 +2056,18 @@
mCodec->mNode, OMX_CommandPortEnable, kPortIndexOutput),
(status_t)OK);
- CHECK_EQ(mCodec->allocateBuffersOnPort(kPortIndexOutput),
- (status_t)OK);
+ status_t err;
+ if ((err = mCodec->allocateBuffersOnPort(
+ kPortIndexOutput)) != OK) {
+ LOGE("Failed to allocate output port buffers after "
+ "port reconfiguration (error 0x%08x)",
+ err);
+
+ sp<AMessage> notify = mCodec->mNotify->dup();
+ notify->setInt32("what", ACodec::kWhatError);
+ notify->setInt32("omx-error", OMX_ErrorUndefined);
+ notify->post();
+ }
return true;
} else if (data1 == (OMX_U32)OMX_CommandPortEnable) {
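
Both hunks above replace fatal CHECK_EQ asserts with an error notification posted to the codec's observer, so a buffer-allocation failure no longer brings down the media server. A simplified, illustrative-only version of that reporting path is shown below; the Notifier type stands in for the dup()'d AMessage that carries kWhatError.

    #include <cstdint>
    #include <cstdio>

    typedef int32_t status_t;
    enum { OK = 0 };

    struct Notifier {
        // Stands in for posting a dup()'d AMessage with "what" set to kWhatError.
        void postError(uint32_t omxError) {
            std::printf("codec error, omx-error 0x%08x\n", omxError);
        }
    };

    // Report an allocation failure instead of asserting on it.
    void reportIfFailed(Notifier *notify, status_t err) {
        if (err != OK) {
            // Previously a CHECK_EQ(err, (status_t)OK) here aborted the process.
            notify->postError(0x80001001);  // value of OMX_ErrorUndefined in OMX_Core.h
        }
    }
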
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index bc42a42..f98b0de 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -513,7 +513,8 @@
// If we did this later, audio would continue playing while we
// shutdown the video-related resources and the player appear to
// not be as responsive to a reset request.
- if (mAudioPlayer == NULL && mAudioSource != NULL) {
+ if ((mAudioPlayer == NULL || !(mFlags & AUDIOPLAYER_STARTED))
+ && mAudioSource != NULL) {
// If we had an audio player, it would have effectively
// taken possession of the audio source and stopped it when
// _it_ is stopped. Otherwise this is still our responsibility.
@@ -1152,22 +1153,26 @@
return (mFlags & PLAYING) || (mFlags & CACHE_UNDERRUN);
}
-void AwesomePlayer::setSurface(const sp<Surface> &surface) {
+status_t AwesomePlayer::setSurface(const sp<Surface> &surface) {
Mutex::Autolock autoLock(mLock);
mSurface = surface;
- setNativeWindow_l(surface);
+ return setNativeWindow_l(surface);
}
-void AwesomePlayer::setSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture) {
+status_t AwesomePlayer::setSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture) {
Mutex::Autolock autoLock(mLock);
mSurface.clear();
+
+ status_t err;
if (surfaceTexture != NULL) {
- setNativeWindow_l(new SurfaceTextureClient(surfaceTexture));
+ err = setNativeWindow_l(new SurfaceTextureClient(surfaceTexture));
} else {
- setNativeWindow_l(NULL);
+ err = setNativeWindow_l(NULL);
}
+
+ return err;
}
void AwesomePlayer::shutdownVideoDecoder_l() {
@@ -1190,11 +1195,11 @@
LOGI("video decoder shutdown completed");
}
-void AwesomePlayer::setNativeWindow_l(const sp<ANativeWindow> &native) {
+status_t AwesomePlayer::setNativeWindow_l(const sp<ANativeWindow> &native) {
mNativeWindow = native;
if (mVideoSource == NULL) {
- return;
+ return OK;
}
LOGI("attempting to reconfigure to use new surface");
@@ -1206,7 +1211,12 @@
shutdownVideoDecoder_l();
- CHECK_EQ(initVideoDecoder(), (status_t)OK);
+ status_t err = initVideoDecoder();
+
+ if (err != OK) {
+ LOGE("failed to reinstantiate video decoder after surface change.");
+ return err;
+ }
if (mLastVideoTimeUs >= 0) {
mSeeking = SEEK;
@@ -1217,6 +1227,8 @@
if (wasPlaying) {
play_l();
}
+
+ return OK;
}
void AwesomePlayer::setAudioSink(
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 5f58090..46d87df 100755
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -1173,7 +1173,7 @@
size_t sampleCount, int32_t duration) {
if (duration == 0) {
- LOGW("%d 0-duration samples found: %d", sampleCount);
+ LOGW("0-duration samples found: %d", sampleCount);
}
SttsTableEntry sttsEntry(sampleCount, duration);
mSttsTableEntries.push_back(sttsEntry);
@@ -1304,7 +1304,7 @@
void MPEG4Writer::writeChunkToFile(Chunk* chunk) {
LOGV("writeChunkToFile: %lld from %s track",
- chunk.mTimestampUs, chunk.mTrack->isAudio()? "audio": "video");
+ chunk->mTimeStampUs, chunk->mTrack->isAudio()? "audio": "video");
int32_t isFirstSample = true;
while (!chunk->mSamples.empty()) {
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 7f09319..d5b013d 100755
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -1990,7 +1990,7 @@
CHECK(mIsEncoder);
if (mDecodingTimeList.empty()) {
- CHECK(mNoMoreOutputData);
+ CHECK(mSignalledEOS || mNoMoreOutputData);
// No corresponding input frame available.
// This could happen when EOS is reached.
return 0;
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index c2e6707..50dd804 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -46,9 +46,10 @@
mSynchronousMode(true),
mConnectedApi(NO_CONNECTED_API),
mFrameRate(30),
+ mStopped(false),
mNumFramesReceived(0),
mNumFramesEncoded(0),
- mStopped(false) {
+ mFirstFrameTimestamp(0) {
LOGV("SurfaceMediaSource::SurfaceMediaSource");
sp<ISurfaceComposer> composer(ComposerService::getComposerService());
mGraphicBufferAlloc = composer->createGraphicBufferAlloc();
@@ -179,9 +180,11 @@
// TODO: Currently just uses mDefaultWidth/Height. In the future
// we might declare mHeight and mWidth and check against those here.
if ((w != 0) || (h != 0)) {
- LOGE("dequeuebuffer: invalid buffer size! Req: %dx%d, Found: %dx%d",
- mDefaultWidth, mDefaultHeight, w, h);
- return BAD_VALUE;
+ if ((w != mDefaultWidth) || (h != mDefaultHeight)) {
+ LOGE("dequeuebuffer: invalid buffer size! Req: %dx%d, Found: %dx%d",
+ mDefaultWidth, mDefaultHeight, w, h);
+ return BAD_VALUE;
+ }
}
status_t returnFlags(OK);
@@ -469,10 +472,25 @@
return -EINVAL;
}
+ if (mNumFramesReceived == 0) {
+ mFirstFrameTimestamp = timestamp;
+ // Initial delay
+ if (mStartTimeNs > 0) {
+ if (timestamp < mStartTimeNs) {
+ // This frame predates start of record, discard
+ mSlots[bufIndex].mBufferState = BufferSlot::FREE;
+ mDequeueCondition.signal();
+ return OK;
+ }
+ mStartTimeNs = timestamp - mStartTimeNs;
+ }
+ }
+ timestamp = mStartTimeNs + (timestamp - mFirstFrameTimestamp);
+
+ mNumFramesReceived++;
if (mSynchronousMode) {
// in synchronous mode we queue all buffers in a FIFO
mQueue.push_back(bufIndex);
- mNumFramesReceived++;
LOGV("Client queued buf# %d @slot: %d, Q size = %d, handle = %p, timestamp = %lld",
mNumFramesReceived, bufIndex, mQueue.size(),
mSlots[bufIndex].mGraphicBuffer->handle, timestamp);
@@ -682,6 +700,13 @@
status_t SurfaceMediaSource::start(MetaData *params)
{
LOGV("started!");
+
+ mStartTimeNs = 0;
+ int64_t startTimeUs;
+ if (params && params->findInt64(kKeyTime, &startTimeUs)) {
+ mStartTimeNs = startTimeUs * 1000;
+ }
+
return OK;
}
@@ -751,6 +776,7 @@
mCurrentBuf = mSlots[mCurrentSlot].mGraphicBuffer;
int64_t prevTimeStamp = mCurrentTimestamp;
mCurrentTimestamp = mSlots[mCurrentSlot].mTimestamp;
+
mNumFramesEncoded++;
// Pass the data to the MediaBuffer. Pass in only the metadata
passMetadataBufferLocked(buffer);
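
The queueBuffer() changes rebase incoming buffer timestamps onto the recording timeline: frames older than the requested start time are dropped, and once the first frame is accepted at time T0 against a start time S, mStartTimeNs holds the initial delay T0 - S, so a later frame at T is stamped (T0 - S) + (T - T0) = T - S. A standalone sketch of that arithmetic (nanosecond units, names illustrative):

    #include <cstdint>

    // All times in nanoseconds. initialDelayNs is T0 - S once the first frame
    // at T0 has been accepted against a start time S (0 when no start time was
    // set), and firstFrameTimestampNs is T0 itself.
    int64_t rebaseTimestampNs(int64_t frameTimestampNs,
                              int64_t firstFrameTimestampNs,
                              int64_t initialDelayNs) {
        // (T0 - S) + (T - T0) == T - S: the output timeline starts at S.
        return initialDelayNs + (frameTimestampNs - firstFrameTimestampNs);
    }

For example, with S = 4 s and T0 = 5 s, a frame arriving at T = 5.5 s is stamped 1.5 s, exactly T - S.
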
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 14476d3..24cf77c 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -84,8 +84,8 @@
bool isPlaying() const;
- void setSurface(const sp<Surface> &surface);
- void setSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture);
+ status_t setSurface(const sp<Surface> &surface);
+ status_t setSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture);
void setAudioSink(const sp<MediaPlayerBase::AudioSink> &audioSink);
status_t setLooping(bool shouldLoop);
@@ -298,7 +298,7 @@
void postAudioSeekComplete_l();
void shutdownVideoDecoder_l();
- void setNativeWindow_l(const sp<ANativeWindow> &native);
+ status_t setNativeWindow_l(const sp<ANativeWindow> &native);
bool isStreamingHTTP() const;
void sendCacheStats();
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 5bbc2b4..017d01c 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -48,7 +48,7 @@
bool parsePID(
unsigned pid, unsigned payload_unit_start_indicator,
- ABitReader *br);
+ ABitReader *br, status_t *err);
void signalDiscontinuity(
DiscontinuityType type, const sp<AMessage> &extra);
@@ -77,7 +77,7 @@
bool mFirstPTSValid;
uint64_t mFirstPTS;
- void parseProgramMap(ABitReader *br);
+ status_t parseProgramMap(ABitReader *br);
DISALLOW_EVIL_CONSTRUCTORS(Program);
};
@@ -111,8 +111,6 @@
sp<ABuffer> mBuffer;
sp<AnotherPacketSource> mSource;
bool mPayloadStarted;
- DiscontinuityType mPendingDiscontinuity;
- sp<AMessage> mPendingDiscontinuityExtra;
ElementaryStreamQueue *mQueue;
@@ -125,9 +123,6 @@
void extractAACFrames(const sp<ABuffer> &buffer);
- void deferDiscontinuity(
- DiscontinuityType type, const sp<AMessage> &extra);
-
DISALLOW_EVIL_CONSTRUCTORS(Stream);
};
@@ -145,14 +140,17 @@
bool ATSParser::Program::parsePID(
unsigned pid, unsigned payload_unit_start_indicator,
- ABitReader *br) {
+ ABitReader *br, status_t *err) {
+ *err = OK;
+
if (pid == mProgramMapPID) {
if (payload_unit_start_indicator) {
unsigned skip = br->getBits(8);
br->skipBits(skip * 8);
}
- parseProgramMap(br);
+ *err = parseProgramMap(br);
+
return true;
}
@@ -185,7 +183,7 @@
unsigned mPID;
};
-void ATSParser::Program::parseProgramMap(ABitReader *br) {
+status_t ATSParser::Program::parseProgramMap(ABitReader *br) {
unsigned table_id = br->getBits(8);
LOGV(" table_id = %u", table_id);
CHECK_EQ(table_id, 0x02u);
@@ -288,7 +286,60 @@
}
if (PIDsChanged) {
- mStreams.clear();
+#if 0
+ LOGI("before:");
+ for (size_t i = 0; i < mStreams.size(); ++i) {
+ sp<Stream> stream = mStreams.editValueAt(i);
+
+ LOGI("PID 0x%08x => type 0x%02x", stream->pid(), stream->type());
+ }
+
+ LOGI("after:");
+ for (size_t i = 0; i < infos.size(); ++i) {
+ StreamInfo &info = infos.editItemAt(i);
+
+ LOGI("PID 0x%08x => type 0x%02x", info.mPID, info.mType);
+ }
+#endif
+
+ // The only case we can recover from is if we have two streams
+ // and they switched PIDs.
+
+ bool success = false;
+
+ if (mStreams.size() == 2 && infos.size() == 2) {
+ const StreamInfo &info1 = infos.itemAt(0);
+ const StreamInfo &info2 = infos.itemAt(1);
+
+ sp<Stream> s1 = mStreams.editValueAt(0);
+ sp<Stream> s2 = mStreams.editValueAt(1);
+
+ bool caseA =
+ info1.mPID == s1->pid() && info1.mType == s2->type()
+ && info2.mPID == s2->pid() && info2.mType == s1->type();
+
+ bool caseB =
+ info1.mPID == s2->pid() && info1.mType == s1->type()
+ && info2.mPID == s1->pid() && info2.mType == s2->type();
+
+ if (caseA || caseB) {
+ unsigned pid1 = s1->pid();
+ unsigned pid2 = s2->pid();
+ s1->setPID(pid2);
+ s2->setPID(pid1);
+
+ mStreams.clear();
+ mStreams.add(s1->pid(), s1);
+ mStreams.add(s2->pid(), s2);
+
+ success = true;
+ }
+ }
+
+ if (!success) {
+ LOGI("Stream PIDs changed and we cannot recover.");
+ return ERROR_MALFORMED;
+ }
}
for (size_t i = 0; i < infos.size(); ++i) {
@@ -299,13 +350,10 @@
if (index < 0) {
sp<Stream> stream = new Stream(this, info.mPID, info.mType);
mStreams.add(info.mPID, stream);
-
- if (PIDsChanged) {
- sp<AMessage> extra;
- stream->signalDiscontinuity(DISCONTINUITY_FORMATCHANGE, extra);
- }
}
}
+
+ return OK;
}
sp<MediaSource> ATSParser::Program::getSource(SourceType type) {
@@ -325,14 +373,16 @@
}
int64_t ATSParser::Program::convertPTSToTimestamp(uint64_t PTS) {
- if (!mFirstPTSValid) {
- mFirstPTSValid = true;
- mFirstPTS = PTS;
- PTS = 0;
- } else if (PTS < mFirstPTS) {
- PTS = 0;
- } else {
- PTS -= mFirstPTS;
+ if (!(mParser->mFlags & TS_TIMESTAMPS_ARE_ABSOLUTE)) {
+ if (!mFirstPTSValid) {
+ mFirstPTSValid = true;
+ mFirstPTS = PTS;
+ PTS = 0;
+ } else if (PTS < mFirstPTS) {
+ PTS = 0;
+ } else {
+ PTS -= mFirstPTS;
+ }
}
return (PTS * 100) / 9;
@@ -345,12 +395,8 @@
: mProgram(program),
mElementaryPID(elementaryPID),
mStreamType(streamType),
- mBuffer(new ABuffer(192 * 1024)),
mPayloadStarted(false),
- mPendingDiscontinuity(DISCONTINUITY_NONE),
mQueue(NULL) {
- mBuffer->setRange(0, 0);
-
switch (mStreamType) {
case STREAMTYPE_H264:
mQueue = new ElementaryStreamQueue(ElementaryStreamQueue::H264);
@@ -380,6 +426,11 @@
}
LOGV("new stream PID 0x%02x, type 0x%02x", elementaryPID, streamType);
+
+ if (mQueue != NULL) {
+ mBuffer = new ABuffer(192 * 1024);
+ mBuffer->setRange(0, 0);
+ }
}
ATSParser::Stream::~Stream() {
@@ -389,6 +440,10 @@
void ATSParser::Stream::parse(
unsigned payload_unit_start_indicator, ABitReader *br) {
+ if (mQueue == NULL) {
+ return;
+ }
+
if (payload_unit_start_indicator) {
if (mPayloadStarted) {
// Otherwise we run the danger of receiving the trailing bytes
@@ -427,6 +482,10 @@
void ATSParser::Stream::signalDiscontinuity(
DiscontinuityType type, const sp<AMessage> &extra) {
+ if (mQueue == NULL) {
+ return;
+ }
+
mPayloadStarted = false;
mBuffer->setRange(0, 0);
@@ -451,8 +510,6 @@
if (mSource != NULL) {
mSource->queueDiscontinuity(type, extra);
- } else {
- deferDiscontinuity(type, extra);
}
break;
}
@@ -463,15 +520,6 @@
}
}
-void ATSParser::Stream::deferDiscontinuity(
- DiscontinuityType type, const sp<AMessage> &extra) {
- if (type > mPendingDiscontinuity) {
- // Only upgrade discontinuities.
- mPendingDiscontinuity = type;
- mPendingDiscontinuityExtra = extra;
- }
-}
-
void ATSParser::Stream::signalEOS(status_t finalResult) {
if (mSource != NULL) {
mSource->signalEOS(finalResult);
@@ -656,10 +704,6 @@
const uint8_t *data, size_t size) {
LOGV("onPayloadData mStreamType=0x%02x", mStreamType);
- if (mQueue == NULL) {
- return;
- }
-
CHECK(PTS_DTS_flags == 2 || PTS_DTS_flags == 3);
int64_t timeUs = mProgram->convertPTSToTimestamp(PTS);
@@ -679,14 +723,6 @@
mElementaryPID, mStreamType);
mSource = new AnotherPacketSource(meta);
-
- if (mPendingDiscontinuity != DISCONTINUITY_NONE) {
- mSource->queueDiscontinuity(
- mPendingDiscontinuity, mPendingDiscontinuityExtra);
- mPendingDiscontinuity = DISCONTINUITY_NONE;
- mPendingDiscontinuityExtra.clear();
- }
-
mSource->queueAccessUnit(accessUnit);
}
} else if (mQueue->getFormat() != NULL) {
@@ -734,17 +770,18 @@
////////////////////////////////////////////////////////////////////////////////
-ATSParser::ATSParser() {
+ATSParser::ATSParser(uint32_t flags)
+ : mFlags(flags) {
}
ATSParser::~ATSParser() {
}
-void ATSParser::feedTSPacket(const void *data, size_t size) {
+status_t ATSParser::feedTSPacket(const void *data, size_t size) {
CHECK_EQ(size, kTSPacketSize);
ABitReader br((const uint8_t *)data, kTSPacketSize);
- parseTS(&br);
+ return parseTS(&br);
}
void ATSParser::signalDiscontinuity(
@@ -822,7 +859,7 @@
MY_LOGV(" CRC = 0x%08x", br->getBits(32));
}
-void ATSParser::parsePID(
+status_t ATSParser::parsePID(
ABitReader *br, unsigned PID,
unsigned payload_unit_start_indicator) {
if (PID == 0) {
@@ -831,13 +868,18 @@
br->skipBits(skip * 8);
}
parseProgramAssociationTable(br);
- return;
+ return OK;
}
bool handled = false;
for (size_t i = 0; i < mPrograms.size(); ++i) {
+ status_t err;
if (mPrograms.editItemAt(i)->parsePID(
- PID, payload_unit_start_indicator, br)) {
+ PID, payload_unit_start_indicator, br, &err)) {
+ if (err != OK) {
+ return err;
+ }
+
handled = true;
break;
}
@@ -846,6 +888,8 @@
if (!handled) {
LOGV("PID 0x%04x not handled.", PID);
}
+
+ return OK;
}
void ATSParser::parseAdaptationField(ABitReader *br) {
@@ -855,7 +899,7 @@
}
}
-void ATSParser::parseTS(ABitReader *br) {
+status_t ATSParser::parseTS(ABitReader *br) {
LOGV("---");
unsigned sync_byte = br->getBits(8);
@@ -886,8 +930,10 @@
}
if (adaptation_field_control == 1 || adaptation_field_control == 3) {
- parsePID(br, PID, payload_unit_start_indicator);
+ return parsePID(br, PID, payload_unit_start_indicator);
}
+
+ return OK;
}
sp<MediaSource> ATSParser::getSource(SourceType type) {
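
convertPTSToTimestamp() converts 90 kHz PTS ticks to microseconds via (PTS * 100) / 9, and with the new TS_TIMESTAMPS_ARE_ABSOLUTE flag it skips the rebasing that otherwise maps the first PTS seen to media time 0. A standalone sketch of that conversion, with the first-PTS state passed in explicitly rather than kept in the Program object:

    #include <cstdint>

    // PTS values are 90 kHz clock ticks; one tick is 100/9 microseconds.
    // When timestampsAreAbsolute is false, the stream is rebased so the first
    // PTS seen maps to media time 0 (and earlier PTS values clamp to 0).
    int64_t ptsToTimeUs(uint64_t pts,
                        bool timestampsAreAbsolute,
                        bool *firstPtsValid,    // caller-owned rebasing state
                        uint64_t *firstPts) {
        if (!timestampsAreAbsolute) {
            if (!*firstPtsValid) {
                *firstPtsValid = true;
                *firstPts = pts;
                pts = 0;
            } else if (pts < *firstPts) {
                pts = 0;
            } else {
                pts -= *firstPts;
            }
        }
        return (int64_t)((pts * 100) / 9);
    }

A PTS delta of 90000 ticks converts to exactly 1,000,000 us (one second) either way; the flag only controls whether the first PTS is treated as media time 0 or as an absolute position.
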
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 1e6451d..388cb54 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -38,9 +38,18 @@
DISCONTINUITY_FORMATCHANGE
};
- ATSParser();
+ enum Flags {
+ // The 90kHz clock (PTS/DTS) is absolute, i.e. PTS=0 corresponds to
+ // a media time of 0.
+ // If this flag is _not_ specified, the first PTS encountered in a
+ // program of this stream will be assumed to correspond to media time 0
+ // instead.
+ TS_TIMESTAMPS_ARE_ABSOLUTE = 1
+ };
- void feedTSPacket(const void *data, size_t size);
+ ATSParser(uint32_t flags = 0);
+
+ status_t feedTSPacket(const void *data, size_t size);
void signalDiscontinuity(
DiscontinuityType type, const sp<AMessage> &extra);
@@ -73,18 +82,19 @@
struct Program;
struct Stream;
+ uint32_t mFlags;
Vector<sp<Program> > mPrograms;
void parseProgramAssociationTable(ABitReader *br);
void parseProgramMap(ABitReader *br);
void parsePES(ABitReader *br);
- void parsePID(
+ status_t parsePID(
ABitReader *br, unsigned PID,
unsigned payload_unit_start_indicator);
void parseAdaptationField(ABitReader *br);
- void parseTS(ABitReader *br);
+ status_t parseTS(ABitReader *br);
DISALLOW_EVIL_CONSTRUCTORS(ATSParser);
};
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index 8250ad1..17cf45a 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -210,13 +210,10 @@
if (n < (ssize_t)kTSPacketSize) {
return (n < 0) ? (status_t)n : ERROR_END_OF_STREAM;
- } else {
- mParser->feedTSPacket(packet, kTSPacketSize);
}
mOffset += n;
-
- return OK;
+ return mParser->feedTSPacket(packet, kTSPacketSize);
}
void MPEG2TSExtractor::setLiveSession(const sp<LiveSession> &liveSession) {
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index d643a0b..d663602 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -106,13 +106,14 @@
mEglSurface = eglCreateWindowSurface(mEglDisplay, mGlConfig,
window.get(), NULL);
} else {
- EGLint pbufferAttribs[] = {
- EGL_WIDTH, getSurfaceWidth(),
- EGL_HEIGHT, getSurfaceHeight(),
- EGL_NONE };
+ LOGV("No actual display. Choosing EGLSurface based on SurfaceMediaSource");
+ sp<SurfaceMediaSource> sms = new SurfaceMediaSource(
+ getSurfaceWidth(), getSurfaceHeight());
+ sp<SurfaceTextureClient> stc = new SurfaceTextureClient(sms);
+ sp<ANativeWindow> window = stc;
- mEglSurface = eglCreatePbufferSurface(mEglDisplay, mGlConfig,
- pbufferAttribs);
+ mEglSurface = eglCreateWindowSurface(mEglDisplay, mGlConfig,
+ window.get(), NULL);
}
ASSERT_EQ(EGL_SUCCESS, eglGetError());
ASSERT_NE(EGL_NO_SURFACE, mEglSurface);
@@ -408,7 +409,6 @@
mSTC.clear();
mANW.clear();
GLTest::TearDown();
- eglDestroySurface(mEglDisplay, mSmsEglSurface);
}
void setUpEGLSurfaceFromMediaRecorder(sp<MediaRecorder>& mr);
@@ -419,8 +419,6 @@
sp<SurfaceMediaSource> mSMS;
sp<SurfaceTextureClient> mSTC;
sp<ANativeWindow> mANW;
- EGLConfig mSMSGlConfig;
- EGLSurface mSmsEglSurface;
};
/////////////////////////////////////////////////////////////////////
@@ -462,7 +460,7 @@
glClear(GL_COLOR_BUFFER_BIT);
// The following call dequeues and queues the buffer
- eglSwapBuffers(mEglDisplay, mSmsEglSurface);
+ eglSwapBuffers(mEglDisplay, mEglSurface);
glDisable(GL_SCISSOR_TEST);
}
@@ -488,19 +486,12 @@
mSTC = new SurfaceTextureClient(iST);
mANW = mSTC;
- EGLint numConfigs = 0;
- EXPECT_TRUE(eglChooseConfig(mEglDisplay, getConfigAttribs(), &mSMSGlConfig,
- 1, &numConfigs));
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
-
- LOGV("Native Window = %p, mSTC = %p", mANW.get(), mSTC.get());
-
- mSmsEglSurface = eglCreateWindowSurface(mEglDisplay, mSMSGlConfig,
+ mEglSurface = eglCreateWindowSurface(mEglDisplay, mGlConfig,
mANW.get(), NULL);
ASSERT_EQ(EGL_SUCCESS, eglGetError());
- ASSERT_NE(EGL_NO_SURFACE, mSmsEglSurface) ;
+ ASSERT_NE(EGL_NO_SURFACE, mEglSurface) ;
- EXPECT_TRUE(eglMakeCurrent(mEglDisplay, mSmsEglSurface, mSmsEglSurface,
+ EXPECT_TRUE(eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface,
mEglContext));
ASSERT_EQ(EGL_SUCCESS, eglGetError());
}
@@ -778,9 +769,9 @@
// Test to examine whether we can choose the Recordable Android GLConfig
// DummyRecorder used- no real encoding here
-TEST_F(SurfaceMediaSourceGLTest, ChooseAndroidRecordableEGLConfigDummyWrite) {
+TEST_F(SurfaceMediaSourceGLTest, ChooseAndroidRecordableEGLConfigDummyWriter) {
LOGV("Test # %d", testId++);
- LOGV("Test to verify creating a surface w/ right config *********");
+ LOGV("Verify creating a surface w/ right config + dummy writer*********");
mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
mSTC = new SurfaceTextureClient(mSMS);
@@ -789,17 +780,12 @@
DummyRecorder writer(mSMS);
writer.start();
- EGLint numConfigs = 0;
- EXPECT_TRUE(eglChooseConfig(mEglDisplay, getConfigAttribs(), &mSMSGlConfig,
- 1, &numConfigs));
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
-
- mSmsEglSurface = eglCreateWindowSurface(mEglDisplay, mSMSGlConfig,
+ mEglSurface = eglCreateWindowSurface(mEglDisplay, mGlConfig,
mANW.get(), NULL);
ASSERT_EQ(EGL_SUCCESS, eglGetError());
- ASSERT_NE(EGL_NO_SURFACE, mSmsEglSurface) ;
+ ASSERT_NE(EGL_NO_SURFACE, mEglSurface) ;
- EXPECT_TRUE(eglMakeCurrent(mEglDisplay, mSmsEglSurface, mSmsEglSurface,
+ EXPECT_TRUE(eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface,
mEglContext));
ASSERT_EQ(EGL_SUCCESS, eglGetError());
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 744fa50..94efa74 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -151,7 +151,7 @@
AudioFlinger::AudioFlinger()
: BnAudioFlinger(),
mPrimaryHardwareDev(0), mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1),
- mBtNrec(false)
+ mBtNrecIsOff(false)
{
}
@@ -751,15 +751,15 @@
String8 value;
if (param.get(String8(AUDIO_PARAMETER_KEY_BT_NREC), value) == NO_ERROR) {
Mutex::Autolock _l(mLock);
- bool btNrec = (value == AUDIO_PARAMETER_VALUE_ON);
- if (mBtNrec != btNrec) {
+ bool btNrecIsOff = (value == AUDIO_PARAMETER_VALUE_OFF);
+ if (mBtNrecIsOff != btNrecIsOff) {
for (size_t i = 0; i < mRecordThreads.size(); i++) {
sp<RecordThread> thread = mRecordThreads.valueAt(i);
RecordThread::RecordTrack *track = thread->track();
if (track != NULL) {
audio_devices_t device = (audio_devices_t)(
thread->device() & AUDIO_DEVICE_IN_ALL);
- bool suspend = audio_is_bluetooth_sco_device(device) && btNrec;
+ bool suspend = audio_is_bluetooth_sco_device(device) && btNrecIsOff;
thread->setEffectSuspended(FX_IID_AEC,
suspend,
track->sessionId());
@@ -768,7 +768,7 @@
track->sessionId());
}
}
- mBtNrec = btNrec;
+ mBtNrecIsOff = btNrecIsOff;
}
}
return final_result;
@@ -1362,6 +1362,7 @@
for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
mStreamTypes[stream].volume = mAudioFlinger->streamVolumeInternal(stream);
mStreamTypes[stream].mute = mAudioFlinger->streamMute(stream);
+ mStreamTypes[stream].valid = true;
}
}
@@ -1530,6 +1531,14 @@
chain->setStrategy(AudioSystem::getStrategyForStream((audio_stream_type_t)track->type()));
chain->incTrackCnt();
}
+
+ // invalidate track immediately if the stream type was moved to another thread since
+ // createTrack() was called by the client process.
+ if (!mStreamTypes[streamType].valid) {
+ LOGW("createTrack_l() on thread %p: invalidating track on stream %d",
+ this, streamType);
+ android_atomic_or(CBLK_INVALID_ON, &track->mCblk->flags);
+ }
}
lStatus = NO_ERROR;
@@ -2219,6 +2228,14 @@
}
}
+void AudioFlinger::PlaybackThread::setStreamValid(int streamType, bool valid)
+{
+ LOGV ("PlaybackThread::setStreamValid() thread %p, streamType %d, valid %d",
+ this, streamType, valid);
+ Mutex::Autolock _l(mLock);
+
+ mStreamTypes[streamType].valid = valid;
+}
// getTrackName_l() must be called with ThreadBase::mLock held
int AudioFlinger::MixerThread::getTrackName_l()
@@ -4394,7 +4411,7 @@
mTrack = track.get();
// disable AEC and NS if the device is a BT SCO headset supporting those pre processings
bool suspend = audio_is_bluetooth_sco_device(
- (audio_devices_t)(mDevice & AUDIO_DEVICE_IN_ALL)) && mAudioFlinger->btNrec();
+ (audio_devices_t)(mDevice & AUDIO_DEVICE_IN_ALL)) && mAudioFlinger->btNrecIsOff();
setEffectSuspended_l(FX_IID_AEC, suspend, sessionId);
setEffectSuspended_l(FX_IID_NS, suspend, sessionId);
}
@@ -4619,7 +4636,7 @@
// disable AEC and NS if the device is a BT SCO headset supporting those pre processings
if (mTrack != NULL) {
bool suspend = audio_is_bluetooth_sco_device(
- (audio_devices_t)value) && mAudioFlinger->btNrec();
+ (audio_devices_t)value) && mAudioFlinger->btNrecIsOff();
setEffectSuspended_l(FX_IID_AEC, suspend, mTrack->sessionId());
setEffectSuspended_l(FX_IID_NS, suspend, mTrack->sessionId());
}
@@ -5074,11 +5091,14 @@
LOGV("setStreamOutput() stream %d to output %d", stream, output);
audioConfigChanged_l(AudioSystem::STREAM_CONFIG_CHANGED, output, &stream);
+ dstThread->setStreamValid(stream, true);
+
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
if (thread != dstThread &&
thread->type() != ThreadBase::DIRECT) {
MixerThread *srcThread = (MixerThread *)thread;
+ srcThread->setStreamValid(stream, false);
srcThread->invalidateTracks(stream);
}
}
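
The new per-stream valid flag closes a race: if the audio policy manager moves a stream type to another output thread while a client is in the middle of createTrack(), the track created on the old thread is flagged invalid right away (CBLK_INVALID_ON), forcing the client to re-create it against the new output. A simplified standalone sketch of that flag, using std types instead of the AudioFlinger internals:

    #include <array>
    #include <mutex>

    struct Track { bool invalid; };  // 'invalid' stands in for CBLK_INVALID_ON

    class PlaybackThreadSketch {
    public:
        PlaybackThreadSketch() { mStreamValid.fill(true); }

        // Called when a stream type is moved between output threads:
        // true on the destination thread, false on every source thread.
        void setStreamValid(int stream, bool valid) {
            std::lock_guard<std::mutex> lock(mLock);
            mStreamValid[stream] = valid;
        }

        // A track created for a stream that has already been moved away is
        // flagged invalid immediately, so the client re-creates it on the
        // new output.
        void createTrack(int stream, Track *track) {
            std::lock_guard<std::mutex> lock(mLock);
            track->invalid = !mStreamValid[stream];
        }

    private:
        std::mutex mLock;
        std::array<bool, 16> mStreamValid;  // sized arbitrarily for the sketch
    };
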
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 1141f6c..2e05593 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -210,7 +210,7 @@
uint32_t getMode() { return mMode; }
- bool btNrec() { return mBtNrec; }
+ bool btNrecIsOff() { return mBtNrecIsOff; }
private:
AudioFlinger();
@@ -751,14 +751,18 @@
virtual uint32_t hasAudioSession(int sessionId);
virtual uint32_t getStrategyForSession_l(int sessionId);
+ void setStreamValid(int streamType, bool valid);
+
struct stream_type_t {
stream_type_t()
: volume(1.0f),
- mute(false)
+ mute(false),
+ valid(true)
{
}
float volume;
bool mute;
+ bool valid;
};
protected:
@@ -1389,7 +1393,7 @@
DefaultKeyedVector< pid_t, sp<NotificationClient> > mNotificationClients;
volatile int32_t mNextUniqueId;
uint32_t mMode;
- bool mBtNrec;
+ bool mBtNrecIsOff;
Vector<AudioSessionRef*> mAudioSessionRefs;
};