Merge "media: Use libyuv for color conversion." into nyc-mr1-dev
diff --git a/camera/cameraserver/cameraserver.rc b/camera/cameraserver/cameraserver.rc
index 09f9789..624baff 100644
--- a/camera/cameraserver/cameraserver.rc
+++ b/camera/cameraserver/cameraserver.rc
@@ -3,4 +3,5 @@
user cameraserver
group audio camera input drmrpc inet media mediadrm net_bt net_bt_admin net_bw_acct
ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
+ writepid /dev/cpuset/camera-daemon/tasks
+ writepid /dev/stune/foreground/tasks
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index f4d0acd..4f2517c 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -314,11 +314,12 @@
status_t handleSetSurface(const sp<Surface> &surface);
status_t setupNativeWindowSizeFormatAndUsage(
- ANativeWindow *nativeWindow /* nonnull */, int *finalUsage /* nonnull */);
+ ANativeWindow *nativeWindow /* nonnull */, int *finalUsage /* nonnull */,
+ bool reconnect);
status_t configureOutputBuffersFromNativeWindow(
OMX_U32 *nBufferCount, OMX_U32 *nBufferSize,
- OMX_U32 *nMinUndequeuedBuffers);
+ OMX_U32 *nMinUndequeuedBuffers, bool preregister);
status_t allocateOutputMetadataBuffers();
status_t submitOutputMetadataBuffer();
void signalSubmitOutputMetadataBufferIfEOS_workaround();
diff --git a/include/media/stagefright/SurfaceUtils.h b/include/media/stagefright/SurfaceUtils.h
index c1a9c0a..13d580c 100644
--- a/include/media/stagefright/SurfaceUtils.h
+++ b/include/media/stagefright/SurfaceUtils.h
@@ -24,9 +24,14 @@
namespace android {
+/**
+ * Configures |nativeWindow| for the given |width|x|height|, pixel |format|, |rotation| and |usage|.
+ * If |reconnect| is true, reconnects to the native window beforehand.
+ * @return first error encountered, or NO_ERROR on success.
+ */
status_t setNativeWindowSizeFormatAndUsage(
ANativeWindow *nativeWindow /* nonnull */,
- int width, int height, int format, int rotation, int usage);
+ int width, int height, int format, int rotation, int usage, bool reconnect);
status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */);
} // namespace android
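
For illustration only (not part of this patch), a caller-side sketch of the updated helper; the size values are hypothetical and the format/usage constants are the ones used later in this patch:

    status_t err = setNativeWindowSizeFormatAndUsage(
            nativeWindow, 1280 /* width */, 720 /* height */,
            HAL_PIXEL_FORMAT_RGBX_8888, 0 /* rotation */,
            GRALLOC_USAGE_SW_WRITE_OFTEN, true /* reconnect */);
    if (err != NO_ERROR) {
        // the helper logs the first failing native_window_* call itself
        return err;
    }

With reconnect == true the helper disconnects and re-connects NATIVE_WINDOW_API_MEDIA before applying the geometry, so the caller can subsequently dequeue all of the window's buffers (see the SurfaceUtils.cpp hunk below).
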
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 3961e6e..fad8350 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -2365,9 +2365,14 @@
if (location == ExtendedTimestamp::LOCATION_SERVER) {
ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
"getTimestamp() location moved from kernel to server");
+ // check that the last kernel OK time info exists and the positions
+ // are valid (if they predate the current track, the positions may
+ // be zero or negative).
const int64_t frames =
(ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
- ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
+ ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
+ ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
+ ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
?
int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
/ 1000)
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 411519d..34445e0 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -831,14 +831,15 @@
mProcessSize = (mSamplingRate * 20) / 1000;
char value[PROPERTY_VALUE_MAX];
- property_get("gsm.operator.iso-country", value, "");
- if (strcmp(value,"us") == 0 ||
- strcmp(value,"ca") == 0) {
+ if (property_get("gsm.operator.iso-country", value, "") == 0) {
+ property_get("gsm.sim.operator.iso-country", value, "");
+ }
+ if (strstr(value, "us") != NULL ||
+ strstr(value, "ca") != NULL) {
mRegion = ANSI;
- } else if (strcmp(value,"jp") == 0) {
+ } else if (strstr(value, "jp") != NULL) {
mRegion = JAPAN;
- } else if (strcmp(value,"uk") == 0 ||
- strcmp(value,"uk,uk") == 0) {
+ } else if (strstr(value, "uk") != NULL) {
mRegion = UK;
} else {
mRegion = CEPT;
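
A note on the lookup above (illustration only, not part of this patch): property_get() returns the length of the value it retrieved, so a zero return means gsm.operator.iso-country is unset or empty and the code falls back to the SIM's country code. The switch from strcmp() to strstr() also lets multi-valued strings, such as the removed "uk,uk" case, match:

    char value[PROPERTY_VALUE_MAX];
    // Prefer the network country; fall back to the SIM country if it is empty.
    if (property_get("gsm.operator.iso-country", value, "") == 0) {
        property_get("gsm.sim.operator.iso-country", value, "");
    }
    bool isUk = (strstr(value, "uk") != NULL);   // matches "uk" and "uk,uk"
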
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 8725dfe..fbe749c 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -395,7 +395,9 @@
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
- if (mPlayer == 0) return INVALID_OPERATION;
+ if (mPlayer == 0 || (mCurrentState & MEDIA_PLAYER_STOPPED)) {
+ return INVALID_OPERATION;
+ }
if (rate.mSpeed != 0.f && !(mCurrentState & MEDIA_PLAYER_STARTED)
&& (mCurrentState & (MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_PAUSED
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 97ba76b..32c4b8a 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -820,6 +820,9 @@
break;
}
+ ALOGV("Recording frameRate: %d captureFps: %f",
+ mFrameRate, mCaptureFps);
+
return status;
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 0b10ae4..4504b58 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1005,6 +1005,7 @@
sp<AMessage> inputFormat =
mSource->getFormat(false /* audio */);
+ setVideoScalingMode(mVideoScalingMode);
updateVideoSize(inputFormat, format);
} else if (what == DecoderBase::kWhatShutdownCompleted) {
ALOGV("%s shutdown completed", audio ? "audio" : "video");
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index cbc28e3..d97d5b1 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -674,7 +674,10 @@
}
int usageBits = 0;
- status_t err = setupNativeWindowSizeFormatAndUsage(nativeWindow, &usageBits);
+ // no need to reconnect as we will not dequeue all buffers
+ status_t err = setupNativeWindowSizeFormatAndUsage(
+ nativeWindow, &usageBits,
+ !storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment /* reconnect */);
if (err != OK) {
return err;
}
@@ -948,7 +951,8 @@
}
status_t ACodec::setupNativeWindowSizeFormatAndUsage(
- ANativeWindow *nativeWindow /* nonnull */, int *finalUsage /* nonnull */) {
+ ANativeWindow *nativeWindow /* nonnull */, int *finalUsage /* nonnull */,
+ bool reconnect) {
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
def.nPortIndex = kPortIndexOutput;
@@ -986,12 +990,14 @@
def.format.video.nFrameHeight,
def.format.video.eColorFormat,
mRotationDegrees,
- usage);
+ usage,
+ reconnect);
}
status_t ACodec::configureOutputBuffersFromNativeWindow(
OMX_U32 *bufferCount, OMX_U32 *bufferSize,
- OMX_U32 *minUndequeuedBuffers) {
+ OMX_U32 *minUndequeuedBuffers, bool preregister) {
+
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
def.nPortIndex = kPortIndexOutput;
@@ -1000,7 +1006,8 @@
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err == OK) {
- err = setupNativeWindowSizeFormatAndUsage(mNativeWindow.get(), &mNativeWindowUsageBits);
+ err = setupNativeWindowSizeFormatAndUsage(
+ mNativeWindow.get(), &mNativeWindowUsageBits, preregister /* reconnect */);
}
if (err != OK) {
mNativeWindowUsageBits = 0;
@@ -1082,7 +1089,7 @@
status_t ACodec::allocateOutputBuffersFromNativeWindow() {
OMX_U32 bufferCount, bufferSize, minUndequeuedBuffers;
status_t err = configureOutputBuffersFromNativeWindow(
- &bufferCount, &bufferSize, &minUndequeuedBuffers);
+ &bufferCount, &bufferSize, &minUndequeuedBuffers, true /* preregister */);
if (err != 0)
return err;
mNumUndequeuedBuffers = minUndequeuedBuffers;
@@ -1168,7 +1175,8 @@
status_t ACodec::allocateOutputMetadataBuffers() {
OMX_U32 bufferCount, bufferSize, minUndequeuedBuffers;
status_t err = configureOutputBuffersFromNativeWindow(
- &bufferCount, &bufferSize, &minUndequeuedBuffers);
+ &bufferCount, &bufferSize, &minUndequeuedBuffers,
+ mLegacyAdaptiveExperiment /* preregister */);
if (err != 0)
return err;
mNumUndequeuedBuffers = minUndequeuedBuffers;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index e8cd58a..ff5c4d4 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1791,9 +1791,8 @@
err = BAD_VALUE;
} else {
err = connectToSurface(surface);
- if (err == BAD_VALUE) {
- // assuming reconnecting to same surface
- // TODO: check if it is the same surface
+ if (err == ALREADY_EXISTS) {
+ // reconnecting to same surface
err = OK;
} else {
if (err == OK) {
@@ -2683,11 +2682,17 @@
status_t MediaCodec::connectToSurface(const sp<Surface> &surface) {
status_t err = OK;
if (surface != NULL) {
+ uint64_t oldId, newId;
+ if (mSurface != NULL
+ && surface->getUniqueId(&newId) == NO_ERROR
+ && mSurface->getUniqueId(&oldId) == NO_ERROR
+ && newId == oldId) {
+ ALOGI("[%s] connecting to the same surface. Nothing to do.", mComponentName.c_str());
+ return ALREADY_EXISTS;
+ }
+
err = native_window_api_connect(surface.get(), NATIVE_WINDOW_API_MEDIA);
- if (err == BAD_VALUE) {
- ALOGI("native window already connected. Assuming no change of surface");
- return err;
- } else if (err == OK) {
+ if (err == OK) {
// Require a fresh set of buffers after each connect by using a unique generation
// number. Rely on the fact that max supported process id by Linux is 2^22.
// PID is never 0 so we don't have to worry that we use the default generation of 0.
@@ -2709,7 +2714,8 @@
ALOGE("native_window_api_connect returned an error: %s (%d)", strerror(-err), err);
}
}
- return err;
+ // do not return ALREADY_EXISTS unless surfaces are the same
+ return err == ALREADY_EXISTS ? BAD_VALUE : err;
}
status_t MediaCodec::disconnectFromSurface() {
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index db33e83..8935f6a 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -122,6 +122,7 @@
{ "writer", METADATA_KEY_WRITER },
{ "compilation", METADATA_KEY_COMPILATION },
{ "isdrm", METADATA_KEY_IS_DRM },
+ { "date", METADATA_KEY_DATE },
{ "width", METADATA_KEY_VIDEO_WIDTH },
{ "height", METADATA_KEY_VIDEO_HEIGHT },
};
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index a62e1a2..377f5fd 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -22,6 +22,7 @@
#include <utils/Log.h>
#include <gui/Surface.h>
+#include "include/avc_utils.h"
#include "include/StagefrightMetadataRetriever.h"
#include <media/ICrypto.h>
@@ -237,6 +238,15 @@
int64_t timeUs;
size_t retriesLeft = kRetryCount;
bool done = false;
+ const char *mime;
+ bool success = format->findCString(kKeyMIMEType, &mime);
+ if (!success) {
+ ALOGE("Could not find mime type");
+ return NULL;
+ }
+
+ bool isAvcOrHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
+ || !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
do {
size_t inputIndex = -1;
@@ -276,6 +286,11 @@
memcpy(codecBuffer->data(),
(const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
mediaBuffer->range_length());
+ if (isAvcOrHevc && IsIDR(codecBuffer)) {
+ // Only need to decode one IDR frame.
+ haveMoreInputs = false;
+ flags |= MediaCodec::BUFFER_FLAG_EOS;
+ }
}
mediaBuffer->release();
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
index 9940822..568837a 100644
--- a/media/libstagefright/SurfaceUtils.cpp
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -26,8 +26,25 @@
status_t setNativeWindowSizeFormatAndUsage(
ANativeWindow *nativeWindow /* nonnull */,
- int width, int height, int format, int rotation, int usage) {
- status_t err = native_window_set_buffers_dimensions(nativeWindow, width, height);
+ int width, int height, int format, int rotation, int usage, bool reconnect) {
+ status_t err = NO_ERROR;
+
+ // In some cases we need to reconnect so that we can dequeue all buffers
+ if (reconnect) {
+ err = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+ if (err != NO_ERROR) {
+ ALOGE("native_window_api_disconnect failed: %s (%d)", strerror(-err), -err);
+ return err;
+ }
+
+ err = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+ if (err != NO_ERROR) {
+ ALOGE("native_window_api_connect failed: %s (%d)", strerror(-err), -err);
+ return err;
+ }
+ }
+
+ err = native_window_set_buffers_dimensions(nativeWindow, width, height);
if (err != NO_ERROR) {
ALOGE("native_window_set_buffers_dimensions failed: %s (%d)", strerror(-err), -err);
return err;
@@ -124,7 +141,8 @@
}
err = setNativeWindowSizeFormatAndUsage(
- nativeWindow, 1, 1, HAL_PIXEL_FORMAT_RGBX_8888, 0, GRALLOC_USAGE_SW_WRITE_OFTEN);
+ nativeWindow, 1, 1, HAL_PIXEL_FORMAT_RGBX_8888, 0, GRALLOC_USAGE_SW_WRITE_OFTEN,
+ false /* reconnect */);
if (err != NO_ERROR) {
goto error;
}
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 935f1dc..c04549a 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -701,6 +701,22 @@
mLastSeqNumber = mFirstSeqNumber + mItems.size() - 1;
}
+ for (size_t i = 0; i < mItems.size(); ++i) {
+ sp<AMessage> meta = mItems.itemAt(i).mMeta;
+ const char *keys[] = {"audio", "video", "subtitles"};
+ for (size_t j = 0; j < sizeof(keys) / sizeof(const char *); ++j) {
+ AString groupID;
+ if (meta->findString(keys[j], &groupID)) {
+ ssize_t groupIndex = mMediaGroups.indexOfKey(groupID);
+ if (groupIndex < 0) {
+ ALOGE("Undefined media group '%s' referenced in stream info.",
+ groupID.c_str());
+ return ERROR_MALFORMED;
+ }
+ }
+ }
+ }
+
return OK;
}
@@ -873,15 +889,6 @@
}
const AString &groupID = unquoteString(val);
- ssize_t groupIndex = mMediaGroups.indexOfKey(groupID);
-
- if (groupIndex < 0) {
- ALOGE("Undefined media group '%s' referenced in stream info.",
- groupID.c_str());
-
- return ERROR_MALFORMED;
- }
-
key.tolower();
if (meta->get() == NULL) {
*meta = new AMessage;
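
The two M3UParser hunks above defer the media-group check until the whole playlist has been parsed, so a master playlist may define an EXT-X-MEDIA group after the EXT-X-STREAM-INF entry that references it. A minimal, hypothetical playlist of that shape:

    #EXTM3U
    #EXT-X-STREAM-INF:BANDWIDTH=1280000,AUDIO="aud"
    video/index.m3u8
    #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="aud",NAME="English",URI="audio/index.m3u8"

With the old in-line check this was rejected as ERROR_MALFORMED because "aud" was not yet in mMediaGroups when the stream-info line was parsed; the new post-parse loop still rejects genuinely undefined group references.
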
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index 060b6be..56ab3f6 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -200,7 +200,7 @@
// For buffer id management
OMX::buffer_id makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader);
- OMX_BUFFERHEADERTYPE *findBufferHeader(OMX::buffer_id buffer);
+ OMX_BUFFERHEADERTYPE *findBufferHeader(OMX::buffer_id buffer, OMX_U32 portIndex);
OMX::buffer_id findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader);
void invalidateBufferID(OMX::buffer_id buffer);
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 995e50e..0c8fd67 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -775,7 +775,9 @@
int64_t GraphicBufferSource::getTimestamp(const BufferItem &item) {
int64_t timeUs = item.mTimestamp / 1000;
- if (mTimePerCaptureUs > 0ll) {
+ if (mTimePerCaptureUs > 0ll
+ && (mTimePerCaptureUs > 2 * mTimePerFrameUs
+ || mTimePerFrameUs > 2 * mTimePerCaptureUs)) {
// Time lapse or slow motion mode
if (mPrevCaptureUs < 0ll) {
// first capture
@@ -801,6 +803,8 @@
return mPrevFrameUs;
} else if (mMaxTimestampGapUs > 0ll) {
+ //TODO: Fix the case when mMaxTimestampGapUs and mTimePerCaptureUs are both set.
+
/* Cap timestamp gap between adjacent frames to specified max
*
* In the scenario of cast mirroring, encoding could be suspended for
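
To make the new rate check above concrete (numbers are hypothetical, illustration only):

    // Time lapse: 1 fps capture encoded as ~30 fps video.
    // 1000000 > 2 * 33333, so the time-lapse/slow-motion branch is taken.
    int64_t mTimePerCaptureUs = 1000000;  // 1 s between captured frames
    int64_t mTimePerFrameUs   = 33333;    // ~30 fps in the encoded stream

    // Ordinary 30 fps capture encoded at 30 fps: the two intervals are within
    // a factor of two of each other, so getTimestamp() now falls through to
    // the regular (non time-lapse) handling instead.
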
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 5445944..4f1a952 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -93,19 +93,22 @@
namespace android {
struct BufferMeta {
- BufferMeta(const sp<IMemory> &mem, bool is_backup = false)
+ BufferMeta(const sp<IMemory> &mem, OMX_U32 portIndex, bool is_backup = false)
: mMem(mem),
- mIsBackup(is_backup) {
+ mIsBackup(is_backup),
+ mPortIndex(portIndex) {
}
- BufferMeta(size_t size)
+ BufferMeta(size_t size, OMX_U32 portIndex)
: mSize(size),
- mIsBackup(false) {
+ mIsBackup(false),
+ mPortIndex(portIndex) {
}
- BufferMeta(const sp<GraphicBuffer> &graphicBuffer)
+ BufferMeta(const sp<GraphicBuffer> &graphicBuffer, OMX_U32 portIndex)
: mGraphicBuffer(graphicBuffer),
- mIsBackup(false) {
+ mIsBackup(false),
+ mPortIndex(portIndex) {
}
void CopyFromOMX(const OMX_BUFFERHEADERTYPE *header) {
@@ -156,12 +159,17 @@
mNativeHandle = nativeHandle;
}
+ OMX_U32 getPortIndex() {
+ return mPortIndex;
+ }
+
private:
sp<GraphicBuffer> mGraphicBuffer;
sp<NativeHandle> mNativeHandle;
sp<IMemory> mMem;
size_t mSize;
bool mIsBackup;
+ OMX_U32 mPortIndex;
BufferMeta(const BufferMeta &);
BufferMeta &operator=(const BufferMeta &);
@@ -684,7 +692,7 @@
return BAD_VALUE;
}
- BufferMeta *buffer_meta = new BufferMeta(params);
+ BufferMeta *buffer_meta = new BufferMeta(params, portIndex);
OMX_BUFFERHEADERTYPE *header;
@@ -740,7 +748,7 @@
return UNKNOWN_ERROR;
}
- BufferMeta *bufferMeta = new BufferMeta(graphicBuffer);
+ BufferMeta *bufferMeta = new BufferMeta(graphicBuffer, portIndex);
OMX_BUFFERHEADERTYPE *header = NULL;
OMX_U8* bufferHandle = const_cast<OMX_U8*>(
@@ -802,7 +810,7 @@
return StatusFromOMXError(err);
}
- BufferMeta *bufferMeta = new BufferMeta(graphicBuffer);
+ BufferMeta *bufferMeta = new BufferMeta(graphicBuffer, portIndex);
OMX_BUFFERHEADERTYPE *header;
@@ -889,7 +897,7 @@
OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
OMX::buffer_id buffer) {
Mutex::Autolock autoLock(mLock);
- OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
+ OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, portIndex);
// update backup buffer for input, codec buffer for output
return updateGraphicBufferInMeta_l(
portIndex, graphicBuffer, buffer, header,
@@ -899,7 +907,7 @@
status_t OMXNodeInstance::updateNativeHandleInMeta(
OMX_U32 portIndex, const sp<NativeHandle>& nativeHandle, OMX::buffer_id buffer) {
Mutex::Autolock autoLock(mLock);
- OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
+ OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, portIndex);
// No need to check |nativeHandle| since NULL is valid for it as below.
if (header == NULL) {
ALOGE("b/25884056");
@@ -1082,7 +1090,7 @@
Mutex::Autolock autoLock(mLock);
- BufferMeta *buffer_meta = new BufferMeta(size);
+ BufferMeta *buffer_meta = new BufferMeta(size, portIndex);
OMX_BUFFERHEADERTYPE *header;
@@ -1137,7 +1145,7 @@
return BAD_VALUE;
}
- BufferMeta *buffer_meta = new BufferMeta(params, true);
+ BufferMeta *buffer_meta = new BufferMeta(params, portIndex, true);
OMX_BUFFERHEADERTYPE *header;
@@ -1178,7 +1186,7 @@
removeActiveBuffer(portIndex, buffer);
- OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
+ OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, portIndex);
if (header == NULL) {
ALOGE("b/25884056");
return BAD_VALUE;
@@ -1198,7 +1206,7 @@
status_t OMXNodeInstance::fillBuffer(OMX::buffer_id buffer, int fenceFd) {
Mutex::Autolock autoLock(mLock);
- OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
+ OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, kPortIndexOutput);
if (header == NULL) {
ALOGE("b/25884056");
return BAD_VALUE;
@@ -1235,7 +1243,7 @@
OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
Mutex::Autolock autoLock(mLock);
- OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
+ OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, kPortIndexInput);
if (header == NULL) {
ALOGE("b/25884056");
return BAD_VALUE;
@@ -1548,10 +1556,10 @@
if (msg.type == omx_message::FILL_BUFFER_DONE) {
OMX_BUFFERHEADERTYPE *buffer =
- findBufferHeader(msg.u.extended_buffer_data.buffer);
+ findBufferHeader(msg.u.extended_buffer_data.buffer, kPortIndexOutput);
if (buffer == NULL) {
ALOGE("b/25884056");
- return BAD_VALUE;
+ return false;
}
{
@@ -1583,7 +1591,10 @@
}
} else if (msg.type == omx_message::EMPTY_BUFFER_DONE) {
OMX_BUFFERHEADERTYPE *buffer =
- findBufferHeader(msg.u.buffer_data.buffer);
+ findBufferHeader(msg.u.buffer_data.buffer, kPortIndexInput);
+ if (buffer == NULL) {
+ return false;
+ }
{
Mutex::Autolock _l(mDebugLock);
@@ -1793,7 +1804,8 @@
return buffer;
}
-OMX_BUFFERHEADERTYPE *OMXNodeInstance::findBufferHeader(OMX::buffer_id buffer) {
+OMX_BUFFERHEADERTYPE *OMXNodeInstance::findBufferHeader(
+ OMX::buffer_id buffer, OMX_U32 portIndex) {
if (buffer == 0) {
return NULL;
}
@@ -1803,7 +1815,15 @@
CLOGW("findBufferHeader: buffer %u not found", buffer);
return NULL;
}
- return mBufferIDToBufferHeader.valueAt(index);
+ OMX_BUFFERHEADERTYPE *header = mBufferIDToBufferHeader.valueAt(index);
+ BufferMeta *buffer_meta =
+ static_cast<BufferMeta *>(header->pAppPrivate);
+ if (buffer_meta->getPortIndex() != portIndex) {
+ CLOGW("findBufferHeader: buffer %u found but with incorrect port index.", buffer);
+ android_errorWriteLog(0x534e4554, "28816827");
+ return NULL;
+ }
+ return header;
}
OMX::buffer_id OMXNodeInstance::findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 0523d41..83e8222 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -660,7 +660,7 @@
sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
if (mPlaybackThreads.keyAt(i) != output) {
uint32_t sessions = t->hasAudioSession(lSessionId);
- if (sessions & PlaybackThread::EFFECT_SESSION) {
+ if (sessions & ThreadBase::EFFECT_SESSION) {
effectThread = t.get();
break;
}
@@ -1694,14 +1694,14 @@
uint32_t AudioFlinger::getPrimaryOutputSamplingRate()
{
Mutex::Autolock _l(mLock);
- PlaybackThread *thread = primaryPlaybackThread_l();
+ PlaybackThread *thread = fastPlaybackThread_l();
return thread != NULL ? thread->sampleRate() : 0;
}
size_t AudioFlinger::getPrimaryOutputFrameCount()
{
Mutex::Autolock _l(mLock);
- PlaybackThread *thread = primaryPlaybackThread_l();
+ PlaybackThread *thread = fastPlaybackThread_l();
return thread != NULL ? thread->frameCountHAL() : 0;
}
@@ -1762,7 +1762,7 @@
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
sp<PlaybackThread> thread = mPlaybackThreads.valueAt(i);
uint32_t sessions = thread->hasAudioSession(sessionId);
- if (sessions & PlaybackThread::TRACK_SESSION) {
+ if (sessions & ThreadBase::TRACK_SESSION) {
AudioParameter param = AudioParameter();
param.addInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), value);
thread->setParameters(param.toString());
@@ -2528,6 +2528,25 @@
return thread->outDevice();
}
+AudioFlinger::PlaybackThread *AudioFlinger::fastPlaybackThread_l() const
+{
+ size_t minFrameCount = 0;
+ PlaybackThread *minThread = NULL;
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
+ if (!thread->isDuplicating()) {
+ size_t frameCount = thread->frameCountHAL();
+ if (frameCount != 0 && (minFrameCount == 0 || frameCount < minFrameCount ||
+ (frameCount == minFrameCount && thread->hasFastMixer() &&
+ /*minThread != NULL &&*/ !minThread->hasFastMixer()))) {
+ minFrameCount = frameCount;
+ minThread = thread;
+ }
+ }
+ }
+ return minThread;
+}
+
sp<AudioFlinger::SyncEvent> AudioFlinger::createSyncEvent(AudioSystem::sync_event_t type,
audio_session_t triggerSession,
audio_session_t listenerSession,
@@ -2820,14 +2839,11 @@
return INVALID_OPERATION;
}
- // Check whether the destination thread has a channel count of FCC_2, which is
- // currently required for (most) effects. Prevent moving the effect chain here rather
- // than disabling the addEffect_l() call in dstThread below.
- if ((dstThread->type() == ThreadBase::MIXER || dstThread->isDuplicating()) &&
- dstThread->mChannelCount != FCC_2) {
+ // Check whether the destination thread and all effects in the chain are compatible
+ if (!chain->isCompatibleWithThread_l(dstThread)) {
ALOGW("moveEffectChain_l() effect chain failed because"
- " destination thread %p channel count(%u) != %u",
- dstThread, dstThread->mChannelCount, FCC_2);
+ " destination thread %p is not compatible with effects in the chain",
+ dstThread);
return INVALID_OPERATION;
}
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 4a5a643..c56dcc1 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -575,6 +575,9 @@
PlaybackThread *primaryPlaybackThread_l() const;
audio_devices_t primaryOutputDevice_l() const;
+ // return the playback thread with smallest HAL buffer size, and prefer fast
+ PlaybackThread *fastPlaybackThread_l() const;
+
sp<PlaybackThread> getEffectThread_l(audio_session_t sessionId, int EffectId);
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 00304b2..e6c8abc 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -558,6 +558,12 @@
if (mStatus != NO_ERROR) {
return mStatus;
}
+ if (cmdCode == EFFECT_CMD_GET_PARAM &&
+ (*replySize < sizeof(effect_param_t) ||
+ ((effect_param_t *)pCmdData)->psize > *replySize - sizeof(effect_param_t))) {
+ android_errorWriteLog(0x534e4554, "29251553");
+ return -EINVAL;
+ }
status_t status = (*mEffectInterface)->command(mEffectInterface,
cmdCode,
cmdSize,
@@ -1960,4 +1966,27 @@
}
}
+bool AudioFlinger::EffectChain::hasSoftwareEffect() const
+{
+ Mutex::Autolock _l(mLock);
+ for (size_t i = 0; i < mEffects.size(); i++) {
+ if (mEffects[i]->isImplementationSoftware()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// isCompatibleWithThread_l() must be called with thread->mLock held
+bool AudioFlinger::EffectChain::isCompatibleWithThread_l(const sp<ThreadBase>& thread) const
+{
+ Mutex::Autolock _l(mLock);
+ for (size_t i = 0; i < mEffects.size(); i++) {
+ if (thread->checkEffectCompatibility_l(&(mEffects[i]->desc()), mSessionId) != NO_ERROR) {
+ return false;
+ }
+ }
+ return true;
+}
+
} // namespace android
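
Regarding the EFFECT_CMD_GET_PARAM bounds check added near the top of this file's diff: the reply is an effect_param_t header followed by the parameter block and its value, so the reply buffer must at least hold the header plus psize bytes. A sketch of what is being validated (layout assumed from the audio effect HAL headers; illustration only):

    // effect_param_t roughly: { int32_t status; uint32_t psize; uint32_t vsize;
    // char data[]; } where data[] carries the parameter followed by its value.
    const effect_param_t *p = (const effect_param_t *)pCmdData;
    bool replyTooSmall = (*replySize < sizeof(effect_param_t)) ||
            (p->psize > *replySize - sizeof(effect_param_t));
    // Without this check a crafted psize could make the effect implementation
    // write past the reply buffer (hence the SafetyNet log for 29251553).
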
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index bc9bc94..3b62652 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -117,6 +117,8 @@
void unlock() { mLock.unlock(); }
bool isOffloadable() const
{ return (mDescriptor.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) != 0; }
+ bool isImplementationSoftware() const
+ { return (mDescriptor.flags & EFFECT_FLAG_HW_ACC_MASK) == 0; }
status_t setOffloaded(bool offloaded, audio_io_handle_t io);
bool isOffloaded() const;
void addEffectToHal_l();
@@ -330,6 +332,11 @@
void syncHalEffectsState();
+ bool hasSoftwareEffect() const;
+
+ // isCompatibleWithThread_l() must be called with thread->mLock held
+ bool isCompatibleWithThread_l(const sp<ThreadBase>& thread) const;
+
void dump(int fd, const Vector<String16>& args);
protected:
@@ -361,30 +368,30 @@
void setThread(const sp<ThreadBase>& thread);
- wp<ThreadBase> mThread; // parent mixer thread
- Mutex mLock; // mutex protecting effect list
- Vector< sp<EffectModule> > mEffects; // list of effect modules
- audio_session_t mSessionId; // audio session ID
- int16_t *mInBuffer; // chain input buffer
- int16_t *mOutBuffer; // chain output buffer
+ wp<ThreadBase> mThread; // parent mixer thread
+ mutable Mutex mLock; // mutex protecting effect list
+ Vector< sp<EffectModule> > mEffects; // list of effect modules
+ audio_session_t mSessionId; // audio session ID
+ int16_t *mInBuffer; // chain input buffer
+ int16_t *mOutBuffer; // chain output buffer
// 'volatile' here means these are accessed with atomic operations instead of mutex
volatile int32_t mActiveTrackCnt; // number of active tracks connected
volatile int32_t mTrackCnt; // number of tracks connected
- int32_t mTailBufferCount; // current effect tail buffer count
- int32_t mMaxTailBuffers; // maximum effect tail buffers
- bool mOwnInBuffer; // true if the chain owns its input buffer
- int mVolumeCtrlIdx; // index of insert effect having control over volume
- uint32_t mLeftVolume; // previous volume on left channel
- uint32_t mRightVolume; // previous volume on right channel
- uint32_t mNewLeftVolume; // new volume on left channel
- uint32_t mNewRightVolume; // new volume on right channel
- uint32_t mStrategy; // strategy for this effect chain
- // mSuspendedEffects lists all effects currently suspended in the chain.
- // Use effect type UUID timelow field as key. There is no real risk of identical
- // timeLow fields among effect type UUIDs.
- // Updated by updateSuspendedSessions_l() only.
- KeyedVector< int, sp<SuspendedEffectDesc> > mSuspendedEffects;
+ int32_t mTailBufferCount; // current effect tail buffer count
+ int32_t mMaxTailBuffers; // maximum effect tail buffers
+ bool mOwnInBuffer; // true if the chain owns its input buffer
+ int mVolumeCtrlIdx; // index of insert effect having control over volume
+ uint32_t mLeftVolume; // previous volume on left channel
+ uint32_t mRightVolume; // previous volume on right channel
+ uint32_t mNewLeftVolume; // new volume on left channel
+ uint32_t mNewRightVolume; // new volume on right channel
+ uint32_t mStrategy; // strategy for this effect chain
+ // mSuspendedEffects lists all effects currently suspended in the chain.
+ // Use effect type UUID timelow field as key. There is no real risk of identical
+ // timeLow fields among effect type UUIDs.
+ // Updated by updateSuspendedSessions_l() only.
+ KeyedVector< int, sp<SuspendedEffectDesc> > mSuspendedEffects;
volatile int32_t mForceVolume; // force next volume command because a new effect was enabled
};
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 546ef25..01f3939 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -482,8 +482,6 @@
} else {
// HAL reported that more frames were presented than were written
mNativeFramesWrittenButNotPresented = 0;
- mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
status = INVALID_OPERATION;
}
}
@@ -494,6 +492,10 @@
// fetch server time if we can't get timestamp
mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
systemTime(SYSTEM_TIME_MONOTONIC);
+ // clear out kernel cached position as this may get rapidly stale
+ // if we never get a new valid timestamp
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
}
}
}
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index c6d8266..6f827d9 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1256,6 +1256,135 @@
}
}
+// checkEffectCompatibility_l() must be called with ThreadBase::mLock held
+status_t AudioFlinger::RecordThread::checkEffectCompatibility_l(
+ const effect_descriptor_t *desc, audio_session_t sessionId)
+{
+ // No global effect sessions on record threads
+ if (sessionId == AUDIO_SESSION_OUTPUT_MIX || sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
+ ALOGW("checkEffectCompatibility_l(): global effect %s on record thread %s",
+ desc->name, mThreadName);
+ return BAD_VALUE;
+ }
+ // only pre processing effects on record thread
+ if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC) {
+ ALOGW("checkEffectCompatibility_l(): non pre processing effect %s on record thread %s",
+ desc->name, mThreadName);
+ return BAD_VALUE;
+ }
+ audio_input_flags_t flags = mInput->flags;
+ if (hasFastCapture() || (flags & AUDIO_INPUT_FLAG_FAST)) {
+ if (flags & AUDIO_INPUT_FLAG_RAW) {
+ ALOGW("checkEffectCompatibility_l(): effect %s on record thread %s in raw mode",
+ desc->name, mThreadName);
+ return BAD_VALUE;
+ }
+ if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
+ ALOGW("checkEffectCompatibility_l(): non HW effect %s on record thread %s in fast mode",
+ desc->name, mThreadName);
+ return BAD_VALUE;
+ }
+ }
+ return NO_ERROR;
+}
+
+// checkEffectCompatibility_l() must be called with ThreadBase::mLock held
+status_t AudioFlinger::PlaybackThread::checkEffectCompatibility_l(
+ const effect_descriptor_t *desc, audio_session_t sessionId)
+{
+ // no preprocessing on playback threads
+ if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC) {
+ ALOGW("checkEffectCompatibility_l(): pre processing effect %s created on playback"
+ " thread %s", desc->name, mThreadName);
+ return BAD_VALUE;
+ }
+
+ switch (mType) {
+ case MIXER: {
+ // Reject any effect on mixer multichannel sinks.
+ // TODO: fix both format and multichannel issues with effects.
+ if (mChannelCount != FCC_2) {
+ ALOGW("checkEffectCompatibility_l(): effect %s for multichannel(%d) on MIXER"
+ " thread %s", desc->name, mChannelCount, mThreadName);
+ return BAD_VALUE;
+ }
+ audio_output_flags_t flags = mOutput->flags;
+ if (hasFastMixer() || (flags & AUDIO_OUTPUT_FLAG_FAST)) {
+ if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
+ // global effects are applied only to non fast tracks if they are SW
+ if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
+ break;
+ }
+ } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
+ // only post processing on output stage session
+ if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
+ ALOGW("checkEffectCompatibility_l(): non post processing effect %s not allowed"
+ " on output stage session", desc->name);
+ return BAD_VALUE;
+ }
+ } else {
+ // no restriction on effects applied on non fast tracks
+ if ((hasAudioSession_l(sessionId) & ThreadBase::FAST_SESSION) == 0) {
+ break;
+ }
+ }
+ if (flags & AUDIO_OUTPUT_FLAG_RAW) {
+ ALOGW("checkEffectCompatibility_l(): effect %s on playback thread in raw mode",
+ desc->name);
+ return BAD_VALUE;
+ }
+ if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
+ ALOGW("checkEffectCompatibility_l(): non HW effect %s on playback thread"
+ " in fast mode", desc->name);
+ return BAD_VALUE;
+ }
+ }
+ } break;
+ case OFFLOAD:
+ // only offloadable effects on offload thread
+ if ((desc->flags & EFFECT_FLAG_OFFLOAD_MASK) != EFFECT_FLAG_OFFLOAD_SUPPORTED) {
+ ALOGW("checkEffectCompatibility_l(): non offloadable effect %s created on"
+ " OFFLOAD thread %s", desc->name, mThreadName);
+ return BAD_VALUE;
+ }
+ break;
+ case DIRECT:
+ // Reject any effect on Direct output threads for now, since the format of
+ // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
+ ALOGW("checkEffectCompatibility_l(): effect %s on DIRECT output thread %s",
+ desc->name, mThreadName);
+ return BAD_VALUE;
+ case DUPLICATING:
+ // Reject any effect on mixer multichannel sinks.
+ // TODO: fix both format and multichannel issues with effects.
+ if (mChannelCount != FCC_2) {
+ ALOGW("checkEffectCompatibility_l(): effect %s for multichannel(%d)"
+ " on DUPLICATING thread %s", desc->name, mChannelCount, mThreadName);
+ return BAD_VALUE;
+ }
+ if ((sessionId == AUDIO_SESSION_OUTPUT_STAGE) || (sessionId == AUDIO_SESSION_OUTPUT_MIX)) {
+ ALOGW("checkEffectCompatibility_l(): global effect %s on DUPLICATING"
+ " thread %s", desc->name, mThreadName);
+ return BAD_VALUE;
+ }
+ if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
+ ALOGW("checkEffectCompatibility_l(): post processing effect %s on"
+ " DUPLICATING thread %s", desc->name, mThreadName);
+ return BAD_VALUE;
+ }
+ if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) != 0) {
+ ALOGW("checkEffectCompatibility_l(): HW tunneled effect %s on"
+ " DUPLICATING thread %s", desc->name, mThreadName);
+ return BAD_VALUE;
+ }
+ break;
+ default:
+ LOG_ALWAYS_FATAL("checkEffectCompatibility_l(): wrong thread type %d", mType);
+ }
+
+ return NO_ERROR;
+}
+
// ThreadBase::createEffect_l() must be called with AudioFlinger::mLock held
sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
const sp<AudioFlinger::Client>& client,
@@ -1280,54 +1409,16 @@
goto Exit;
}
- // Reject any effect on Direct output threads for now, since the format of
- // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
- if (mType == DIRECT) {
- ALOGW("createEffect_l() Cannot add effect %s on Direct output type thread %s",
- desc->name, mThreadName);
- lStatus = BAD_VALUE;
- goto Exit;
- }
-
- // Reject any effect on mixer or duplicating multichannel sinks.
- // TODO: fix both format and multichannel issues with effects.
- if ((mType == MIXER || mType == DUPLICATING) && mChannelCount != FCC_2) {
- ALOGW("createEffect_l() Cannot add effect %s for multichannel(%d) %s threads",
- desc->name, mChannelCount, mType == MIXER ? "MIXER" : "DUPLICATING");
- lStatus = BAD_VALUE;
- goto Exit;
- }
-
- // Allow global effects only on offloaded and mixer threads
- if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
- switch (mType) {
- case MIXER:
- case OFFLOAD:
- break;
- case DIRECT:
- case DUPLICATING:
- case RECORD:
- default:
- ALOGW("createEffect_l() Cannot add global effect %s on thread %s",
- desc->name, mThreadName);
- lStatus = BAD_VALUE;
- goto Exit;
- }
- }
-
- // Only Pre processor effects are allowed on input threads and only on input threads
- if ((mType == RECORD) != ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
- ALOGW("createEffect_l() effect %s (flags %08x) created on wrong thread type %d",
- desc->name, desc->flags, mType);
- lStatus = BAD_VALUE;
- goto Exit;
- }
-
ALOGV("createEffect_l() thread %p effect %s on session %d", this, desc->name, sessionId);
{ // scope for mLock
Mutex::Autolock _l(mLock);
+ lStatus = checkEffectCompatibility_l(desc, sessionId);
+ if (lStatus != NO_ERROR) {
+ goto Exit;
+ }
+
// check for existing effect chain with the requested audio session
chain = getEffectChain_l(sessionId);
if (chain == 0) {
@@ -1804,8 +1895,44 @@
}
frameCount = max(frameCount, mFrameCount * sFastTrackMultiplier); // incl framecount 0
}
- ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
- frameCount, mFrameCount);
+
+ // check compatibility with audio effects.
+ { // scope for mLock
+ Mutex::Autolock _l(mLock);
+ // do not accept the RAW flag if post processing effects are present. Note that post
+ // processing on a fast mixer is necessarily hardware
+ sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE);
+ if (chain != 0) {
+ ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_RAW) != 0,
+ "AUDIO_OUTPUT_FLAG_RAW denied: post processing effect present");
+ *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_RAW);
+ }
+ // Do not accept FAST flag if software global effects are present
+ chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
+ if (chain != 0) {
+ ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_RAW) != 0,
+ "AUDIO_OUTPUT_FLAG_RAW denied: global effect present");
+ *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_RAW);
+ if (chain->hasSoftwareEffect()) {
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: software global effect present");
+ *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
+ }
+ }
+ // Do not accept FAST flag if the session has software effects
+ chain = getEffectChain_l(sessionId);
+ if (chain != 0) {
+ ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_RAW) != 0,
+ "AUDIO_OUTPUT_FLAG_RAW denied: effect present on session");
+ *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_RAW);
+ if (chain->hasSoftwareEffect()) {
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: software effect present on session");
+ *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
+ }
+ }
+ }
+ ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_FAST) != 0,
+ "AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
+ frameCount, mFrameCount);
} else {
ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
"mFrameCount=%zu format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
@@ -1814,7 +1941,7 @@
sharedBuffer.get(), frameCount, mFrameCount, format, mFormat,
audio_is_linear_pcm(format),
channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
- *flags = (audio_output_flags_t)(*flags &~AUDIO_OUTPUT_FLAG_FAST);
+ *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
}
}
// For normal PCM streaming tracks, update minimum frame count.
@@ -2400,9 +2527,9 @@
}
}
-uint32_t AudioFlinger::PlaybackThread::hasAudioSession(audio_session_t sessionId) const
+// hasAudioSession_l() must be called with ThreadBase::mLock held
+uint32_t AudioFlinger::PlaybackThread::hasAudioSession_l(audio_session_t sessionId) const
{
- Mutex::Autolock _l(mLock);
uint32_t result = 0;
if (getEffectChain_l(sessionId) != 0) {
result = EFFECT_SESSION;
@@ -2412,6 +2539,9 @@
sp<Track> track = mTracks[i];
if (sessionId == track->sessionId() && !track->isInvalid()) {
result |= TRACK_SESSION;
+ if (track->isFastTrack()) {
+ result |= FAST_SESSION;
+ }
break;
}
}
@@ -2913,7 +3043,7 @@
if (timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] >= 0) {
kernelLocationUpdate = true;
} else {
- ALOGV("getTimestamp error - no valid kernel position");
+ ALOGVV("getTimestamp error - no valid kernel position");
}
// copy over kernel info
@@ -3196,6 +3326,9 @@
" ret(%zd) deltaMs(%d) requires sleep %d ms",
this, ret, deltaMs, throttleMs);
mThreadThrottleTimeMs += throttleMs;
+ // Throttle must be attributed to the previous mixer loop's write time
+ // to allow back-to-back throttling.
+ lastWriteFinished += throttleMs * 1000000;
} else {
uint32_t diff = mThreadThrottleTimeMs - mThreadThrottleEndMs;
if (diff > 0) {
@@ -6333,8 +6466,22 @@
// there are sufficient fast track slots available
mFastTrackAvail
) {
- ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
- frameCount, mFrameCount);
+ // check compatibility with audio effects.
+ Mutex::Autolock _l(mLock);
+ // Do not accept FAST flag if the session has software effects
+ sp<EffectChain> chain = getEffectChain_l(sessionId);
+ if (chain != 0) {
+ ALOGV_IF((*flags & AUDIO_INPUT_FLAG_RAW) != 0,
+ "AUDIO_INPUT_FLAG_RAW denied: effect present on session");
+ *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_RAW);
+ if (chain->hasSoftwareEffect()) {
+ ALOGV("AUDIO_INPUT_FLAG_FAST denied: software effect present on session");
+ *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_FAST);
+ }
+ }
+ ALOGV_IF((*flags & AUDIO_INPUT_FLAG_FAST) != 0,
+ "AUDIO_INPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
+ frameCount, mFrameCount);
} else {
ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%zu mFrameCount=%zu mPipeFramesP2=%zu "
"format=%#x isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
@@ -7224,9 +7371,9 @@
return mInput->stream->get_input_frames_lost(mInput->stream);
}
-uint32_t AudioFlinger::RecordThread::hasAudioSession(audio_session_t sessionId) const
+// hasAudioSession_l() must be called with ThreadBase::mLock held
+uint32_t AudioFlinger::RecordThread::hasAudioSession_l(audio_session_t sessionId) const
{
- Mutex::Autolock _l(mLock);
uint32_t result = 0;
if (getEffectChain_l(sessionId) != 0) {
result = EFFECT_SESSION;
@@ -7235,6 +7382,9 @@
for (size_t i = 0; i < mTracks.size(); ++i) {
if (sessionId == mTracks[i]->sessionId()) {
result |= TRACK_SESSION;
+ if (mTracks[i]->isFastTrack()) {
+ result |= FAST_SESSION;
+ }
break;
}
}
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 0b4fbb9..1bfbca9 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -301,8 +301,10 @@
enum effect_state {
EFFECT_SESSION = 0x1, // the audio session corresponds to at least one
// effect
- TRACK_SESSION = 0x2 // the audio session corresponds to at least one
+ TRACK_SESSION = 0x2, // the audio session corresponds to at least one
// track
+ FAST_SESSION = 0x4 // the audio session corresponds to at least one
+ // fast track
};
// get effect chain corresponding to session Id.
@@ -335,9 +337,16 @@
void removeEffect_l(const sp< EffectModule>& effect);
// detach all tracks connected to an auxiliary effect
virtual void detachAuxEffect_l(int effectId __unused) {}
- // returns either EFFECT_SESSION if effects on this audio session exist in one
- // chain, or TRACK_SESSION if tracks on this audio session exist, or both
- virtual uint32_t hasAudioSession(audio_session_t sessionId) const = 0;
+ // returns a combination of:
+ // - EFFECT_SESSION if effects on this audio session exist in one chain
+ // - TRACK_SESSION if tracks on this audio session exist
+ // - FAST_SESSION if fast tracks on this audio session exist
+ virtual uint32_t hasAudioSession_l(audio_session_t sessionId) const = 0;
+ uint32_t hasAudioSession(audio_session_t sessionId) const {
+ Mutex::Autolock _l(mLock);
+ return hasAudioSession_l(sessionId);
+ }
+
// the value returned by default implementation is not important as the
// strategy is only meaningful for PlaybackThread which implements this method
virtual uint32_t getStrategyForSession_l(audio_session_t sessionId __unused)
@@ -374,6 +383,10 @@
void systemReady();
+ // checkEffectCompatibility_l() must be called with ThreadBase::mLock held
+ virtual status_t checkEffectCompatibility_l(const effect_descriptor_t *desc,
+ audio_session_t sessionId) = 0;
+
mutable Mutex mLock;
protected:
@@ -506,6 +519,9 @@
// RefBase
virtual void onFirstRef();
+ virtual status_t checkEffectCompatibility_l(const effect_descriptor_t *desc,
+ audio_session_t sessionId);
+
protected:
// Code snippets that were lifted up out of threadLoop()
virtual void threadLoop_mix() = 0;
@@ -605,7 +621,7 @@
virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
- virtual uint32_t hasAudioSession(audio_session_t sessionId) const;
+ virtual uint32_t hasAudioSession_l(audio_session_t sessionId) const;
virtual uint32_t getStrategyForSession_l(audio_session_t sessionId);
@@ -1292,7 +1308,7 @@
virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
- virtual uint32_t hasAudioSession(audio_session_t sessionId) const;
+ virtual uint32_t hasAudioSession_l(audio_session_t sessionId) const;
// Return the set of unique session IDs across all tracks.
// The keys are the session IDs, and the associated values are meaningless.
@@ -1308,6 +1324,9 @@
bool hasFastCapture() const { return mFastCapture != 0; }
virtual void getAudioPortConfig(struct audio_port_config *config);
+ virtual status_t checkEffectCompatibility_l(const effect_descriptor_t *desc,
+ audio_session_t sessionId);
+
private:
// Enter standby if not already in standby, and set mStandby flag
void standbyIfNotAlreadyInStandby();
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 3b075fa..0e64716 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -4375,7 +4375,7 @@
}
routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
audio_devices_t curDevices =
- getDeviceForStrategy((routing_strategy)curStrategy, true /*fromCache*/);
+ getDeviceForStrategy((routing_strategy)curStrategy, false /*fromCache*/);
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(curDevices, mOutputs);
for (size_t i = 0; i < outputs.size(); i++) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index c8e64fe..005dd69 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -322,6 +322,9 @@
p.fastInfo.bestStillCaptureFpsRange[0],
p.fastInfo.bestStillCaptureFpsRange[1]);
+ result.appendFormat(" Use zero shutter lag: %s\n",
+ p.useZeroShutterLag() ? "yes" : "no");
+
result.append(" Current streams:\n");
result.appendFormat(" Preview stream ID: %d\n",
getPreviewStreamId());
@@ -813,7 +816,7 @@
}
}
- if (params.zslMode && !params.recordingHint &&
+ if (params.useZeroShutterLag() &&
getRecordingStreamId() == NO_STREAM) {
res = updateProcessorStream(mZslProcessor, params);
if (res != OK) {
@@ -1362,7 +1365,7 @@
return OK;
}
- if (l.mParameters.zslMode) {
+ if (l.mParameters.allowZslMode) {
mZslProcessor->clearZslQueue();
}
}
@@ -1460,7 +1463,7 @@
// Clear ZSL buffer queue when Jpeg size is changed.
bool jpegStreamChanged = mJpegProcessor->getStreamId() != lastJpegStreamId;
- if (l.mParameters.zslMode && jpegStreamChanged) {
+ if (l.mParameters.allowZslMode && jpegStreamChanged) {
ALOGV("%s: Camera %d: Clear ZSL buffer queue when Jpeg size is changed",
__FUNCTION__, mCameraId);
mZslProcessor->clearZslQueue();
@@ -1495,7 +1498,7 @@
if (res != OK) return res;
Parameters::focusMode_t focusModeAfter = l.mParameters.focusMode;
- if (l.mParameters.zslMode && focusModeAfter != focusModeBefore) {
+ if (l.mParameters.allowZslMode && focusModeAfter != focusModeBefore) {
mZslProcessor->clearZslQueue();
}
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index e3d6906..05adb29 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -338,7 +338,7 @@
return DONE;
}
- else if (l.mParameters.zslMode &&
+ else if (l.mParameters.useZeroShutterLag() &&
l.mParameters.state == Parameters::STILL_CAPTURE &&
l.mParameters.flashMode != Parameters::FLASH_MODE_ON) {
nextState = ZSL_START;
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index ffe96fc..d6d8dde 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -67,10 +67,15 @@
}
void JpegProcessor::onBufferReleased(const BufferInfo& bufferInfo) {
- Mutex::Autolock l(mInputMutex);
ALOGV("%s", __FUNCTION__);
-
if (bufferInfo.mError) {
+ // Only lock in the error case: we get one of these callbacks for each
+ // onFrameAvailable as well, and in the non-error case scheduling may
+ // delay this call late enough to run into later preview restart
+ // operations.
+ // b/29524651
+ ALOGV("%s: JPEG buffer lost", __FUNCTION__);
+ Mutex::Autolock l(mInputMutex);
mCaptureDone = true;
mCaptureSuccess = false;
mCaptureDoneSignal.signal();
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 5779176..9d5f33c 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -908,12 +908,12 @@
property_get("camera.disable_zsl_mode", value, "0");
if (!strcmp(value,"1") || slowJpegMode) {
ALOGI("Camera %d: Disabling ZSL mode", cameraId);
- zslMode = false;
+ allowZslMode = false;
} else {
- zslMode = true;
+ allowZslMode = true;
}
- ALOGI("%s: zslMode: %d slowJpegMode %d", __FUNCTION__, zslMode, slowJpegMode);
+ ALOGI("%s: allowZslMode: %d slowJpegMode %d", __FUNCTION__, allowZslMode, slowJpegMode);
state = STOPPED;
@@ -1127,6 +1127,8 @@
ALOGV("Camera %d: Flexible YUV %s supported",
cameraId, fastInfo.useFlexibleYuv ? "is" : "is not");
+ fastInfo.maxJpegSize = getMaxSize(getAvailableJpegSizes());
+
return OK;
}
@@ -2231,6 +2233,25 @@
return pictureSizeOverriden;
}
+bool Parameters::useZeroShutterLag() const {
+ // If ZSL mode is disabled, don't use it
+ if (!allowZslMode) return false;
+ // If recording hint is enabled, don't do ZSL
+ if (recordingHint) return false;
+ // If still capture size is no bigger than preview or video size,
+ // don't do ZSL
+ if (pictureWidth <= previewWidth || pictureHeight <= previewHeight ||
+ pictureWidth <= videoWidth || pictureHeight <= videoHeight) {
+ return false;
+ }
+ // If still capture size is less than quarter of max, don't do ZSL
+ if ((pictureWidth * pictureHeight) <
+ (fastInfo.maxJpegSize.width * fastInfo.maxJpegSize.height / 4) ) {
+ return false;
+ }
+ return true;
+}
+
const char* Parameters::getStateName(State state) {
#define CASE_ENUM_TO_CHAR(x) case x: return(#x); break;
switch(state) {
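
A worked example of the useZeroShutterLag() heuristic above, with hypothetical sizes (illustration only):

    // preview 1920x1080, video 1920x1080, fastInfo.maxJpegSize 4032x3024
    // still 4032x3024: strictly larger than preview and video, and its area is
    //                  well above (4032*3024)/4        -> ZSL is used
    // still 1920x1080: not strictly larger than preview/video, and below a
    //                  quarter of the max JPEG area    -> regular capture path
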
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index c437722..f4bb34c 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -161,9 +161,9 @@
bool previewCallbackOneShot;
bool previewCallbackSurface;
- bool zslMode;
+ bool allowZslMode;
// Whether the jpeg stream is slower than 30FPS and can slow down preview.
- // When slowJpegMode is true, zslMode must be false to avoid slowing down preview.
+ // When slowJpegMode is true, allowZslMode must be false to avoid slowing down preview.
bool slowJpegMode;
// Overall camera state
@@ -219,6 +219,7 @@
DefaultKeyedVector<uint8_t, OverrideModes> sceneModeOverrides;
float minFocalLength;
bool useFlexibleYuv;
+ Size maxJpegSize;
} fastInfo;
// Quirks information; these are short-lived flags to enable workarounds for
@@ -271,6 +272,8 @@
status_t recoverOverriddenJpegSize();
// if video snapshot size is currently overridden
bool isJpegSizeOverridden();
+ // whether zero shutter lag should be used for non-recording operation
+ bool useZeroShutterLag() const;
// Calculate the crop region rectangle, either tightly about the preview
// resolution, or a region just based on the active array; both take
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index a2c9712..de42fb2 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -115,7 +115,7 @@
// Use CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG for ZSL streaming case.
if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_0) {
- if (params.zslMode && !params.recordingHint) {
+ if (params.useZeroShutterLag() && !params.recordingHint) {
res = device->createDefaultRequest(CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG,
&mPreviewRequest);
} else {
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
index ae20887..1f01144 100644
--- a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
@@ -43,7 +43,8 @@
Camera3BufferManager::~Camera3BufferManager() {
}
-status_t Camera3BufferManager::registerStream(const StreamInfo& streamInfo) {
+status_t Camera3BufferManager::registerStream(wp<Camera3OutputStream>& stream,
+ const StreamInfo& streamInfo) {
ATRACE_CALL();
int streamId = streamInfo.streamId;
@@ -112,6 +113,8 @@
}
currentStreamSet.streamInfoMap.add(streamId, streamInfo);
currentStreamSet.handoutBufferCountMap.add(streamId, 0);
+ currentStreamSet.attachedBufferCountMap.add(streamId, 0);
+ mStreamMap.add(streamId, stream);
// The max allowed buffer count should be the max of buffer count of each stream inside a stream
// set.
@@ -124,6 +127,7 @@
status_t Camera3BufferManager::unregisterStream(int streamId, int streamSetId) {
ATRACE_CALL();
+
Mutex::Autolock l(mLock);
ALOGV("%s: unregister stream %d with stream set %d", __FUNCTION__,
streamId, streamSetId);
@@ -142,9 +146,11 @@
StreamSet& currentSet = mStreamSetMap.editValueFor(streamSetId);
BufferList& freeBufs = currentSet.freeBuffers;
BufferCountMap& handOutBufferCounts = currentSet.handoutBufferCountMap;
+ BufferCountMap& attachedBufferCounts = currentSet.attachedBufferCountMap;
InfoMap& infoMap = currentSet.streamInfoMap;
removeBuffersFromBufferListLocked(freeBufs, streamId);
handOutBufferCounts.removeItem(streamId);
+ attachedBufferCounts.removeItem(streamId);
// Remove the stream info from info map and recalculate the buffer count water mark.
infoMap.removeItem(streamId);
@@ -154,6 +160,8 @@
currentSet.maxAllowedBufferCount = infoMap[i].totalBufferCount;
}
}
+ mStreamMap.removeItem(streamId);
+
// Lazy solution: when a stream is unregistered, the streams will be reconfigured, reset
// the water mark and let it grow again.
currentSet.allocatedBufferWaterMark = 0;
@@ -193,6 +201,16 @@
return INVALID_OPERATION;
}
+ BufferCountMap& attachedBufferCounts = streamSet.attachedBufferCountMap;
+ size_t& attachedBufferCount = attachedBufferCounts.editValueFor(streamId);
+ if (attachedBufferCount > bufferCount) {
+ // We've already attached more buffers to this stream than we currently have
+ // outstanding, so have the stream just use an already-attached buffer
+ bufferCount++;
+ return ALREADY_EXISTS;
+ }
+ ALOGV("Stream %d set %d: Get buffer for stream: Allocate new", streamId, streamSetId);
+
GraphicBufferEntry buffer =
getFirstBufferFromBufferListLocked(streamSet.freeBuffers, streamId);
@@ -215,8 +233,9 @@
ALOGV("%s: allocation done", __FUNCTION__);
}
- // Increase the hand-out buffer count for tracking purpose.
+ // Increase the hand-out and attached buffer counts for tracking purposes.
bufferCount++;
+ attachedBufferCount++;
// Update the water mark to be the max hand-out buffer count + 1. An additional buffer is
// added to reduce the chance of buffer allocation during stream steady state, especially
// for cases where one stream is active, the other stream may request some buffers randomly.
@@ -235,12 +254,25 @@
// buffers for them.
StreamId firstOtherStreamId = CAMERA3_STREAM_ID_INVALID;
if (streamSet.streamInfoMap.size() > 1) {
+ bool freeBufferIsAttached = false;
for (size_t i = 0; i < streamSet.streamInfoMap.size(); i++) {
firstOtherStreamId = streamSet.streamInfoMap[i].streamId;
- if (firstOtherStreamId != streamId &&
- hasBufferForStreamLocked(streamSet.freeBuffers, firstOtherStreamId)) {
- break;
+ if (firstOtherStreamId != streamId) {
+
+ size_t otherBufferCount =
+ streamSet.handoutBufferCountMap.valueFor(firstOtherStreamId);
+ size_t otherAttachedBufferCount =
+ streamSet.attachedBufferCountMap.valueFor(firstOtherStreamId);
+ if (otherAttachedBufferCount > otherBufferCount) {
+ freeBufferIsAttached = true;
+ break;
+ }
+ if (hasBufferForStreamLocked(streamSet.freeBuffers, firstOtherStreamId)) {
+ freeBufferIsAttached = false;
+ break;
+ }
}
+ firstOtherStreamId = CAMERA3_STREAM_ID_INVALID;
}
if (firstOtherStreamId == CAMERA3_STREAM_ID_INVALID) {
return OK;
@@ -249,12 +281,39 @@
// This will drop the reference to one free buffer, which will effectively free one
// buffer (from the free buffer list) for the inactive streams.
size_t totalAllocatedBufferCount = streamSet.freeBuffers.size();
- for (size_t i = 0; i < streamSet.handoutBufferCountMap.size(); i++) {
- totalAllocatedBufferCount += streamSet.handoutBufferCountMap[i];
+ for (size_t i = 0; i < streamSet.attachedBufferCountMap.size(); i++) {
+ totalAllocatedBufferCount += streamSet.attachedBufferCountMap[i];
}
if (totalAllocatedBufferCount > streamSet.allocatedBufferWaterMark) {
ALOGV("%s: free a buffer from stream %d", __FUNCTION__, firstOtherStreamId);
- getFirstBufferFromBufferListLocked(streamSet.freeBuffers, firstOtherStreamId);
+ if (freeBufferIsAttached) {
+ ALOGV("Stream %d: Freeing buffer: detach", firstOtherStreamId);
+ sp<Camera3OutputStream> stream =
+ mStreamMap.valueFor(firstOtherStreamId).promote();
+ if (stream == nullptr) {
+ ALOGE("%s: unable to promote stream %d to detach buffer", __FUNCTION__,
+ firstOtherStreamId);
+ return INVALID_OPERATION;
+ }
+
+ // Detach and then drop the buffer.
+ //
+ // Need to unlock because the stream may also be calling
+ // into the buffer manager in parallel to signal buffer
+ // release, or acquire a new buffer.
+ {
+ mLock.unlock();
+ sp<GraphicBuffer> buffer;
+ stream->detachBuffer(&buffer, /*fenceFd*/ nullptr);
+ mLock.lock();
+ }
+ size_t& otherAttachedBufferCount =
+ streamSet.attachedBufferCountMap.editValueFor(firstOtherStreamId);
+ otherAttachedBufferCount--;
+ } else {
+ // Droppable buffer is in the free buffer list, grab and drop
+ getFirstBufferFromBufferListLocked(streamSet.freeBuffers, firstOtherStreamId);
+ }
}
}
} else {
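To make the deadlock-avoidance comment in the hunk above concrete: the manager must not hold its own lock while calling detachBuffer() on a stream, because the stream may simultaneously hold its own lock and be calling back into the manager. A minimal, hypothetical sketch of that lock-drop pattern in plain C++ (std::mutex instead of the Android Mutex; all names are illustrative only):

#include <mutex>

class StreamSketch;  // forward declaration for the sketch

class ManagerSketch {
  public:
    void reclaimBufferFrom(StreamSketch& stream);

  private:
    std::mutex mLock;  // guards the manager's bookkeeping
};

class StreamSketch {
  public:
    // May itself take the stream's lock and call back into the manager
    // (e.g. to signal a buffer release) from another thread.
    void detachBuffer() {}
};

void ManagerSketch::reclaimBufferFrom(StreamSketch& stream) {
    std::unique_lock<std::mutex> lock(mLock);
    // ... pick the stream to reclaim from, under mLock ...

    lock.unlock();           // drop the manager lock before calling into the stream
    stream.detachBuffer();   // avoids manager-lock -> stream-lock vs stream-lock -> manager-lock
    lock.lock();             // re-acquire to finish the bookkeeping

    // ... decrement that stream's attached-buffer count, under mLock ...
}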
@@ -265,6 +324,37 @@
return OK;
}
+status_t Camera3BufferManager::onBufferReleased(int streamId, int streamSetId) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mLock);
+
+ ALOGV("Stream %d set %d: Buffer released", streamId, streamSetId);
+ if (mAllocator == NULL) {
+ ALOGE("%s: allocator is NULL, buffer manager is bad state.", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ if (!checkIfStreamRegisteredLocked(streamId, streamSetId)) {
+ ALOGV("%s: signaling buffer release for an already unregistered stream "
+ "(stream %d with set id %d)", __FUNCTION__, streamId, streamSetId);
+ return OK;
+ }
+
+ if (mGrallocVersion < HARDWARE_DEVICE_API_VERSION(1,0)) {
+ StreamSet& streamSet = mStreamSetMap.editValueFor(streamSetId);
+ BufferCountMap& handOutBufferCounts = streamSet.handoutBufferCountMap;
+ size_t& bufferCount = handOutBufferCounts.editValueFor(streamId);
+ bufferCount--;
+ ALOGV("%s: Stream %d set %d: Buffer count now %zu", __FUNCTION__, streamId, streamSetId,
+ bufferCount);
+ } else {
+ // TODO: implement gralloc V1 support
+ return BAD_VALUE;
+ }
+
+ return OK;
+}
+
status_t Camera3BufferManager::returnBufferForStream(int streamId,
int streamSetId, const sp<GraphicBuffer>& buffer, int fenceFd) {
ATRACE_CALL();
@@ -295,10 +385,12 @@
}
}
- // Update the hand-out buffer count for this buffer.
+ // Update the handed out and attached buffer count for this buffer.
BufferCountMap& handOutBufferCounts = streamSet.handoutBufferCountMap;
size_t& bufferCount = handOutBufferCounts.editValueFor(streamId);
bufferCount--;
+ size_t& attachedBufferCount = streamSet.attachedBufferCountMap.editValueFor(streamId);
+ attachedBufferCount--;
} else {
// TODO: implement this.
return BAD_VALUE;
@@ -329,6 +421,13 @@
lines.appendFormat(" stream id: %d, buffer count: %zu.\n",
streamId, bufferCount);
}
+ lines.appendFormat(" Attached buffer counts:\n");
+ for (size_t m = 0; m < mStreamSetMap[i].attachedBufferCountMap.size(); m++) {
+ int streamId = mStreamSetMap[i].attachedBufferCountMap.keyAt(m);
+ size_t bufferCount = mStreamSetMap[i].attachedBufferCountMap.valueAt(m);
+ lines.appendFormat(" stream id: %d, attached buffer count: %zu.\n",
+ streamId, bufferCount);
+ }
lines.appendFormat(" Free buffer count: %zu\n",
mStreamSetMap[i].freeBuffers.size());
@@ -394,9 +493,6 @@
}
}
- ALOGW_IF(i == bufferList.end(), "%s: Unable to find buffers for stream %d",
- __FUNCTION__, streamId);
-
return OK;
}
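Taken together, the Camera3BufferManager changes above track two counts per stream: how many buffers it currently has handed out, and how many are attached to its buffer queue at all (free or handed out). The following standalone sketch (hypothetical names, std::map instead of the Android KeyedVector, no locking) models that bookkeeping, the resulting OK / ALREADY_EXISTS decision, and the total used for the water-mark check:

#include <cstddef>
#include <map>

struct StreamCounters {
    size_t handedOut = 0;  // buffers currently dequeued by the stream (handoutBufferCountMap)
    size_t attached = 0;   // buffers attached to the stream's queue, free or handed out
};

class BufferAccountingSketch {
  public:
    enum class Acquire { AllocateAndAttach, ReuseAttached };

    // Mirrors the decision in getBufferForStream().
    Acquire acquireBuffer(int streamId) {
        StreamCounters& c = mCounters[streamId];
        if (c.attached > c.handedOut) {
            ++c.handedOut;                    // a free buffer is already attached
            return Acquire::ReuseAttached;    // -> ALREADY_EXISTS
        }
        ++c.handedOut;
        ++c.attached;                         // a new buffer is allocated and attached
        return Acquire::AllocateAndAttach;    // -> OK
    }

    // onBufferReleased(): the consumer gave the buffer back to the stream's
    // queue, so it is no longer handed out, but it stays attached.
    void bufferReleased(int streamId) { --mCounters[streamId].handedOut; }

    // returnBufferForStream(): the buffer left the stream entirely.
    void bufferReturned(int streamId) {
        StreamCounters& c = mCounters[streamId];
        --c.handedOut;
        --c.attached;
    }

    // detachBuffer() on an inactive stream: frees an attached-but-free buffer.
    void bufferDetached(int streamId) { --mCounters[streamId].attached; }

    // Compared against the water mark (the real code also adds the size of
    // the manager's own free-buffer list).
    size_t totalAttached() const {
        size_t total = 0;
        for (const auto& entry : mCounters) total += entry.second.attached;
        return total;
    }

  private:
    std::map<int, StreamCounters> mCounters;
};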
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.h b/services/camera/libcameraservice/device3/Camera3BufferManager.h
index 7942ae6..ab6541e 100644
--- a/services/camera/libcameraservice/device3/Camera3BufferManager.h
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.h
@@ -29,6 +29,7 @@
namespace camera3 {
struct StreamInfo;
+class Camera3OutputStream;
/**
* A class managing the graphic buffers that is used by camera output streams. It allocates and
@@ -81,7 +82,7 @@
* and other streams that were already registered with the same stream set
* ID.
*/
- status_t registerStream(const StreamInfo &streamInfo);
+ status_t registerStream(wp<Camera3OutputStream>& stream, const StreamInfo &streamInfo);
/**
* This method unregisters a stream from this buffer manager.
@@ -114,6 +115,8 @@
* Return values:
*
* OK: Getting buffer for this stream was successful.
+ * ALREADY_EXISTS: Enough free buffers are already attached to this output buffer queue;
+ * the caller should just dequeue from the buffer queue.
* BAD_VALUE: stream ID or streamSetId are invalid, or stream ID and stream set ID
* combination doesn't match what was registered, or this stream wasn't registered
* to this buffer manager before.
@@ -122,6 +125,28 @@
status_t getBufferForStream(int streamId, int streamSetId, sp<GraphicBuffer>* gb, int* fenceFd);
/**
+ * This method notifies the manager that a buffer has been released by the consumer.
+ *
+ * The buffer is not returned to the buffer manager; it remains attached to its stream
+ * and is available for that stream to dequeue.
+ *
+ * The notification lets the manager know how many buffers are directly available to the stream.
+ *
+ * If onBufferReleased is called for a given released buffer,
+ * returnBufferForStream must not be called for that buffer until it has
+ * been reused. If the manager needs the released buffer instead, it will
+ * call detachBuffer on the stream.
+ *
+ * Return values:
+ *
+ * OK: Buffer release was processed successfully.
+ * BAD_VALUE: stream ID or streamSetId are invalid, or stream ID and stream set ID
+ * combination doesn't match what was registered, or this stream wasn't registered
+ * to this buffer manager before.
+ */
+ status_t onBufferReleased(int streamId, int streamSetId);
+
+ /**
* This method returns a buffer for a stream to this buffer manager.
*
* When a buffer is returned, it is treated as a free buffer and may either be reused for future
@@ -245,6 +270,12 @@
* The count of the buffers that were handed out to the streams of this set.
*/
BufferCountMap handoutBufferCountMap;
+ /**
+ * The count of the buffers that are attached to the streams of this set.
+ * An attached buffer may be free or handed out.
+ */
+ BufferCountMap attachedBufferCountMap;
+
StreamSet() {
allocatedBufferWaterMark = 0;
maxAllowedBufferCount = 0;
@@ -256,6 +287,7 @@
*/
typedef int StreamSetId;
KeyedVector<StreamSetId, StreamSet> mStreamSetMap;
+ KeyedVector<StreamId, wp<Camera3OutputStream>> mStreamMap;
// TODO: There is no easy way to query the Gralloc version in this code yet, we have different
// code paths for different Gralloc versions, hardcode something here for now.
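As a rough illustration of the contract documented in this header, a caller of getBufferForStream() is expected to attach a newly allocated buffer on OK and simply dequeue an already-attached one on ALREADY_EXISTS. A self-contained sketch with stand-in types and status values (not the real Android ones):

#include <cstdio>

enum Status { OK = 0, ALREADY_EXISTS = 1, UNKNOWN_ERROR = -1 };  // stand-in values

struct BufferManagerStub {
    // Stand-in for Camera3BufferManager::getBufferForStream().
    Status getBufferForStream(int /*streamId*/, int /*streamSetId*/) {
        return ALREADY_EXISTS;
    }
};

// Mirrors what a stream does when it needs its next output buffer.
bool acquireOutputBuffer(BufferManagerStub& manager, int streamId, int streamSetId) {
    switch (manager.getBufferForStream(streamId, streamSetId)) {
        case OK:
            // New buffer allocated by the manager: attach it to the stream's
            // buffer queue, then dequeue it as usual.
            printf("stream %d: attach newly allocated buffer\n", streamId);
            return true;
        case ALREADY_EXISTS:
            // Enough free buffers are already attached: skip the attach and
            // just dequeue one from the buffer queue.
            printf("stream %d: dequeue an already-attached buffer\n", streamId);
            return true;
        default:
            // Anything else is an error for this buffer request.
            printf("stream %d: failed to obtain a buffer\n", streamId);
            return false;
    }
}

int main() {
    BufferManagerStub manager;
    acquireOutputBuffer(manager, /*streamId*/ 0, /*streamSetId*/ 0);
    return 0;
}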
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 140b424..bbe7317 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2733,6 +2733,7 @@
mParent(parent),
mStatusTracker(statusTracker),
mHal3Device(hal3Device),
+ mListener(nullptr),
mId(getId(parent)),
mReconfigured(false),
mDoPause(false),
@@ -3790,7 +3791,8 @@
*/
Camera3Device::PreparerThread::PreparerThread() :
- Thread(/*canCallJava*/false), mActive(false), mCancelNow(false) {
+ Thread(/*canCallJava*/false), mListener(nullptr),
+ mActive(false), mCancelNow(false) {
}
Camera3Device::PreparerThread::~PreparerThread() {
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index c74038b..5123785 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -76,6 +76,13 @@
return OK;
}
+status_t Camera3DummyStream::detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) {
+ (void) buffer;
+ (void) fenceFd;
+ // Do nothing
+ return OK;
+}
+
status_t Camera3DummyStream::configureQueueLocked() {
// Do nothing
return OK;
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 6c8859c..639619e 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -54,6 +54,8 @@
status_t setTransform(int transform);
+ virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
+
/**
* Return if this output stream is for video encoding.
*/
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index 4824974..cb39244 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -124,6 +124,7 @@
switch (mState) {
case STATE_IN_RECONFIG:
case STATE_CONFIGURED:
+ case STATE_ABANDONED:
// OK
break;
default:
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index dcadf36..299435a 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -156,24 +156,35 @@
ANativeWindowBuffer* anb;
int fenceFd = -1;
+ bool gotBufferFromManager = false;
+
if (mUseBufferManager) {
sp<GraphicBuffer> gb;
res = mBufferManager->getBufferForStream(getId(), getStreamSetId(), &gb, &fenceFd);
- if (res != OK) {
+ if (res == OK) {
+ // Attach this buffer to the bufferQueue: the buffer will be in dequeue state after a
+ // successful return.
+ anb = gb.get();
+ res = mConsumer->attachBuffer(anb);
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
+ gotBufferFromManager = true;
+ ALOGV("Stream %d: Attached new buffer", getId());
+ } else if (res == ALREADY_EXISTS) {
+ // Have sufficient free buffers already attached, can just
+ // dequeue from buffer queue
+ ALOGV("Stream %d: Reusing attached buffer", getId());
+ gotBufferFromManager = false;
+ } else if (res != OK) {
ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
return res;
}
- // Attach this buffer to the bufferQueue: the buffer will be in dequeue state after a
- // successful return.
- anb = gb.get();
- res = mConsumer->attachBuffer(anb);
- if (res != OK) {
- ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
- __FUNCTION__, mId, strerror(-res), res);
- return res;
- }
- } else {
+ }
+ if (!gotBufferFromManager) {
/**
* Release the lock briefly to avoid deadlock for below scenario:
* Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
@@ -260,6 +271,7 @@
*/
if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
// Cancel buffer
+ ALOGW("A frame is dropped for stream %d", mId);
res = currentConsumer->cancelBuffer(currentConsumer.get(),
container_of(buffer.buffer, ANativeWindowBuffer, handle),
anwReleaseFence);
@@ -469,10 +481,15 @@
* HAL3.2 devices may not support the dynamic buffer registration.
*/
if (mBufferManager != 0 && mSetId > CAMERA3_STREAM_SET_ID_INVALID) {
+ uint32_t consumerUsage = 0;
+ getEndpointUsage(&consumerUsage);
StreamInfo streamInfo(
getId(), getStreamSetId(), getWidth(), getHeight(), getFormat(), getDataSpace(),
- camera3_stream::usage, mTotalBufferCount, /*isConfigured*/true);
- res = mBufferManager->registerStream(streamInfo);
+ camera3_stream::usage | consumerUsage, mTotalBufferCount,
+ /*isConfigured*/true);
+ wp<Camera3OutputStream> weakThis(this);
+ res = mBufferManager->registerStream(weakThis,
+ streamInfo);
if (res == OK) {
// Disable buffer allocation for this BufferQueue, buffer manager will take over
// the buffer allocation responsibility.
@@ -608,34 +625,49 @@
return;
}
+ ALOGV("Stream %d: Buffer released", stream->getId());
+ status_t res = stream->mBufferManager->onBufferReleased(
+ stream->getId(), stream->getStreamSetId());
+ if (res != OK) {
+ ALOGE("%s: signaling buffer release to buffer manager failed: %s (%d).", __FUNCTION__,
+ strerror(-res), res);
+ stream->mState = STATE_ERROR;
+ }
+}
+
+status_t Camera3OutputStream::detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) {
+ Mutex::Autolock l(mLock);
+
+ ALOGV("Stream %d: detachBuffer", getId());
+ if (buffer == nullptr) {
+ return BAD_VALUE;
+ }
+
sp<Fence> fence;
- sp<GraphicBuffer> buffer;
- int fenceFd = -1;
- status_t res = stream->mConsumer->detachNextBuffer(&buffer, &fence);
+ status_t res = mConsumer->detachNextBuffer(buffer, &fence);
if (res == NO_MEMORY) {
// This may rarely happen, which indicates that the released buffer was freed by other
// call (e.g., attachBuffer, dequeueBuffer etc.) before reaching here. We should notify the
// buffer manager that this buffer has been freed. It's not fatal, but should be avoided,
// therefore log a warning.
- buffer = 0;
+ *buffer = 0;
ALOGW("%s: the released buffer has already been freed by the buffer queue!", __FUNCTION__);
} else if (res != OK) {
- // Other errors are fatal.
+ // Treat other errors as abandonment
ALOGE("%s: detach next buffer failed: %s (%d).", __FUNCTION__, strerror(-res), res);
- stream->mState = STATE_ERROR;
- return;
+ mState = STATE_ABANDONED;
+ return res;
}
- if (fence!= 0 && fence->isValid()) {
- fenceFd = fence->dup();
+ if (fenceFd != nullptr) {
+ if (fence!= 0 && fence->isValid()) {
+ *fenceFd = fence->dup();
+ } else {
+ *fenceFd = -1;
+ }
}
- res = stream->mBufferManager->returnBufferForStream(stream->getId(), stream->getStreamSetId(),
- buffer, fenceFd);
- if (res != OK) {
- ALOGE("%s: return buffer to buffer manager failed: %s (%d).", __FUNCTION__,
- strerror(-res), res);
- stream->mState = STATE_ERROR;
- }
+
+ return OK;
}
bool Camera3OutputStream::isConsumerConfigurationDeferred() const {
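The Camera3OutputStream changes above split the old release path in two: the release listener now only notifies the buffer manager via onBufferReleased(), and the buffer is physically handed over only if the manager later calls detachBuffer(). A hypothetical stand-in model of that division of labour (plain C++, names illustrative only):

#include <cstdio>

struct ManagerStub {
    void onBufferReleased(int streamId, int streamSetId) {
        // Bookkeeping only; the buffer itself stays attached to the stream.
        printf("manager: stream %d (set %d) released a buffer\n", streamId, streamSetId);
    }
};

struct StreamStub {
    int id = 0;
    int setId = 0;
    int attachedFreeBuffers = 0;
    ManagerStub* manager = nullptr;

    // Called when the consumer releases a buffer back to the stream's queue.
    void onConsumerReleasedBuffer() {
        ++attachedFreeBuffers;
        manager->onBufferReleased(id, setId);  // notify, do not return the buffer
    }

    // Called by the manager only when it wants to reclaim the memory.
    bool detachBuffer() {
        if (attachedFreeBuffers == 0) return false;
        --attachedFreeBuffers;
        printf("stream %d: detached one free buffer for the manager\n", id);
        return true;
    }
};

int main() {
    ManagerStub manager;
    StreamStub stream;
    stream.manager = &manager;
    stream.onConsumerReleasedBuffer();  // buffer stays attached, manager just notified
    stream.detachBuffer();              // manager reclaims it later only if needed
    return 0;
}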
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 2feca27..5507cfc 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -151,6 +151,8 @@
wp<Camera3OutputStream> mParent;
};
+ virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
+
/**
* Set the graphic buffer manager to get/return the stream buffers.
*
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index 7c09c40..3f83c89 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -49,6 +49,16 @@
* Set the consumer surface to the output stream.
*/
virtual status_t setConsumer(sp<Surface> consumer) = 0;
+
+ /**
+ * Detach an unused buffer from the stream.
+ *
+ * buffer must be non-null; fenceFd may be null, and if it is non-null but
+ * there is no valid fence associated with the detached buffer, it will be
+ * set to -1.
+ *
+ */
+ virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) = 0;
};
} // namespace camera3
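A skeletal, hypothetical implementation of the detachBuffer() contract described above (stand-in types instead of sp<GraphicBuffer> and Fence), showing the required null checks and the -1 fence convention:

#include <memory>

struct GraphicBufferStub {};                             // stand-in for GraphicBuffer
struct FenceStub { bool valid = false; int fd = -1; };   // stand-in for Fence

enum Status { OK = 0, BAD_VALUE = -22 };                 // stand-in status codes

// |buffer| must be non-null. |fenceFd| may be null; when it is non-null but no
// valid fence accompanies the detached buffer, it is set to -1.
Status detachBufferSketch(std::shared_ptr<GraphicBufferStub>* buffer, int* fenceFd,
                          const FenceStub* fence) {
    if (buffer == nullptr) {
        return BAD_VALUE;                                // caller must supply an out pointer
    }
    *buffer = std::make_shared<GraphicBufferStub>();     // pretend a buffer was detached
    if (fenceFd != nullptr) {
        *fenceFd = (fence != nullptr && fence->valid) ? fence->fd : -1;
    }
    return OK;
}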