Merge "Fix initial audio glitch on startup" into lmp-dev
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index c016e52..4e36160 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -37,6 +37,7 @@
parcel->readInt32(&afTriggerId);
parcel->readInt32(&precaptureTriggerId);
parcel->readInt64(&frameNumber);
+ parcel->readInt32(&partialResultCount);
return OK;
}
@@ -52,6 +53,7 @@
parcel->writeInt32(afTriggerId);
parcel->writeInt32(precaptureTriggerId);
parcel->writeInt64(frameNumber);
+ parcel->writeInt32(partialResultCount);
return OK;
}
diff --git a/include/camera/CaptureResult.h b/include/camera/CaptureResult.h
index 6e47a16..0be7d6f 100644
--- a/include/camera/CaptureResult.h
+++ b/include/camera/CaptureResult.h
@@ -53,6 +53,11 @@
int64_t frameNumber;
/**
+ * The partial result count (index) for this capture result.
+ */
+ int32_t partialResultCount;
+
+ /**
* Constructor initializes object as invalid by setting requestId to be -1.
*/
CaptureResultExtras()
@@ -60,7 +65,8 @@
burstId(0),
afTriggerId(0),
precaptureTriggerId(0),
- frameNumber(0) {
+ frameNumber(0),
+ partialResultCount(0) {
}
/**
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index a8f4605..31312d3 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -163,7 +163,8 @@
audio_devices_t *pDevices,
uint32_t *pSamplingRate,
audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask) = 0;
+ audio_channel_mask_t *pChannelMask,
+ audio_input_flags_t flags) = 0;
virtual status_t closeInput(audio_io_handle_t input) = 0;
virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index d38d976..087d016 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -166,6 +166,13 @@
kKeyCryptoDefaultIVSize = 'cryS', // int32_t
kKeyPssh = 'pssh', // raw data
+
+ // Please see MediaFormat.KEY_IS_AUTOSELECT.
+ kKeyTrackIsAutoselect = 'auto', // bool (int32_t)
+ // Please see MediaFormat.KEY_IS_DEFAULT.
+ kKeyTrackIsDefault = 'dflt', // bool (int32_t)
+ // Similar to MediaFormat.KEY_IS_FORCED_SUBTITLE but pertains to av tracks as well.
+ kKeyTrackIsForced = 'frcd', // bool (int32_t)
};
enum {
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 5cf42f7..7795fdb 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -532,7 +532,8 @@
audio_devices_t *pDevices,
uint32_t *pSamplingRate,
audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask)
+ audio_channel_mask_t *pChannelMask,
+ audio_input_flags_t flags)
{
Parcel data, reply;
audio_devices_t devices = pDevices != NULL ? *pDevices : AUDIO_DEVICE_NONE;
@@ -547,6 +548,7 @@
data.writeInt32(samplingRate);
data.writeInt32(format);
data.writeInt32(channelMask);
+ data.writeInt32(flags);
remote()->transact(OPEN_INPUT, data, &reply);
audio_io_handle_t input = (audio_io_handle_t) reply.readInt32();
devices = (audio_devices_t)reply.readInt32();
@@ -1157,12 +1159,14 @@
uint32_t samplingRate = data.readInt32();
audio_format_t format = (audio_format_t) data.readInt32();
audio_channel_mask_t channelMask = (audio_channel_mask_t)data.readInt32();
+ audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
audio_io_handle_t input = openInput(module,
&devices,
&samplingRate,
&format,
- &channelMask);
+ &channelMask,
+ flags);
reply->writeInt32((int32_t) input);
reply->writeInt32(devices);
reply->writeInt32(samplingRate);
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 388f77a..cc0cb01 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -81,11 +81,12 @@
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
- sp<MediaSource> track;
+ sp<MediaSource> track = extractor->getTrack(i);
if (!strncasecmp(mime, "audio/", 6)) {
if (mAudioTrack.mSource == NULL) {
- mAudioTrack.mSource = track = extractor->getTrack(i);
+ mAudioTrack.mIndex = i;
+ mAudioTrack.mSource = track;
if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
mAudioIsVorbis = true;
@@ -95,11 +96,13 @@
}
} else if (!strncasecmp(mime, "video/", 6)) {
if (mVideoTrack.mSource == NULL) {
- mVideoTrack.mSource = track = extractor->getTrack(i);
+ mVideoTrack.mIndex = i;
+ mVideoTrack.mSource = track;
}
}
if (track != NULL) {
+ mSources.push(track);
int64_t durationUs;
if (meta->findInt64(kKeyDuration, &durationUs)) {
if (durationUs > mDurationUs) {
@@ -194,6 +197,56 @@
return OK;
}
+size_t NuPlayer::GenericSource::getTrackCount() const {
+ return mSources.size();
+}
+
+sp<AMessage> NuPlayer::GenericSource::getTrackInfo(size_t trackIndex) const {
+ size_t trackCount = mSources.size();
+ if (trackIndex >= trackCount) {
+ return NULL;
+ }
+
+ sp<AMessage> format = new AMessage();
+ sp<MetaData> meta = mSources.itemAt(trackIndex)->getFormat();
+
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+ int32_t trackType;
+ if (!strncasecmp(mime, "video/", 6)) {
+ trackType = MEDIA_TRACK_TYPE_VIDEO;
+ } else if (!strncasecmp(mime, "audio/", 6)) {
+ trackType = MEDIA_TRACK_TYPE_AUDIO;
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP)) {
+ trackType = MEDIA_TRACK_TYPE_TIMEDTEXT;
+ } else {
+ trackType = MEDIA_TRACK_TYPE_UNKNOWN;
+ }
+ format->setInt32("type", trackType);
+
+ const char *lang;
+ if (!meta->findCString(kKeyMediaLanguage, &lang)) {
+ lang = "und";
+ }
+ format->setString("language", lang);
+
+ if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
+ format->setString("mime", mime);
+
+ int32_t isAutoselect = 1, isDefault = 0, isForced = 0;
+ meta->findInt32(kKeyTrackIsAutoselect, &isAutoselect);
+ meta->findInt32(kKeyTrackIsDefault, &isDefault);
+ meta->findInt32(kKeyTrackIsForced, &isForced);
+
+ format->setInt32("auto", !!isAutoselect);
+ format->setInt32("default", !!isDefault);
+ format->setInt32("forced", !!isForced);
+ }
+
+ return format;
+}
+
status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs) {
if (mVideoTrack.mSource != NULL) {
int64_t actualTimeUs;
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 20d597e..e0cd20f 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -50,6 +50,8 @@
virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
virtual status_t getDuration(int64_t *durationUs);
+ virtual size_t getTrackCount() const;
+ virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
virtual status_t seekTo(int64_t seekTimeUs);
protected:
@@ -58,7 +60,10 @@
virtual sp<MetaData> getFormatMeta(bool audio);
private:
+ Vector<sp<MediaSource> > mSources;
+
struct Track {
+ size_t mIndex;
sp<MediaSource> mSource;
sp<AnotherPacketSource> mPackets;
};
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 4f7668c..efd852c 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -730,6 +730,9 @@
key.tolower();
const AString &codecs = unquoteString(val);
+ if (meta->get() == NULL) {
+ *meta = new AMessage;
+ }
(*meta)->setString(key.c_str(), codecs.c_str());
} else if (!strcasecmp("audio", key.c_str())
|| !strcasecmp("video", key.c_str())
@@ -753,6 +756,9 @@
}
key.tolower();
+ if (meta->get() == NULL) {
+ *meta = new AMessage;
+ }
(*meta)->setString(key.c_str(), groupID.c_str());
}
}
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 5fd7ce8..1ad6285 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1820,7 +1820,8 @@
audio_devices_t *pDevices,
uint32_t *pSamplingRate,
audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask)
+ audio_channel_mask_t *pChannelMask,
+ audio_input_flags_t flags)
{
struct audio_config config;
memset(&config, 0, sizeof(config));
@@ -1847,15 +1848,15 @@
audio_io_handle_t id = nextUniqueId();
audio_stream_in_t *inStream = NULL;
- audio_input_flags_t flags = AUDIO_INPUT_FLAG_FAST; // FIXME until added to openInput()
status_t status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
&inStream, flags);
ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %#x, Channels %x, "
- "status %d",
+ "flags %#x, status %d",
inStream,
config.sample_rate,
config.format,
config.channel_mask,
+ flags,
status);
// If the input could not be opened with the requested parameters and we can handle the
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index be19554..bae18fd 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -179,7 +179,8 @@
audio_devices_t *pDevices,
uint32_t *pSamplingRate,
audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask);
+ audio_channel_mask_t *pChannelMask,
+ audio_input_flags_t flags);
virtual status_t closeInput(audio_io_handle_t input);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index a396aaf..e3daccc 100755
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -4783,7 +4783,7 @@
, mPipeFramesP2(0)
// mPipeMemory
// mFastCaptureNBLogWriter
- , mFastTrackAvail(true)
+ , mFastTrackAvail(false)
{
snprintf(mName, kNameLength, "AudioIn_%X", id);
mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
@@ -4895,6 +4895,7 @@
// FIXME
#endif
+ mFastTrackAvail = true;
}
failed: ;
@@ -5745,6 +5746,7 @@
} else {
dprintf(fd, " No active record clients\n");
}
+ dprintf(fd, " Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
dprintf(fd, " Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
dumpBase(fd, args);
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index cacb066..eb3e6b4 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1987,12 +1987,12 @@
/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
{
- result.append(" Active Client Fmt Chn mask Session S Server fCount Resampling\n");
+ result.append(" Active Client Fmt Chn mask Session S Server fCount SRate\n");
}
void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size, bool active)
{
- snprintf(buffer, size, " %6s %6u %3u %08X %7u %1d %08X %6zu %10d\n",
+ snprintf(buffer, size, " %6s %6u %3u %08X %7u %1d %08X %6zu %5u\n",
active ? "yes" : "no",
(mClient == 0) ? getpid_cached : mClient->pid(),
mFormat,
@@ -2001,7 +2001,7 @@
mState,
mCblk->mServer,
mFrameCount,
- mResampler != NULL);
+ mSampleRate);
}
diff --git a/services/audiopolicy/AudioPolicyClientImpl.cpp b/services/audiopolicy/AudioPolicyClientImpl.cpp
index c322d92..b5af089 100644
--- a/services/audiopolicy/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/AudioPolicyClientImpl.cpp
@@ -101,7 +101,8 @@
audio_devices_t *pDevices,
uint32_t *pSamplingRate,
audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask)
+ audio_channel_mask_t *pChannelMask,
+ audio_input_flags_t flags)
{
sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
if (af == 0) {
@@ -109,7 +110,7 @@
return 0;
}
- return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
+ return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask, flags);
}
status_t AudioPolicyService::AudioPolicyClient::closeInput(audio_io_handle_t input)
diff --git a/services/audiopolicy/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
index 53f3e2d..97e12cc 100644
--- a/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
+++ b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
@@ -158,7 +158,8 @@
return 0;
}
- return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask);
+ return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask,
+ AUDIO_INPUT_FLAG_FAST /*FIXME*/);
}
audio_io_handle_t aps_open_input_on_module(void *service __unused,
@@ -174,7 +175,8 @@
return 0;
}
- return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
+ return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask,
+ AUDIO_INPUT_FLAG_FAST /*FIXME*/);
}
int aps_close_input(void *service __unused, audio_io_handle_t input)
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 33e4397..ed66e58 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -238,7 +238,8 @@
audio_devices_t *pDevices,
uint32_t *pSamplingRate,
audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask) = 0;
+ audio_channel_mask_t *pChannelMask,
+ audio_input_flags_t flags) = 0;
// closes an audio input
virtual status_t closeInput(audio_io_handle_t input) = 0;
//
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 1b4796b..cca1b34 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -560,6 +560,13 @@
forceVolumeReeval = true;
mForceUse[usage] = config;
break;
+ case AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO:
+ if (config != AUDIO_POLICY_FORCE_NONE &&
+ config != AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED) {
+ ALOGW("setForceUse() invalid config %d forHDMI_SYSTEM_AUDIO", config);
+ }
+ mForceUse[usage] = config;
+ break;
default:
ALOGW("setForceUse() invalid usage %d", usage);
break;
@@ -1104,7 +1111,8 @@
&inputDesc->mDevice,
&inputDesc->mSamplingRate,
&inputDesc->mFormat,
- &inputDesc->mChannelMask);
+ &inputDesc->mChannelMask,
+ AUDIO_INPUT_FLAG_FAST /*FIXME*/);
// only accept input with the exact requested set of parameters
if (input == 0 ||
@@ -1529,6 +1537,9 @@
result.append(buffer);
snprintf(buffer, SIZE, " Force use for system %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM]);
result.append(buffer);
+ snprintf(buffer, SIZE, " Force use for hdmi system audio %d\n",
+ mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO]);
+ result.append(buffer);
snprintf(buffer, SIZE, " Available output devices:\n");
result.append(buffer);
@@ -2322,7 +2333,8 @@
&inputDesc->mDevice,
&inputDesc->mSamplingRate,
&inputDesc->mFormat,
- &inputDesc->mChannelMask);
+ &inputDesc->mChannelMask,
+ AUDIO_INPUT_FLAG_FAST /*FIXME*/);
if (input != 0) {
for (size_t k = 0; k < inProfile->mSupportedDevices.size(); k++) {
@@ -2888,7 +2900,8 @@
&desc->mDevice,
&desc->mSamplingRate,
&desc->mFormat,
- &desc->mChannelMask);
+ &desc->mChannelMask,
+ AUDIO_INPUT_FLAG_FAST /*FIXME*/);
if (input != 0) {
if (!address.isEmpty()) {
@@ -3554,10 +3567,10 @@
}
int device3 = AUDIO_DEVICE_NONE;
if (strategy == STRATEGY_MEDIA) {
- // ARC, SPDIF and LINE can co-exist with others.
+ // ARC, SPDIF and AUX_LINE can co-exist with others.
device3 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_HDMI_ARC;
device3 |= (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPDIF);
- device3 |= (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_LINE);
+ device3 |= (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_LINE);
}
device2 |= device3;
@@ -3565,6 +3578,13 @@
// STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
device |= device2;
+ // If HDMI system audio mode is on, remove the speaker from the output list.
+ if ((strategy == STRATEGY_MEDIA) &&
+ (mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
+ AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
+ device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+ }
+
if (device) break;
device = mDefaultOutputDevice->mDeviceType;
if (device == AUDIO_DEVICE_NONE) {
diff --git a/services/audiopolicy/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h
index 380fd5e..4a81423 100755
--- a/services/audiopolicy/AudioPolicyService.h
+++ b/services/audiopolicy/AudioPolicyService.h
@@ -388,7 +388,8 @@
audio_devices_t *pDevices,
uint32_t *pSamplingRate,
audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask);
+ audio_channel_mask_t *pChannelMask,
+ audio_input_flags_t flags);
// closes an audio input
virtual status_t closeInput(audio_io_handle_t input);
//
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 3de5d90..312a78c 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -40,7 +40,12 @@
{
SharedParameters::Lock l(client->getParameters());
- mUsePartialQuirk = l.mParameters.quirks.partialResults;
+
+ if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+ mUsePartialResult = (mNumPartialResults > 1);
+ } else {
+ mUsePartialResult = l.mParameters.quirks.partialResults;
+ }
// Initialize starting 3A state
m3aState.afTriggerId = l.mParameters.afTriggerCounter;
@@ -63,17 +68,21 @@
return false;
}
- bool partialResult = false;
- if (mUsePartialQuirk) {
- camera_metadata_entry_t entry;
- entry = frame.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
- if (entry.count > 0 &&
- entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
- partialResult = true;
+ bool isPartialResult = false;
+ if (mUsePartialResult) {
+ if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+ isPartialResult = frame.mResultExtras.partialResultCount < mNumPartialResults;
+ } else {
+ camera_metadata_entry_t entry;
+ entry = frame.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+ if (entry.count > 0 &&
+ entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+ isPartialResult = true;
+ }
}
}
- if (!partialResult && processFaceDetect(frame.mMetadata, client) != OK) {
+ if (!isPartialResult && processFaceDetect(frame.mMetadata, client) != OK) {
return false;
}
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 4afca50..68cf55b 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -91,8 +91,8 @@
}
} m3aState;
- // Whether the partial result quirk is enabled for this device
- bool mUsePartialQuirk;
+ // Whether partial results are enabled for this device
+ bool mUsePartialResult;
// Track most recent frame number for which 3A notifications were sent for.
// Used to filter against sending 3A notifications for the same frame
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index 79f75a5..ab61c44 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -94,14 +94,14 @@
entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
nsecs_t timestamp = entry.data.i64[0];
if (entry.count == 0) {
- ALOGE("%s: metadata doesn't have timestamp, skip this result");
+ ALOGE("%s: metadata doesn't have timestamp, skip this result", __FUNCTION__);
return;
}
(void)timestamp;
entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
if (entry.count == 0) {
- ALOGE("%s: metadata doesn't have frame number, skip this result");
+ ALOGE("%s: metadata doesn't have frame number, skip this result", __FUNCTION__);
return;
}
int32_t frameNumber = entry.data.i32[0];
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index c7bd886..037695d 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -252,6 +252,10 @@
*/
virtual status_t flush(int64_t *lastFrameNumber = NULL) = 0;
+ /**
+ * Get the HAL device version.
+ */
+ virtual uint32_t getDeviceVersion() = 0;
};
}; // namespace android
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
index 482f687..29eb78f 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -29,7 +29,17 @@
FrameProcessorBase::FrameProcessorBase(wp<CameraDeviceBase> device) :
Thread(/*canCallJava*/false),
- mDevice(device) {
+ mDevice(device),
+ mNumPartialResults(1) {
+ sp<CameraDeviceBase> cameraDevice = device.promote();
+ if (cameraDevice != 0 &&
+ cameraDevice->getDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+ CameraMetadata staticInfo = cameraDevice->info();
+ camera_metadata_entry_t entry = staticInfo.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
+ if (entry.count > 0) {
+ mNumPartialResults = entry.data.i32[0];
+ }
+ }
}
FrameProcessorBase::~FrameProcessorBase() {
@@ -160,14 +170,18 @@
camera_metadata_ro_entry_t entry;
- // Quirks: Don't deliver partial results to listeners that don't want them
- bool quirkIsPartial = false;
- entry = result.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
- if (entry.count != 0 &&
- entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
- ALOGV("%s: Camera %d: Not forwarding partial result to listeners",
- __FUNCTION__, device->getId());
- quirkIsPartial = true;
+ // Check if this result is partial.
+ bool isPartialResult = false;
+ if (device->getDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+ isPartialResult = result.mResultExtras.partialResultCount < mNumPartialResults;
+ } else {
+ entry = result.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+ if (entry.count != 0 &&
+ entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+ ALOGV("%s: Camera %d: This is a partial result",
+ __FUNCTION__, device->getId());
+ isPartialResult = true;
+ }
}
// TODO: instead of getting requestID from CameraMetadata, we should get it
@@ -186,9 +200,10 @@
Mutex::Autolock l(mInputMutex);
List<RangeListener>::iterator item = mRangeListeners.begin();
+ // Don't deliver partial results to listeners that don't want them
while (item != mRangeListeners.end()) {
if (requestId >= item->minId && requestId < item->maxId &&
- (!quirkIsPartial || item->sendPartials)) {
+ (!isPartialResult || item->sendPartials)) {
sp<FilteredListener> listener = item->listener.promote();
if (listener == 0) {
item = mRangeListeners.erase(item);
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.h b/services/camera/libcameraservice/common/FrameProcessorBase.h
index 3649c45..a618d84 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.h
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.h
@@ -71,6 +71,9 @@
};
List<RangeListener> mRangeListeners;
+ // Number of partial results the HAL will potentially send.
+ int32_t mNumPartialResults;
+
void processNewFrames(const sp<CameraDeviceBase> &device);
virtual bool processSingleFrame(CaptureResult &result,
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index c33c166..89c6b10 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -123,6 +123,7 @@
mDeviceInfo = info.static_camera_characteristics;
mHal2Device = device;
+ mDeviceVersion = device->common.version;
return OK;
}
@@ -589,6 +590,11 @@
return waitUntilDrained();
}
+uint32_t Camera2Device::getDeviceVersion() {
+ ATRACE_CALL();
+ return mDeviceVersion;
+}
+
/**
* Camera2Device::MetadataQueue
*/
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index 22a13ac..46182f8 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -78,12 +78,16 @@
buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
// Flush implemented as just a wait
virtual status_t flush(int64_t *lastFrameNumber = NULL);
+ virtual uint32_t getDeviceVersion();
+
private:
const int mId;
camera2_device_t *mHal2Device;
CameraMetadata mDeviceInfo;
+ uint32_t mDeviceVersion;
+
/**
* Queue class for both sending requests to a camera2 device, and for
* receiving frames from a camera2 device.
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 6ceb9d4..3004d3e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -57,7 +57,8 @@
mId(id),
mHal3Device(NULL),
mStatus(STATUS_UNINITIALIZED),
- mUsePartialResultQuirk(false),
+ mUsePartialResult(false),
+ mNumPartialResults(1),
mNextResultFrameNumber(0),
mNextShutterFrameNumber(0),
mListener(NULL)
@@ -180,13 +181,20 @@
mNeedConfig = true;
mPauseStateNotify = false;
- /** Check for quirks */
-
// Will the HAL be sending in early partial result metadata?
- camera_metadata_entry partialResultsQuirk =
- mDeviceInfo.find(ANDROID_QUIRKS_USE_PARTIAL_RESULT);
- if (partialResultsQuirk.count > 0 && partialResultsQuirk.data.u8[0] == 1) {
- mUsePartialResultQuirk = true;
+ if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+ camera_metadata_entry partialResultsCount =
+ mDeviceInfo.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
+ if (partialResultsCount.count > 0) {
+ mNumPartialResults = partialResultsCount.data.i32[0];
+ mUsePartialResult = (mNumPartialResults > 1);
+ }
+ } else {
+ camera_metadata_entry partialResultsQuirk =
+ mDeviceInfo.find(ANDROID_QUIRKS_USE_PARTIAL_RESULT);
+ if (partialResultsQuirk.count > 0 && partialResultsQuirk.data.u8[0] == 1) {
+ mUsePartialResult = true;
+ }
}
return OK;
@@ -1267,6 +1275,12 @@
return res;
}
+uint32_t Camera3Device::getDeviceVersion() {
+ ATRACE_CALL();
+ Mutex::Autolock il(mInterfaceLock);
+ return mDeviceVersion;
+}
+
/**
* Methods called by subclasses
*/
@@ -1545,11 +1559,10 @@
}
/**
- * QUIRK(partial results)
* Check if all 3A fields are ready, and send off a partial 3A-only result
* to the output frame queue
*/
-bool Camera3Device::processPartial3AQuirk(
+bool Camera3Device::processPartial3AResult(
uint32_t frameNumber,
const CameraMetadata& partial, const CaptureResultExtras& resultExtras) {
@@ -1601,7 +1614,7 @@
// In addition to the above fields, this means adding in
// android.request.frameCount
// android.request.requestId
- // android.quirks.partialResult
+ // android.quirks.partialResult (for HAL version below HAL3.2)
const size_t kMinimal3AResultEntries = 10;
@@ -1627,10 +1640,12 @@
return false;
}
- static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_QUIRKS_PARTIAL_RESULT,
- &partialResult, frameNumber)) {
- return false;
+ if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
+ static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
+ if (!insert3AResult(min3AResult.mMetadata, ANDROID_QUIRKS_PARTIAL_RESULT,
+ &partialResult, frameNumber)) {
+ return false;
+ }
}
if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_MODE,
@@ -1668,6 +1683,9 @@
return false;
}
+ // We only send the aggregated partial result when all 3A-related metadata is available,
+ // for both API1 and API2.
+ // TODO: we probably should pass through all partials to API2 unconditionally.
mResultSignal.signal();
return true;
@@ -1726,8 +1744,21 @@
frameNumber);
return;
}
- bool partialResultQuirk = false;
- CameraMetadata collectedQuirkResult;
+
+ // For HAL3.2 or above, if the HAL doesn't support partial results, it must always set
+ // partial_result to 1 when metadata is included in this result.
+ if (!mUsePartialResult &&
+ mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2 &&
+ result->result != NULL &&
+ result->partial_result != 1) {
+ SET_ERR("Result is malformed for frame %d: partial_result %u must be 1"
+ " if partial result is not supported",
+ frameNumber, result->partial_result);
+ return;
+ }
+
+ bool isPartialResult = false;
+ CameraMetadata collectedPartialResult;
CaptureResultExtras resultExtras;
bool hasInputBufferInRequest = false;
@@ -1749,28 +1780,46 @@
", burstId = %" PRId32,
__FUNCTION__, request.resultExtras.requestId, request.resultExtras.frameNumber,
request.resultExtras.burstId);
+ // Always update the partial count to the latest one. When the framework aggregates adjacent
+ // partial results into one, the latest partial count will be used.
+ request.resultExtras.partialResultCount = result->partial_result;
// Check if this result carries only partial metadata
- if (mUsePartialResultQuirk && result->result != NULL) {
- camera_metadata_ro_entry_t partialResultEntry;
- res = find_camera_metadata_ro_entry(result->result,
- ANDROID_QUIRKS_PARTIAL_RESULT, &partialResultEntry);
- if (res != NAME_NOT_FOUND &&
- partialResultEntry.count > 0 &&
- partialResultEntry.data.u8[0] ==
- ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
- // A partial result. Flag this as such, and collect this
- // set of metadata into the in-flight entry.
- partialResultQuirk = true;
- request.partialResultQuirk.collectedResult.append(
+ if (mUsePartialResult && result->result != NULL) {
+ if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+ if (result->partial_result > mNumPartialResults || result->partial_result < 1) {
+ SET_ERR("Result is malformed for frame %d: partial_result %u must be in"
+ " the range of [1, %d] when metadata is included in the result",
+ frameNumber, result->partial_result, mNumPartialResults);
+ return;
+ }
+ isPartialResult = (result->partial_result < mNumPartialResults);
+ request.partialResult.collectedResult.append(
result->result);
- request.partialResultQuirk.collectedResult.erase(
- ANDROID_QUIRKS_PARTIAL_RESULT);
+ } else {
+ camera_metadata_ro_entry_t partialResultEntry;
+ res = find_camera_metadata_ro_entry(result->result,
+ ANDROID_QUIRKS_PARTIAL_RESULT, &partialResultEntry);
+ if (res != NAME_NOT_FOUND &&
+ partialResultEntry.count > 0 &&
+ partialResultEntry.data.u8[0] ==
+ ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+ // A partial result. Flag this as such, and collect this
+ // set of metadata into the in-flight entry.
+ isPartialResult = true;
+ request.partialResult.collectedResult.append(
+ result->result);
+ request.partialResult.collectedResult.erase(
+ ANDROID_QUIRKS_PARTIAL_RESULT);
+ }
+ }
+
+ if (isPartialResult) {
// Fire off a 3A-only result if possible
- if (!request.partialResultQuirk.haveSent3A) {
- request.partialResultQuirk.haveSent3A =
- processPartial3AQuirk(frameNumber,
- request.partialResultQuirk.collectedResult,
+ if (!request.partialResult.haveSent3A) {
+ request.partialResult.haveSent3A =
+ processPartial3AResult(frameNumber,
+ request.partialResult.collectedResult,
request.resultExtras);
}
}
@@ -1786,23 +1835,23 @@
* - CAMERA3_MSG_SHUTTER (expected during normal operation)
* - CAMERA3_MSG_ERROR (expected during flush)
*/
- if (request.requestStatus == OK && timestamp == 0 && !partialResultQuirk) {
+ if (request.requestStatus == OK && timestamp == 0 && !isPartialResult) {
SET_ERR("Called before shutter notify for frame %d",
frameNumber);
return;
}
// Did we get the (final) result metadata for this capture?
- if (result->result != NULL && !partialResultQuirk) {
+ if (result->result != NULL && !isPartialResult) {
if (request.haveResultMetadata) {
SET_ERR("Called multiple times with metadata for frame %d",
frameNumber);
return;
}
- if (mUsePartialResultQuirk &&
- !request.partialResultQuirk.collectedResult.isEmpty()) {
- collectedQuirkResult.acquire(
- request.partialResultQuirk.collectedResult);
+ if (mUsePartialResult &&
+ !request.partialResult.collectedResult.isEmpty()) {
+ collectedPartialResult.acquire(
+ request.partialResult.collectedResult);
}
request.haveResultMetadata = true;
}
@@ -1842,7 +1891,7 @@
// Process the result metadata, if provided
bool gotResult = false;
- if (result->result != NULL && !partialResultQuirk) {
+ if (result->result != NULL && !isPartialResult) {
Mutex::Autolock l(mOutputLock);
gotResult = true;
@@ -1871,8 +1920,8 @@
}
// Append any previous partials to form a complete result
- if (mUsePartialResultQuirk && !collectedQuirkResult.isEmpty()) {
- captureResult.mMetadata.append(collectedQuirkResult);
+ if (mUsePartialResult && !collectedPartialResult.isEmpty()) {
+ captureResult.mMetadata.append(collectedPartialResult);
}
captureResult.mMetadata.sort();
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index ea958b7..b1b0033 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -135,6 +135,8 @@
virtual status_t flush(int64_t *lastFrameNumber = NULL);
+ virtual uint32_t getDeviceVersion();
+
// Methods called by subclasses
void notifyStatus(bool idle); // updates from StatusTracker
@@ -168,7 +170,7 @@
CameraMetadata mDeviceInfo;
- int mDeviceVersion;
+ uint32_t mDeviceVersion;
enum Status {
STATUS_ERROR,
@@ -199,8 +201,11 @@
// Need to hold on to stream references until configure completes.
Vector<sp<camera3::Camera3StreamInterface> > mDeletedStreams;
- // Whether quirk ANDROID_QUIRKS_USE_PARTIAL_RESULT is enabled
- bool mUsePartialResultQuirk;
+ // Whether the HAL will send partial results
+ bool mUsePartialResult;
+
+ // Number of partial results that will be delivered by the HAL.
+ uint32_t mNumPartialResults;
/**** End scope for mLock ****/
@@ -507,17 +512,17 @@
// If this request has any input buffer
bool hasInputBuffer;
- // Fields used by the partial result quirk only
- struct PartialResultQuirkInFlight {
+ // Fields used by the partial result only
+ struct PartialResultInFlight {
// Set by process_capture_result once 3A has been sent to clients
bool haveSent3A;
// Result metadata collected so far, when partial results are in use
CameraMetadata collectedResult;
- PartialResultQuirkInFlight():
+ PartialResultInFlight():
haveSent3A(false) {
}
- } partialResultQuirk;
+ } partialResult;
// Default constructor needed by KeyedVector
InFlightRequest() :
@@ -564,11 +569,11 @@
int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput);
/**
- * For the partial result quirk, check if all 3A state fields are available
+ * For partial results, check if all 3A state fields are available
* and if so, queue up 3A-only result to the client. Returns true if 3A
* is sent.
*/
- bool processPartial3AQuirk(uint32_t frameNumber,
+ bool processPartial3AResult(uint32_t frameNumber,
const CameraMetadata& partial, const CaptureResultExtras& resultExtras);
// Helpers for reading and writing 3A metadata into to/from partial results
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index 51eb845..b7ccaab 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -33,11 +33,8 @@
libhardware \
libsoundtrigger
-LOCAL_STATIC_LIBRARIES := \
- libserviceutility
+#LOCAL_C_INCLUDES += \
-LOCAL_C_INCLUDES += \
- $(TOPDIR)frameworks/av/services/audioflinger
LOCAL_MODULE:= libsoundtriggerservice
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 3654136..747af79 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -22,18 +22,18 @@
#include <sys/types.h>
#include <pthread.h>
-#include <system/sound_trigger.h>
-#include <cutils/atomic.h>
-#include <cutils/properties.h>
-#include <utils/Errors.h>
-#include <utils/Log.h>
#include <binder/IServiceManager.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
+#include <cutils/atomic.h>
+#include <cutils/properties.h>
#include <hardware/hardware.h>
-#include <hardware/sound_trigger.h>
-#include <ServiceUtilities.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+
#include "SoundTriggerHwService.h"
+#include <system/sound_trigger.h>
+#include <hardware/sound_trigger.h>
namespace android {
@@ -103,10 +103,6 @@
uint32_t *numModules)
{
ALOGV("listModules");
- if (!captureHotwordAllowed()) {
- return PERMISSION_DENIED;
- }
-
AutoMutex lock(mServiceLock);
if (numModules == NULL || (*numModules != 0 && modules == NULL)) {
return BAD_VALUE;
@@ -124,10 +120,6 @@
sp<ISoundTrigger>& moduleInterface)
{
ALOGV("attach module %d", handle);
- if (!captureHotwordAllowed()) {
- return PERMISSION_DENIED;
- }
-
AutoMutex lock(mServiceLock);
moduleInterface.clear();
if (client == 0) {
@@ -147,8 +139,8 @@
}
void SoundTriggerHwService::detachModule(sp<Module> module) {
- ALOGV("detachModule");
AutoMutex lock(mServiceLock);
+ ALOGV("detachModule");
module->clearClient();
}
@@ -318,9 +310,6 @@
void SoundTriggerHwService::Module::detach() {
ALOGV("detach()");
- if (!captureHotwordAllowed()) {
- return;
- }
{
AutoMutex lock(mLock);
for (size_t i = 0; i < mModels.size(); i++) {
@@ -348,9 +337,6 @@
sound_model_handle_t *handle)
{
ALOGV("loadSoundModel() handle");
- if (!captureHotwordAllowed()) {
- return PERMISSION_DENIED;
- }
if (modelMemory == 0 || modelMemory->pointer() == NULL) {
ALOGE("loadSoundModel() modelMemory is 0 or has NULL pointer()");
@@ -375,9 +361,6 @@
status_t SoundTriggerHwService::Module::unloadSoundModel(sound_model_handle_t handle)
{
ALOGV("unloadSoundModel() model handle %d", handle);
- if (!captureHotwordAllowed()) {
- return PERMISSION_DENIED;
- }
AutoMutex lock(mLock);
ssize_t index = mModels.indexOfKey(handle);
@@ -397,9 +380,6 @@
const sp<IMemory>& dataMemory)
{
ALOGV("startRecognition() model handle %d", handle);
- if (!captureHotwordAllowed()) {
- return PERMISSION_DENIED;
- }
if (dataMemory != 0 && dataMemory->pointer() == NULL) {
ALOGE("startRecognition() dataMemory is non-0 but has NULL pointer()");
@@ -435,9 +415,6 @@
status_t SoundTriggerHwService::Module::stopRecognition(sound_model_handle_t handle)
{
ALOGV("stopRecognition() model handle %d", handle);
- if (!captureHotwordAllowed()) {
- return PERMISSION_DENIED;
- }
AutoMutex lock(mLock);
sp<Model> model = getModel(handle);