Merge "Check if info for uri is available" into lmp-dev
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index dd63a23..f8c0198 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -320,6 +320,8 @@
audio_devices_t *device);
static status_t releaseSoundTriggerSession(audio_session_t session);
+ static audio_mode_t getPhoneState();
+
// ----------------------------------------------------------------------------
class AudioPortCallback : public RefBase
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index c251439..16fe9cf 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -142,6 +142,8 @@
audio_devices_t *device) = 0;
virtual status_t releaseSoundTriggerSession(audio_session_t session) = 0;
+
+ virtual audio_mode_t getPhoneState() = 0;
};
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 1742fbe..dda3657 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -939,6 +939,15 @@
if (aps == 0) return PERMISSION_DENIED;
return aps->releaseSoundTriggerSession(session);
}
+
+audio_mode_t AudioSystem::getPhoneState()
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return AUDIO_MODE_INVALID;
+ return aps->getPhoneState();
+}
+
+
// ---------------------------------------------------------------------------
void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who __unused)
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index ea7b279..e3beba5 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -2124,9 +2124,16 @@
// usage to stream type mapping
switch (aa.usage) {
+ case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+ // TODO once AudioPolicyManager fully supports audio_attributes_t,
+ // remove stream change based on phone state
+ if (AudioSystem::getPhoneState() == AUDIO_MODE_RINGTONE) {
+ mStreamType = AUDIO_STREAM_RING;
+ break;
+ }
+ /// FALL THROUGH
case AUDIO_USAGE_MEDIA:
case AUDIO_USAGE_GAME:
- case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
mStreamType = AUDIO_STREAM_MUSIC;
return;
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index b57f747..256cb3f 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -67,7 +67,8 @@
REGISTER_CLIENT,
GET_OUTPUT_FOR_ATTR,
ACQUIRE_SOUNDTRIGGER_SESSION,
- RELEASE_SOUNDTRIGGER_SESSION
+ RELEASE_SOUNDTRIGGER_SESSION,
+ GET_PHONE_STATE
};
class BpAudioPolicyService : public BpInterface<IAudioPolicyService>
@@ -607,6 +608,17 @@
}
return (status_t)reply.readInt32();
}
+
+ virtual audio_mode_t getPhoneState()
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ status_t status = remote()->transact(GET_PHONE_STATE, data, &reply);
+ if (status != NO_ERROR) {
+ return AUDIO_MODE_INVALID;
+ }
+ return (audio_mode_t)reply.readInt32();
+ }
};
IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -1057,6 +1069,12 @@
return NO_ERROR;
} break;
+ case GET_PHONE_STATE: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ reply->writeInt32((int32_t)getPhoneState());
+ return NO_ERROR;
+ } break;
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 73ac057..6d10651 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -20,6 +20,8 @@
#include "NuPlayerRenderer.h"
+#include <cutils/properties.h>
+
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -39,6 +41,16 @@
// static
const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
+static bool sFrameAccurateAVsync = false;
+
+static void readProperties() {
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get("persist.sys.media.avsync", value, NULL)) {
+ sFrameAccurateAVsync =
+ !strcmp("1", value) || !strcasecmp("true", value);
+ }
+}
+
NuPlayer::Renderer::Renderer(
const sp<MediaPlayerBase::AudioSink> &sink,
const sp<AMessage> &notify,
@@ -68,6 +80,7 @@
mVideoLateByUs(0ll),
mAudioOffloadPauseTimeoutGeneration(0),
mAudioOffloadTornDown(false) {
+ readProperties();
}
NuPlayer::Renderer::~Renderer() {
@@ -576,6 +589,11 @@
ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
// post 2 display refreshes before rendering is due
+ // FIXME currently this increases power consumption, so unless frame-accurate
+ // AV sync is requested, post closer to required render time (at 0.63 vsyncs)
+ if (!sFrameAccurateAVsync) {
+ twoVsyncsUs >>= 4;
+ }
msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
mDrainVideoQueuePending = true;
@@ -976,6 +994,8 @@
}
void NuPlayer::Renderer::onResume() {
+ readProperties();
+
if (!mPaused) {
return;
}
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 818bb05..66edf45 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2126,6 +2126,7 @@
size_t totalFramesWritten = mNormalSink->framesWritten();
if (totalFramesWritten >= mLatchD.mTimestamp.mPosition) {
mLatchD.mUnpresentedFrames = totalFramesWritten - mLatchD.mTimestamp.mPosition;
+ // mLatchD.mFramesReleased is set in threadloop_mix()
mLatchDValid = true;
}
}
@@ -2803,6 +2804,7 @@
// create a MonoPipe to connect our submix to FastMixer
NBAIO_Format format = mOutputSink->format();
+ NBAIO_Format origformat = format;
// adjust format to match that of the Fast Mixer
format.mFormat = fastMixerFormat;
format.mFrameSize = audio_bytes_per_sample(format.mFormat) * format.mChannelCount;
@@ -2822,14 +2824,15 @@
#ifdef TEE_SINK
if (mTeeSinkOutputEnabled) {
// create a Pipe to archive a copy of FastMixer's output for dumpsys
- Pipe *teeSink = new Pipe(mTeeSinkOutputFrames, format);
+ Pipe *teeSink = new Pipe(mTeeSinkOutputFrames, origformat);
+ const NBAIO_Format offers2[1] = {origformat};
numCounterOffers = 0;
- index = teeSink->negotiate(offers, 1, NULL, numCounterOffers);
+ index = teeSink->negotiate(offers2, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
mTeeSink = teeSink;
PipeReader *teeSource = new PipeReader(*teeSink);
numCounterOffers = 0;
- index = teeSource->negotiate(offers, 1, NULL, numCounterOffers);
+ index = teeSource->negotiate(offers2, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
mTeeSource = teeSource;
}
@@ -3091,6 +3094,18 @@
sleepTime = 0;
standbyTime = systemTime() + standbyDelay;
//TODO: delay standby when effects have a tail
+
+ mLatchD.mFramesReleased.clear();
+ {
+ Mutex::Autolock _l(mLock);
+ size_t size = mActiveTracks.size();
+ for (size_t i = 0; i < size; i++) {
+ sp<Track> t = mActiveTracks[i].promote();
+ if (t != 0) {
+ mLatchD.mFramesReleased.add(t.get(), t->mAudioTrackServerProxy->framesReleased());
+ }
+ }
+ }
}
void AudioFlinger::MixerThread::threadLoop_sleepTime()
@@ -4040,6 +4055,9 @@
track->mState = TrackBase::STOPPED;
}
if (track->isStopped()) {
+ if (track->mState == TrackBase::FLUSHED) {
+ flushHw_l();
+ }
track->reset();
}
tracksToRemove->add(track);
@@ -4212,6 +4230,12 @@
}
}
+void AudioFlinger::DirectOutputThread::flushHw_l()
+{
+ if (mOutput->stream->flush != NULL)
+ mOutput->stream->flush(mOutput->stream);
+}
+
// ----------------------------------------------------------------------------
AudioFlinger::AsyncCallbackThread::AsyncCallbackThread(
@@ -4579,7 +4603,7 @@
void AudioFlinger::OffloadThread::flushHw_l()
{
- mOutput->stream->flush(mOutput->stream);
+ DirectOutputThread::flushHw_l();
// Flush anything still waiting in the mixbuffer
mCurrentWriteLength = 0;
mBytesRemaining = 0;
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index fd025b5..7d3b854 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -819,6 +819,7 @@
struct {
AudioTimestamp mTimestamp;
uint32_t mUnpresentedFrames;
+ KeyedVector<Track *, uint32_t> mFramesReleased;
} mLatchD, mLatchQ;
bool mLatchDValid; // true means mLatchD is valid, and clock it into latch at next opportunity
bool mLatchQValid; // true means mLatchQ is valid
@@ -894,6 +895,7 @@
virtual bool checkForNewParameter_l(const String8& keyValuePair,
status_t& status);
+ virtual void flushHw_l();
protected:
virtual int getTrackName_l(audio_channel_mask_t channelMask,
@@ -929,6 +931,7 @@
OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
audio_io_handle_t id, uint32_t device);
virtual ~OffloadThread() {};
+ virtual void flushHw_l();
protected:
// threadLoop snippets
@@ -941,9 +944,6 @@
virtual void onAddNewTrack_l();
private:
- void flushHw_l();
-
-private:
bool mHwPaused;
bool mFlushPending;
size_t mPausedWriteLength; // length in bytes of write interrupted by pause
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index c0a75b9..b2d53cf 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -824,6 +824,10 @@
// remove from active track list, reset(), and trigger presentation complete
if (playbackThread->mActiveTracks.indexOf(this) < 0) {
reset();
+ if (thread->type() == ThreadBase::DIRECT) {
+ DirectOutputThread *t = (DirectOutputThread *)playbackThread;
+ t->flushHw_l();
+ }
}
}
// Prevent flush being lost if the track is flushed and then resumed
@@ -894,7 +898,15 @@
uint32_t unpresentedFrames =
((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) /
playbackThread->mSampleRate;
- uint32_t framesWritten = mAudioTrackServerProxy->framesReleased();
+ // FIXME Since we're using a raw pointer as the key, it is theoretically possible
+ // for a brand new track to share the same address as a recently destroyed
+ // track, and thus for us to get the frames released of the wrong track.
+ // It is unlikely that we would be able to call getTimestamp() so quickly
+ // right after creating a new track. Nevertheless, the index here should
+ // be changed to something that is unique. Or use a completely different strategy.
+ ssize_t i = playbackThread->mLatchQ.mFramesReleased.indexOfKey(this);
+ uint32_t framesWritten = i >= 0 ?
+ playbackThread->mLatchQ.mFramesReleased[i] : mAudioTrackServerProxy->framesReleased();
bool checkPreviousTimestamp = mPreviousValid && framesWritten >= mPreviousFramesWritten;
if (framesWritten < unpresentedFrames) {
mPreviousValid = false;
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
index 2c51e25..b212ca6 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
@@ -80,9 +80,16 @@
Mutex::Autolock _l(mLock);
mAudioPolicyManager->setPhoneState(state);
+ mPhoneState = state;
return NO_ERROR;
}
+audio_mode_t AudioPolicyService::getPhoneState()
+{
+ Mutex::Autolock _l(mLock);
+ return mPhoneState;
+}
+
status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage,
audio_policy_forced_cfg_t config)
{
diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
index f20c070..1e40bc3 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
@@ -84,9 +84,16 @@
Mutex::Autolock _l(mLock);
mpAudioPolicy->set_phone_state(mpAudioPolicy, state);
+ mPhoneState = state;
return NO_ERROR;
}
+audio_mode_t AudioPolicyService::getPhoneState()
+{
+ Mutex::Autolock _l(mLock);
+ return mPhoneState;
+}
+
status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage,
audio_policy_forced_cfg_t config)
{
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index d51ee8e..536987a 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -927,13 +927,23 @@
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
+ sp<IOProfile> profile;
+
+ // skip direct output selection if the request can obviously be attached to a mixed output
+ // and not explicitly requested
+ if (((flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
+ audio_is_linear_pcm(format) && samplingRate <= MAX_MIXER_SAMPLING_RATE &&
+ audio_channel_count_from_out_mask(channelMask) <= 2) {
+ goto non_direct_output;
+ }
+
// Do not allow offloading if one non offloadable effect is enabled. This prevents from
// creating an offloaded track and tearing it down immediately after start when audioflinger
// detects there is an active non offloadable effect.
// FIXME: We should check the audio session here but we do not have it in this context.
// This may prevent offloading in rare situations where effects are left active by apps
// in the background.
- sp<IOProfile> profile;
+
if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
!isNonOffloadableEffectEnabled()) {
profile = getProfileForDirectOutput(device,
@@ -1016,6 +1026,8 @@
return output;
}
+non_direct_output:
+
// ignoring channel mask due to downmix capability in mixer
// open a non direct output
diff --git a/services/audiopolicy/AudioPolicyService.cpp b/services/audiopolicy/AudioPolicyService.cpp
index 50bb8c7..647cda4 100644
--- a/services/audiopolicy/AudioPolicyService.cpp
+++ b/services/audiopolicy/AudioPolicyService.cpp
@@ -59,7 +59,7 @@
AudioPolicyService::AudioPolicyService()
: BnAudioPolicyService(), mpAudioPolicyDev(NULL), mpAudioPolicy(NULL),
- mAudioPolicyManager(NULL), mAudioPolicyClient(NULL)
+ mAudioPolicyManager(NULL), mAudioPolicyClient(NULL), mPhoneState(AUDIO_MODE_INVALID)
{
char value[PROPERTY_VALUE_MAX];
const struct hw_module_t *module;
diff --git a/services/audiopolicy/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h
index 0044e7a..2cea40b 100644
--- a/services/audiopolicy/AudioPolicyService.h
+++ b/services/audiopolicy/AudioPolicyService.h
@@ -174,6 +174,8 @@
virtual status_t releaseSoundTriggerSession(audio_session_t session);
+ virtual audio_mode_t getPhoneState();
+
status_t doStopOutput(audio_io_handle_t output,
audio_stream_type_t stream,
int session = 0);
@@ -493,6 +495,7 @@
// Manage all effects configured in audio_effects.conf
sp<AudioPolicyEffects> mAudioPolicyEffects;
+ audio_mode_t mPhoneState;
};
}; // namespace android
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 48ec730..3610362 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1049,32 +1049,35 @@
}
}
- if (mZslProcessor->getStreamId() != NO_STREAM) {
- ALOGV("%s: Camera %d: Clearing out zsl stream before "
- "creating recording stream", __FUNCTION__, mCameraId);
- res = mStreamingProcessor->stopStream();
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't stop streaming to delete callback stream",
- __FUNCTION__, mCameraId);
- return res;
- }
- res = mDevice->waitUntilDrained();
- if (res != OK) {
- ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- }
- res = mZslProcessor->clearZslQueue();
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't clear zsl queue",
- __FUNCTION__, mCameraId);
- return res;
- }
- res = mZslProcessor->deleteStream();
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to delete zsl stream before "
- "record: %s (%d)", __FUNCTION__, mCameraId,
- strerror(-res), res);
- return res;
+ // On current HALs, clean up ZSL before transitioning into recording
+ if (mDeviceVersion != CAMERA_DEVICE_API_VERSION_2_0) {
+ if (mZslProcessor->getStreamId() != NO_STREAM) {
+ ALOGV("%s: Camera %d: Clearing out zsl stream before "
+ "creating recording stream", __FUNCTION__, mCameraId);
+ res = mStreamingProcessor->stopStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't stop streaming to delete callback stream",
+ __FUNCTION__, mCameraId);
+ return res;
+ }
+ res = mDevice->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ res = mZslProcessor->clearZslQueue();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't clear zsl queue",
+ __FUNCTION__, mCameraId);
+ return res;
+ }
+ res = mZslProcessor->deleteStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete zsl stream before "
+ "record: %s (%d)", __FUNCTION__, mCameraId,
+ strerror(-res), res);
+ return res;
+ }
}
}
@@ -1082,34 +1085,53 @@
// and we can't fail record start without stagefright asserting.
params.previewCallbackFlags = 0;
- bool recordingStreamNeedsUpdate;
- res = mStreamingProcessor->recordingStreamNeedsUpdate(params, &recordingStreamNeedsUpdate);
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't query recording stream",
- __FUNCTION__, mCameraId);
- return res;
- }
-
- if (recordingStreamNeedsUpdate) {
- // Need to stop stream here so updateProcessorStream won't trigger configureStream
- // Right now camera device cannot handle configureStream failure gracefully
- // when device is streaming
- res = mStreamingProcessor->stopStream();
+ if (mDeviceVersion != CAMERA_DEVICE_API_VERSION_2_0) {
+ // For newer devices, may need to reconfigure video snapshot JPEG sizes
+ // during recording startup, so need a more complex sequence here to
+ // ensure an early stream reconfiguration doesn't happen
+ bool recordingStreamNeedsUpdate;
+ res = mStreamingProcessor->recordingStreamNeedsUpdate(params, &recordingStreamNeedsUpdate);
if (res != OK) {
- ALOGE("%s: Camera %d: Can't stop streaming to update record stream",
+ ALOGE("%s: Camera %d: Can't query recording stream",
__FUNCTION__, mCameraId);
return res;
}
- res = mDevice->waitUntilDrained();
- if (res != OK) {
- ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
+
+ if (recordingStreamNeedsUpdate) {
+ // Need to stop stream here so updateProcessorStream won't trigger configureStream
+ // Right now camera device cannot handle configureStream failure gracefully
+ // when device is streaming
+ res = mStreamingProcessor->stopStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't stop streaming to update record "
+ "stream", __FUNCTION__, mCameraId);
+ return res;
+ }
+ res = mDevice->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Waiting to stop streaming failed: "
+ "%s (%d)", __FUNCTION__, mCameraId,
+ strerror(-res), res);
+ }
+
+ res = updateProcessorStream<
+ StreamingProcessor,
+ &StreamingProcessor::updateRecordingStream>(
+ mStreamingProcessor,
+ params);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to update recording stream: "
+ "%s (%d)", __FUNCTION__, mCameraId,
+ strerror(-res), res);
+ return res;
+ }
}
+ } else {
+ // Maintain call sequencing for HALv2 devices.
res = updateProcessorStream<
StreamingProcessor,
&StreamingProcessor::updateRecordingStream>(mStreamingProcessor,
- params);
-
+ params);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update recording stream: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index cda98be..b433781 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -89,14 +89,27 @@
mCaptureConsumer->setFrameAvailableListener(this);
mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer"));
mCaptureWindow = new Surface(producer);
+ }
+
+ // Since ashmem heaps are rounded up to page size, don't reallocate if
+ // the capture heap isn't exactly the same size as the required JPEG buffer
+ const size_t HEAP_SLACK_FACTOR = 2;
+ if (mCaptureHeap == 0 ||
+ (mCaptureHeap->getSize() < static_cast<size_t>(maxJpegSize)) ||
+ (mCaptureHeap->getSize() >
+ static_cast<size_t>(maxJpegSize) * HEAP_SLACK_FACTOR) ) {
// Create memory for API consumption
- mCaptureHeap = new MemoryHeapBase(maxJpegSize, 0, "Camera2Client::CaptureHeap");
+ mCaptureHeap.clear();
+ mCaptureHeap =
+ new MemoryHeapBase(maxJpegSize, 0, "Camera2Client::CaptureHeap");
if (mCaptureHeap->getSize() == 0) {
ALOGE("%s: Camera %d: Unable to allocate memory for capture",
__FUNCTION__, mId);
return NO_MEMORY;
}
}
+ ALOGV("%s: Camera %d: JPEG capture heap now %d bytes; requested %d bytes",
+ __FUNCTION__, mId, mCaptureHeap->getSize(), maxJpegSize);
if (mCaptureStreamId != NO_STREAM) {
// Check if stream parameters have to change
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 9818c96..7b90d28 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -622,8 +622,8 @@
camera_metadata_ro_entry_t availableAeModes =
staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES, 0, 0, false);
+ flashMode = Parameters::FLASH_MODE_OFF;
if (isFlashAvailable) {
- flashMode = Parameters::FLASH_MODE_OFF;
params.set(CameraParameters::KEY_FLASH_MODE,
CameraParameters::FLASH_MODE_OFF);
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index bb72206..8f78103 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -154,7 +154,7 @@
mId, strerror(-res), res);
return res;
}
- if (mDeleted || currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
+ if (currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
currentHeight != (uint32_t)params.fastInfo.arrayHeight) {
res = device->deleteReprocessStream(mZslReprocessStreamId);
if (res != OK) {